1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * SCSI disk target driver. 31 */ 32 33 #include <sys/scsi/scsi.h> 34 #include <sys/dkbad.h> 35 #include <sys/dklabel.h> 36 #include <sys/dkio.h> 37 #include <sys/fdio.h> 38 #include <sys/cdio.h> 39 #include <sys/mhd.h> 40 #include <sys/vtoc.h> 41 #include <sys/dktp/fdisk.h> 42 #include <sys/file.h> 43 #include <sys/stat.h> 44 #include <sys/kstat.h> 45 #include <sys/vtrace.h> 46 #include <sys/note.h> 47 #include <sys/thread.h> 48 #include <sys/proc.h> 49 #include <sys/efi_partition.h> 50 #include <sys/var.h> 51 #include <sys/aio_req.h> 52 #if (defined(__fibre)) 53 /* Note: is there a leadville version of the following? */ 54 #include <sys/fc4/fcal_linkapp.h> 55 #endif 56 #include <sys/taskq.h> 57 #include <sys/uuid.h> 58 #include <sys/byteorder.h> 59 #include <sys/sdt.h> 60 61 #include "sd_xbuf.h" 62 63 #include <sys/scsi/targets/sddef.h> 64 65 66 /* 67 * Loadable module info. 
68 */ 69 #if (defined(__fibre)) 70 #define SD_MODULE_NAME "SCSI SSA/FCAL Disk Driver %I%" 71 char _depends_on[] = "misc/scsi drv/fcp"; 72 #else 73 #define SD_MODULE_NAME "SCSI Disk Driver %I%" 74 char _depends_on[] = "misc/scsi"; 75 #endif 76 77 /* 78 * Define the interconnect type, to allow the driver to distinguish 79 * between parallel SCSI (sd) and fibre channel (ssd) behaviors. 80 * 81 * This is really for backward compatability. In the future, the driver 82 * should actually check the "interconnect-type" property as reported by 83 * the HBA; however at present this property is not defined by all HBAs, 84 * so we will use this #define (1) to permit the driver to run in 85 * backward-compatability mode; and (2) to print a notification message 86 * if an FC HBA does not support the "interconnect-type" property. The 87 * behavior of the driver will be to assume parallel SCSI behaviors unless 88 * the "interconnect-type" property is defined by the HBA **AND** has a 89 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or 90 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre 91 * Channel behaviors (as per the old ssd). (Note that the 92 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and 93 * will result in the driver assuming parallel SCSI behaviors.) 94 * 95 * (see common/sys/scsi/impl/services.h) 96 * 97 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default 98 * since some FC HBAs may already support that, and there is some code in 99 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the 100 * default would confuse that code, and besides things should work fine 101 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the 102 * "interconnect_type" property. 
103 */ 104 #if (defined(__fibre)) 105 #define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_FIBRE 106 #else 107 #define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_PARALLEL 108 #endif 109 110 /* 111 * The name of the driver, established from the module name in _init. 112 */ 113 static char *sd_label = NULL; 114 115 /* 116 * Driver name is unfortunately prefixed on some driver.conf properties. 117 */ 118 #if (defined(__fibre)) 119 #define sd_max_xfer_size ssd_max_xfer_size 120 #define sd_config_list ssd_config_list 121 static char *sd_max_xfer_size = "ssd_max_xfer_size"; 122 static char *sd_config_list = "ssd-config-list"; 123 #else 124 static char *sd_max_xfer_size = "sd_max_xfer_size"; 125 static char *sd_config_list = "sd-config-list"; 126 #endif 127 128 /* 129 * Driver global variables 130 */ 131 132 #if (defined(__fibre)) 133 /* 134 * These #defines are to avoid namespace collisions that occur because this 135 * code is currently used to compile two seperate driver modules: sd and ssd. 136 * All global variables need to be treated this way (even if declared static) 137 * in order to allow the debugger to resolve the names properly. 138 * It is anticipated that in the near future the ssd module will be obsoleted, 139 * at which time this namespace issue should go away. 
140 */ 141 #define sd_state ssd_state 142 #define sd_io_time ssd_io_time 143 #define sd_failfast_enable ssd_failfast_enable 144 #define sd_ua_retry_count ssd_ua_retry_count 145 #define sd_report_pfa ssd_report_pfa 146 #define sd_max_throttle ssd_max_throttle 147 #define sd_min_throttle ssd_min_throttle 148 #define sd_rot_delay ssd_rot_delay 149 150 #define sd_retry_on_reservation_conflict \ 151 ssd_retry_on_reservation_conflict 152 #define sd_reinstate_resv_delay ssd_reinstate_resv_delay 153 #define sd_resv_conflict_name ssd_resv_conflict_name 154 155 #define sd_component_mask ssd_component_mask 156 #define sd_level_mask ssd_level_mask 157 #define sd_debug_un ssd_debug_un 158 #define sd_error_level ssd_error_level 159 160 #define sd_xbuf_active_limit ssd_xbuf_active_limit 161 #define sd_xbuf_reserve_limit ssd_xbuf_reserve_limit 162 163 #define sd_tr ssd_tr 164 #define sd_reset_throttle_timeout ssd_reset_throttle_timeout 165 #define sd_check_media_time ssd_check_media_time 166 #define sd_wait_cmds_complete ssd_wait_cmds_complete 167 #define sd_label_mutex ssd_label_mutex 168 #define sd_detach_mutex ssd_detach_mutex 169 #define sd_log_buf ssd_log_buf 170 #define sd_log_mutex ssd_log_mutex 171 172 #define sd_disk_table ssd_disk_table 173 #define sd_disk_table_size ssd_disk_table_size 174 #define sd_sense_mutex ssd_sense_mutex 175 #define sd_cdbtab ssd_cdbtab 176 177 #define sd_cb_ops ssd_cb_ops 178 #define sd_ops ssd_ops 179 #define sd_additional_codes ssd_additional_codes 180 181 #define sd_minor_data ssd_minor_data 182 #define sd_minor_data_efi ssd_minor_data_efi 183 184 #define sd_tq ssd_tq 185 #define sd_wmr_tq ssd_wmr_tq 186 #define sd_taskq_name ssd_taskq_name 187 #define sd_wmr_taskq_name ssd_wmr_taskq_name 188 #define sd_taskq_minalloc ssd_taskq_minalloc 189 #define sd_taskq_maxalloc ssd_taskq_maxalloc 190 191 #define sd_dump_format_string ssd_dump_format_string 192 193 #define sd_iostart_chain ssd_iostart_chain 194 #define sd_iodone_chain ssd_iodone_chain 195 
196 #define sd_pm_idletime ssd_pm_idletime 197 198 #define sd_force_pm_supported ssd_force_pm_supported 199 200 #define sd_dtype_optical_bind ssd_dtype_optical_bind 201 #endif 202 203 204 #ifdef SDDEBUG 205 int sd_force_pm_supported = 0; 206 #endif /* SDDEBUG */ 207 208 void *sd_state = NULL; 209 int sd_io_time = SD_IO_TIME; 210 int sd_failfast_enable = 1; 211 int sd_ua_retry_count = SD_UA_RETRY_COUNT; 212 int sd_report_pfa = 1; 213 int sd_max_throttle = SD_MAX_THROTTLE; 214 int sd_min_throttle = SD_MIN_THROTTLE; 215 int sd_rot_delay = 4; /* Default 4ms Rotation delay */ 216 217 int sd_retry_on_reservation_conflict = 1; 218 int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 219 _NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay)) 220 221 static int sd_dtype_optical_bind = -1; 222 223 /* Note: the following is not a bug, it really is "sd_" and not "ssd_" */ 224 static char *sd_resv_conflict_name = "sd_retry_on_reservation_conflict"; 225 226 /* 227 * Global data for debug logging. To enable debug printing, sd_component_mask 228 * and sd_level_mask should be set to the desired bit patterns as outlined in 229 * sddef.h. 230 */ 231 uint_t sd_component_mask = 0x0; 232 uint_t sd_level_mask = 0x0; 233 struct sd_lun *sd_debug_un = NULL; 234 uint_t sd_error_level = SCSI_ERR_RETRYABLE; 235 236 /* Note: these may go away in the future... */ 237 static uint32_t sd_xbuf_active_limit = 512; 238 static uint32_t sd_xbuf_reserve_limit = 16; 239 240 static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 }; 241 242 /* 243 * Timer value used to reset the throttle after it has been reduced 244 * (typically in response to TRAN_BUSY) 245 */ 246 static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT; 247 248 /* 249 * Interval value associated with the media change scsi watch. 
250 */ 251 static int sd_check_media_time = 3000000; 252 253 /* 254 * Wait value used for in progress operations during a DDI_SUSPEND 255 */ 256 static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE; 257 258 /* 259 * sd_label_mutex protects a static buffer used in the disk label 260 * component of the driver 261 */ 262 static kmutex_t sd_label_mutex; 263 264 /* 265 * sd_detach_mutex protects un_layer_count, un_detach_count, and 266 * un_opens_in_progress in the sd_lun structure. 267 */ 268 static kmutex_t sd_detach_mutex; 269 270 _NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex, 271 sd_lun::{un_layer_count un_detach_count un_opens_in_progress})) 272 273 /* 274 * Global buffer and mutex for debug logging 275 */ 276 static char sd_log_buf[1024]; 277 static kmutex_t sd_log_mutex; 278 279 280 /* 281 * "Smart" Probe Caching structs, globals, #defines, etc. 282 * For parallel scsi and non-self-identify device only. 283 */ 284 285 /* 286 * The following resources and routines are implemented to support 287 * "smart" probing, which caches the scsi_probe() results in an array, 288 * in order to help avoid long probe times. 289 */ 290 struct sd_scsi_probe_cache { 291 struct sd_scsi_probe_cache *next; 292 dev_info_t *pdip; 293 int cache[NTARGETS_WIDE]; 294 }; 295 296 static kmutex_t sd_scsi_probe_cache_mutex; 297 static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL; 298 299 /* 300 * Really we only need protection on the head of the linked list, but 301 * better safe than sorry. 
302 */ 303 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex, 304 sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip)) 305 306 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex, 307 sd_scsi_probe_cache_head)) 308 309 310 /* 311 * Vendor specific data name property declarations 312 */ 313 314 #if defined(__fibre) || defined(__i386) ||defined(__amd64) 315 316 static sd_tunables seagate_properties = { 317 SEAGATE_THROTTLE_VALUE, 318 0, 319 0, 320 0, 321 0, 322 0, 323 0, 324 0, 325 0 326 }; 327 328 329 static sd_tunables fujitsu_properties = { 330 FUJITSU_THROTTLE_VALUE, 331 0, 332 0, 333 0, 334 0, 335 0, 336 0, 337 0, 338 0 339 }; 340 341 static sd_tunables ibm_properties = { 342 IBM_THROTTLE_VALUE, 343 0, 344 0, 345 0, 346 0, 347 0, 348 0, 349 0, 350 0 351 }; 352 353 static sd_tunables purple_properties = { 354 PURPLE_THROTTLE_VALUE, 355 0, 356 0, 357 PURPLE_BUSY_RETRIES, 358 PURPLE_RESET_RETRY_COUNT, 359 PURPLE_RESERVE_RELEASE_TIME, 360 0, 361 0, 362 0 363 }; 364 365 static sd_tunables sve_properties = { 366 SVE_THROTTLE_VALUE, 367 0, 368 0, 369 SVE_BUSY_RETRIES, 370 SVE_RESET_RETRY_COUNT, 371 SVE_RESERVE_RELEASE_TIME, 372 SVE_MIN_THROTTLE_VALUE, 373 SVE_DISKSORT_DISABLED_FLAG, 374 0 375 }; 376 377 static sd_tunables maserati_properties = { 378 0, 379 0, 380 0, 381 0, 382 0, 383 0, 384 0, 385 MASERATI_DISKSORT_DISABLED_FLAG, 386 MASERATI_LUN_RESET_ENABLED_FLAG 387 }; 388 389 static sd_tunables pirus_properties = { 390 PIRUS_THROTTLE_VALUE, 391 0, 392 PIRUS_NRR_COUNT, 393 PIRUS_BUSY_RETRIES, 394 PIRUS_RESET_RETRY_COUNT, 395 0, 396 PIRUS_MIN_THROTTLE_VALUE, 397 PIRUS_DISKSORT_DISABLED_FLAG, 398 PIRUS_LUN_RESET_ENABLED_FLAG 399 }; 400 401 #endif 402 403 #if (defined(__sparc) && !defined(__fibre)) || \ 404 (defined(__i386) || defined(__amd64)) 405 406 407 static sd_tunables elite_properties = { 408 ELITE_THROTTLE_VALUE, 409 0, 410 0, 411 0, 412 0, 413 0, 414 0, 415 0, 416 0 417 }; 418 419 static sd_tunables st31200n_properties = { 420 ST31200N_THROTTLE_VALUE, 421 
0, 422 0, 423 0, 424 0, 425 0, 426 0, 427 0, 428 0 429 }; 430 431 #endif /* Fibre or not */ 432 433 static sd_tunables lsi_properties_scsi = { 434 LSI_THROTTLE_VALUE, 435 0, 436 LSI_NOTREADY_RETRIES, 437 0, 438 0, 439 0, 440 0, 441 0, 442 0 443 }; 444 445 static sd_tunables symbios_properties = { 446 SYMBIOS_THROTTLE_VALUE, 447 0, 448 SYMBIOS_NOTREADY_RETRIES, 449 0, 450 0, 451 0, 452 0, 453 0, 454 0 455 }; 456 457 static sd_tunables lsi_properties = { 458 0, 459 0, 460 LSI_NOTREADY_RETRIES, 461 0, 462 0, 463 0, 464 0, 465 0, 466 0 467 }; 468 469 static sd_tunables lsi_oem_properties = { 470 0, 471 0, 472 LSI_OEM_NOTREADY_RETRIES, 473 0, 474 0, 475 0, 476 0, 477 0, 478 0 479 }; 480 481 482 483 #if (defined(SD_PROP_TST)) 484 485 #define SD_TST_CTYPE_VAL CTYPE_CDROM 486 #define SD_TST_THROTTLE_VAL 16 487 #define SD_TST_NOTREADY_VAL 12 488 #define SD_TST_BUSY_VAL 60 489 #define SD_TST_RST_RETRY_VAL 36 490 #define SD_TST_RSV_REL_TIME 60 491 492 static sd_tunables tst_properties = { 493 SD_TST_THROTTLE_VAL, 494 SD_TST_CTYPE_VAL, 495 SD_TST_NOTREADY_VAL, 496 SD_TST_BUSY_VAL, 497 SD_TST_RST_RETRY_VAL, 498 SD_TST_RSV_REL_TIME, 499 0, 500 0, 501 0 502 }; 503 #endif 504 505 /* This is similiar to the ANSI toupper implementation */ 506 #define SD_TOUPPER(C) (((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C)) 507 508 /* 509 * Static Driver Configuration Table 510 * 511 * This is the table of disks which need throttle adjustment (or, perhaps 512 * something else as defined by the flags at a future time.) device_id 513 * is a string consisting of concatenated vid (vendor), pid (product/model) 514 * and revision strings as defined in the scsi_inquiry structure. Offsets of 515 * the parts of the string are as defined by the sizes in the scsi_inquiry 516 * structure. Device type is searched as far as the device_id string is 517 * defined. Flags defines which values are to be set in the driver from the 518 * properties list. 
519 * 520 * Entries below which begin and end with a "*" are a special case. 521 * These do not have a specific vendor, and the string which follows 522 * can appear anywhere in the 16 byte PID portion of the inquiry data. 523 * 524 * Entries below which begin and end with a " " (blank) are a special 525 * case. The comparison function will treat multiple consecutive blanks 526 * as equivalent to a single blank. For example, this causes a 527 * sd_disk_table entry of " NEC CDROM " to match a device's id string 528 * of "NEC CDROM". 529 * 530 * Note: The MD21 controller type has been obsoleted. 531 * ST318202F is a Legacy device 532 * MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been 533 * made with an FC connection. The entries here are a legacy. 534 */ 535 static sd_disk_config_t sd_disk_table[] = { 536 #if defined(__fibre) || defined(__i386) || defined(__amd64) 537 { "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties }, 538 { "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties }, 539 { "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties }, 540 { "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties }, 541 { "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 542 { "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 543 { "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 544 { "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 545 { "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 546 { "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 547 { "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 548 { "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 549 { "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 550 { "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties }, 551 { "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 552 { "FUJITSU 
MAG3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 553 { "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 554 { "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 555 { "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 556 { "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 557 { "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 558 { "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 559 { "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties }, 560 { "IBM DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties }, 561 { "IBM DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties }, 562 { "IBM IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties }, 563 { "IBM IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties }, 564 { "IBM 3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 565 { "IBM 3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 566 { "IBM 3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 567 { "IBM 1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 568 { "IBM 1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 569 { "IBM 1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 570 { "IBM FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 571 { "LSI INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 572 { "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 573 { "SGI TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 574 { "SGI IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 575 { "*CSM100_*", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 576 { "*CSM200_*", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 577 { "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties }, 578 { "SUN T3", SD_CONF_BSET_THROTTLE | 579 SD_CONF_BSET_BSY_RETRY_COUNT| 580 SD_CONF_BSET_RST_RETRIES| 581 SD_CONF_BSET_RSV_REL_TIME, 582 &purple_properties }, 583 { "SUN SESS01", SD_CONF_BSET_THROTTLE | 584 SD_CONF_BSET_BSY_RETRY_COUNT| 585 SD_CONF_BSET_RST_RETRIES| 586 
SD_CONF_BSET_RSV_REL_TIME| 587 SD_CONF_BSET_MIN_THROTTLE| 588 SD_CONF_BSET_DISKSORT_DISABLED, 589 &sve_properties }, 590 { "SUN T4", SD_CONF_BSET_THROTTLE | 591 SD_CONF_BSET_BSY_RETRY_COUNT| 592 SD_CONF_BSET_RST_RETRIES| 593 SD_CONF_BSET_RSV_REL_TIME, 594 &purple_properties }, 595 { "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED | 596 SD_CONF_BSET_LUN_RESET_ENABLED, 597 &maserati_properties }, 598 { "SUN SE6920", SD_CONF_BSET_THROTTLE | 599 SD_CONF_BSET_NRR_COUNT| 600 SD_CONF_BSET_BSY_RETRY_COUNT| 601 SD_CONF_BSET_RST_RETRIES| 602 SD_CONF_BSET_MIN_THROTTLE| 603 SD_CONF_BSET_DISKSORT_DISABLED| 604 SD_CONF_BSET_LUN_RESET_ENABLED, 605 &pirus_properties }, 606 { "SUN PSX1000", SD_CONF_BSET_THROTTLE | 607 SD_CONF_BSET_NRR_COUNT| 608 SD_CONF_BSET_BSY_RETRY_COUNT| 609 SD_CONF_BSET_RST_RETRIES| 610 SD_CONF_BSET_MIN_THROTTLE| 611 SD_CONF_BSET_DISKSORT_DISABLED| 612 SD_CONF_BSET_LUN_RESET_ENABLED, 613 &pirus_properties }, 614 { "SUN SE6330", SD_CONF_BSET_THROTTLE | 615 SD_CONF_BSET_NRR_COUNT| 616 SD_CONF_BSET_BSY_RETRY_COUNT| 617 SD_CONF_BSET_RST_RETRIES| 618 SD_CONF_BSET_MIN_THROTTLE| 619 SD_CONF_BSET_DISKSORT_DISABLED| 620 SD_CONF_BSET_LUN_RESET_ENABLED, 621 &pirus_properties }, 622 { "STK OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 623 { "STK OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 624 { "STK BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 625 { "STK FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties }, 626 { "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties }, 627 #endif /* fibre or NON-sparc platforms */ 628 #if ((defined(__sparc) && !defined(__fibre)) ||\ 629 (defined(__i386) || defined(__amd64))) 630 { "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties }, 631 { "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties }, 632 { "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL }, 633 { "CONNER CP30540", SD_CONF_BSET_NOCACHE, NULL }, 634 { "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL }, 635 { 
"*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL }, 636 { "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL }, 637 { "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL }, 638 { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL }, 639 { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL }, 640 { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL }, 641 { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL }, 642 { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT, 643 &symbios_properties }, 644 { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT, 645 &lsi_properties_scsi }, 646 #if defined(__i386) || defined(__amd64) 647 { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD 648 | SD_CONF_BSET_READSUB_BCD 649 | SD_CONF_BSET_READ_TOC_ADDR_BCD 650 | SD_CONF_BSET_NO_READ_HEADER 651 | SD_CONF_BSET_READ_CD_XD4), NULL }, 652 653 { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD 654 | SD_CONF_BSET_READSUB_BCD 655 | SD_CONF_BSET_READ_TOC_ADDR_BCD 656 | SD_CONF_BSET_NO_READ_HEADER 657 | SD_CONF_BSET_READ_CD_XD4), NULL }, 658 #endif /* __i386 || __amd64 */ 659 #endif /* sparc NON-fibre or NON-sparc platforms */ 660 661 #if (defined(SD_PROP_TST)) 662 { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE 663 | SD_CONF_BSET_CTYPE 664 | SD_CONF_BSET_NRR_COUNT 665 | SD_CONF_BSET_FAB_DEVID 666 | SD_CONF_BSET_NOCACHE 667 | SD_CONF_BSET_BSY_RETRY_COUNT 668 | SD_CONF_BSET_PLAYMSF_BCD 669 | SD_CONF_BSET_READSUB_BCD 670 | SD_CONF_BSET_READ_TOC_TRK_BCD 671 | SD_CONF_BSET_READ_TOC_ADDR_BCD 672 | SD_CONF_BSET_NO_READ_HEADER 673 | SD_CONF_BSET_READ_CD_XD4 674 | SD_CONF_BSET_RST_RETRIES 675 | SD_CONF_BSET_RSV_REL_TIME 676 | SD_CONF_BSET_TUR_CHECK), &tst_properties}, 677 #endif 678 }; 679 680 static const int sd_disk_table_size = 681 sizeof (sd_disk_table)/ sizeof (sd_disk_config_t); 682 683 684 /* 685 * Return codes of sd_uselabel(). 
686 */ 687 #define SD_LABEL_IS_VALID 0 688 #define SD_LABEL_IS_INVALID 1 689 690 #define SD_INTERCONNECT_PARALLEL 0 691 #define SD_INTERCONNECT_FABRIC 1 692 #define SD_INTERCONNECT_FIBRE 2 693 #define SD_INTERCONNECT_SSA 3 694 #define SD_IS_PARALLEL_SCSI(un) \ 695 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL) 696 697 /* 698 * Definitions used by device id registration routines 699 */ 700 #define VPD_HEAD_OFFSET 3 /* size of head for vpd page */ 701 #define VPD_PAGE_LENGTH 3 /* offset for pge length data */ 702 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */ 703 #define WD_NODE 7 /* the whole disk minor */ 704 705 static kmutex_t sd_sense_mutex = {0}; 706 707 /* 708 * Macros for updates of the driver state 709 */ 710 #define New_state(un, s) \ 711 (un)->un_last_state = (un)->un_state, (un)->un_state = (s) 712 #define Restore_state(un) \ 713 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); } 714 715 static struct sd_cdbinfo sd_cdbtab[] = { 716 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, }, 717 { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, }, 718 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, }, 719 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, }, 720 }; 721 722 /* 723 * Specifies the number of seconds that must have elapsed since the last 724 * cmd. has completed for a device to be declared idle to the PM framework. 725 */ 726 static int sd_pm_idletime = 1; 727 728 /* 729 * Internal function prototypes 730 */ 731 732 #if (defined(__fibre)) 733 /* 734 * These #defines are to avoid namespace collisions that occur because this 735 * code is currently used to compile two seperate driver modules: sd and ssd. 736 * All function names need to be treated this way (even if declared static) 737 * in order to allow the debugger to resolve the names properly. 738 * It is anticipated that in the near future the ssd module will be obsoleted, 739 * at which time this ugliness should go away. 
740 */ 741 #define sd_log_trace ssd_log_trace 742 #define sd_log_info ssd_log_info 743 #define sd_log_err ssd_log_err 744 #define sdprobe ssdprobe 745 #define sdinfo ssdinfo 746 #define sd_prop_op ssd_prop_op 747 #define sd_scsi_probe_cache_init ssd_scsi_probe_cache_init 748 #define sd_scsi_probe_cache_fini ssd_scsi_probe_cache_fini 749 #define sd_scsi_clear_probe_cache ssd_scsi_clear_probe_cache 750 #define sd_scsi_probe_with_cache ssd_scsi_probe_with_cache 751 #define sd_spin_up_unit ssd_spin_up_unit 752 #define sd_enable_descr_sense ssd_enable_descr_sense 753 #define sd_set_mmc_caps ssd_set_mmc_caps 754 #define sd_read_unit_properties ssd_read_unit_properties 755 #define sd_process_sdconf_file ssd_process_sdconf_file 756 #define sd_process_sdconf_table ssd_process_sdconf_table 757 #define sd_sdconf_id_match ssd_sdconf_id_match 758 #define sd_blank_cmp ssd_blank_cmp 759 #define sd_chk_vers1_data ssd_chk_vers1_data 760 #define sd_set_vers1_properties ssd_set_vers1_properties 761 #define sd_validate_geometry ssd_validate_geometry 762 763 #if defined(_SUNOS_VTOC_16) 764 #define sd_convert_geometry ssd_convert_geometry 765 #endif 766 767 #define sd_resync_geom_caches ssd_resync_geom_caches 768 #define sd_read_fdisk ssd_read_fdisk 769 #define sd_get_physical_geometry ssd_get_physical_geometry 770 #define sd_get_virtual_geometry ssd_get_virtual_geometry 771 #define sd_update_block_info ssd_update_block_info 772 #define sd_swap_efi_gpt ssd_swap_efi_gpt 773 #define sd_swap_efi_gpe ssd_swap_efi_gpe 774 #define sd_validate_efi ssd_validate_efi 775 #define sd_use_efi ssd_use_efi 776 #define sd_uselabel ssd_uselabel 777 #define sd_build_default_label ssd_build_default_label 778 #define sd_has_max_chs_vals ssd_has_max_chs_vals 779 #define sd_inq_fill ssd_inq_fill 780 #define sd_register_devid ssd_register_devid 781 #define sd_get_devid_block ssd_get_devid_block 782 #define sd_get_devid ssd_get_devid 783 #define sd_create_devid ssd_create_devid 784 #define sd_write_deviceid 
ssd_write_deviceid 785 #define sd_check_vpd_page_support ssd_check_vpd_page_support 786 #define sd_setup_pm ssd_setup_pm 787 #define sd_create_pm_components ssd_create_pm_components 788 #define sd_ddi_suspend ssd_ddi_suspend 789 #define sd_ddi_pm_suspend ssd_ddi_pm_suspend 790 #define sd_ddi_resume ssd_ddi_resume 791 #define sd_ddi_pm_resume ssd_ddi_pm_resume 792 #define sdpower ssdpower 793 #define sdattach ssdattach 794 #define sddetach ssddetach 795 #define sd_unit_attach ssd_unit_attach 796 #define sd_unit_detach ssd_unit_detach 797 #define sd_create_minor_nodes ssd_create_minor_nodes 798 #define sd_create_errstats ssd_create_errstats 799 #define sd_set_errstats ssd_set_errstats 800 #define sd_set_pstats ssd_set_pstats 801 #define sddump ssddump 802 #define sd_scsi_poll ssd_scsi_poll 803 #define sd_send_polled_RQS ssd_send_polled_RQS 804 #define sd_ddi_scsi_poll ssd_ddi_scsi_poll 805 #define sd_init_event_callbacks ssd_init_event_callbacks 806 #define sd_event_callback ssd_event_callback 807 #define sd_disable_caching ssd_disable_caching 808 #define sd_make_device ssd_make_device 809 #define sdopen ssdopen 810 #define sdclose ssdclose 811 #define sd_ready_and_valid ssd_ready_and_valid 812 #define sdmin ssdmin 813 #define sdread ssdread 814 #define sdwrite ssdwrite 815 #define sdaread ssdaread 816 #define sdawrite ssdawrite 817 #define sdstrategy ssdstrategy 818 #define sdioctl ssdioctl 819 #define sd_mapblockaddr_iostart ssd_mapblockaddr_iostart 820 #define sd_mapblocksize_iostart ssd_mapblocksize_iostart 821 #define sd_checksum_iostart ssd_checksum_iostart 822 #define sd_checksum_uscsi_iostart ssd_checksum_uscsi_iostart 823 #define sd_pm_iostart ssd_pm_iostart 824 #define sd_core_iostart ssd_core_iostart 825 #define sd_mapblockaddr_iodone ssd_mapblockaddr_iodone 826 #define sd_mapblocksize_iodone ssd_mapblocksize_iodone 827 #define sd_checksum_iodone ssd_checksum_iodone 828 #define sd_checksum_uscsi_iodone ssd_checksum_uscsi_iodone 829 #define sd_pm_iodone 
ssd_pm_iodone 830 #define sd_initpkt_for_buf ssd_initpkt_for_buf 831 #define sd_destroypkt_for_buf ssd_destroypkt_for_buf 832 #define sd_setup_rw_pkt ssd_setup_rw_pkt 833 #define sd_setup_next_rw_pkt ssd_setup_next_rw_pkt 834 #define sd_buf_iodone ssd_buf_iodone 835 #define sd_uscsi_strategy ssd_uscsi_strategy 836 #define sd_initpkt_for_uscsi ssd_initpkt_for_uscsi 837 #define sd_destroypkt_for_uscsi ssd_destroypkt_for_uscsi 838 #define sd_uscsi_iodone ssd_uscsi_iodone 839 #define sd_xbuf_strategy ssd_xbuf_strategy 840 #define sd_xbuf_init ssd_xbuf_init 841 #define sd_pm_entry ssd_pm_entry 842 #define sd_pm_exit ssd_pm_exit 843 844 #define sd_pm_idletimeout_handler ssd_pm_idletimeout_handler 845 #define sd_pm_timeout_handler ssd_pm_timeout_handler 846 847 #define sd_add_buf_to_waitq ssd_add_buf_to_waitq 848 #define sdintr ssdintr 849 #define sd_start_cmds ssd_start_cmds 850 #define sd_send_scsi_cmd ssd_send_scsi_cmd 851 #define sd_bioclone_alloc ssd_bioclone_alloc 852 #define sd_bioclone_free ssd_bioclone_free 853 #define sd_shadow_buf_alloc ssd_shadow_buf_alloc 854 #define sd_shadow_buf_free ssd_shadow_buf_free 855 #define sd_print_transport_rejected_message \ 856 ssd_print_transport_rejected_message 857 #define sd_retry_command ssd_retry_command 858 #define sd_set_retry_bp ssd_set_retry_bp 859 #define sd_send_request_sense_command ssd_send_request_sense_command 860 #define sd_start_retry_command ssd_start_retry_command 861 #define sd_start_direct_priority_command \ 862 ssd_start_direct_priority_command 863 #define sd_return_failed_command ssd_return_failed_command 864 #define sd_return_failed_command_no_restart \ 865 ssd_return_failed_command_no_restart 866 #define sd_return_command ssd_return_command 867 #define sd_sync_with_callback ssd_sync_with_callback 868 #define sdrunout ssdrunout 869 #define sd_mark_rqs_busy ssd_mark_rqs_busy 870 #define sd_mark_rqs_idle ssd_mark_rqs_idle 871 #define sd_reduce_throttle ssd_reduce_throttle 872 #define sd_restore_throttle 
ssd_restore_throttle 873 #define sd_print_incomplete_msg ssd_print_incomplete_msg 874 #define sd_init_cdb_limits ssd_init_cdb_limits 875 #define sd_pkt_status_good ssd_pkt_status_good 876 #define sd_pkt_status_check_condition ssd_pkt_status_check_condition 877 #define sd_pkt_status_busy ssd_pkt_status_busy 878 #define sd_pkt_status_reservation_conflict \ 879 ssd_pkt_status_reservation_conflict 880 #define sd_pkt_status_qfull ssd_pkt_status_qfull 881 #define sd_handle_request_sense ssd_handle_request_sense 882 #define sd_handle_auto_request_sense ssd_handle_auto_request_sense 883 #define sd_print_sense_failed_msg ssd_print_sense_failed_msg 884 #define sd_validate_sense_data ssd_validate_sense_data 885 #define sd_decode_sense ssd_decode_sense 886 #define sd_print_sense_msg ssd_print_sense_msg 887 #define sd_extract_sense_info_descr ssd_extract_sense_info_descr 888 #define sd_sense_key_no_sense ssd_sense_key_no_sense 889 #define sd_sense_key_recoverable_error ssd_sense_key_recoverable_error 890 #define sd_sense_key_not_ready ssd_sense_key_not_ready 891 #define sd_sense_key_medium_or_hardware_error \ 892 ssd_sense_key_medium_or_hardware_error 893 #define sd_sense_key_illegal_request ssd_sense_key_illegal_request 894 #define sd_sense_key_unit_attention ssd_sense_key_unit_attention 895 #define sd_sense_key_fail_command ssd_sense_key_fail_command 896 #define sd_sense_key_blank_check ssd_sense_key_blank_check 897 #define sd_sense_key_aborted_command ssd_sense_key_aborted_command 898 #define sd_sense_key_default ssd_sense_key_default 899 #define sd_print_retry_msg ssd_print_retry_msg 900 #define sd_print_cmd_incomplete_msg ssd_print_cmd_incomplete_msg 901 #define sd_pkt_reason_cmd_incomplete ssd_pkt_reason_cmd_incomplete 902 #define sd_pkt_reason_cmd_tran_err ssd_pkt_reason_cmd_tran_err 903 #define sd_pkt_reason_cmd_reset ssd_pkt_reason_cmd_reset 904 #define sd_pkt_reason_cmd_aborted ssd_pkt_reason_cmd_aborted 905 #define sd_pkt_reason_cmd_timeout ssd_pkt_reason_cmd_timeout 
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
/*
 * Bug fix: this previously mapped the symbol to ITSELF (missing the
 * "ssd" prefix on the right-hand side). A self-referential macro is
 * not re-expanded by the preprocessor, so it compiled silently, but
 * the symbol was never renamed in the ssd module, defeating the
 * sd/ssd namespace separation this table exists to provide.
 */
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
	ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
	ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
	ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_uscsi_ioctl			ssd_uscsi_ioctl
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_dkio_get_geometry		ssd_dkio_get_geometry
#define	sd_dkio_set_geometry		ssd_dkio_set_geometry
#define	sd_dkio_get_partition		ssd_dkio_get_partition
#define	sd_dkio_set_partition		ssd_dkio_set_partition
#define	sd_dkio_partition		ssd_dkio_partition
#define	sd_dkio_get_vtoc		ssd_dkio_get_vtoc
#define	sd_dkio_get_efi			ssd_dkio_get_efi
#define	sd_build_user_vtoc		ssd_build_user_vtoc
#define	sd_dkio_set_vtoc		ssd_dkio_set_vtoc
#define	sd_dkio_set_efi			ssd_dkio_set_efi
#define	sd_build_label_vtoc		ssd_build_label_vtoc
#define	sd_write_label			ssd_write_label
#define	sd_clear_vtoc			ssd_clear_vtoc
#define	sd_clear_efi			ssd_clear_efi
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_dkio_get_mboot		ssd_dkio_get_mboot
#define	sd_dkio_set_mboot		ssd_dkio_set_mboot
#define	sd_setup_default_geometry	ssd_setup_default_geometry
#define	sd_update_fdisk_and_vtoc	ssd_update_fdisk_and_vtoc
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
/* CD-ROM/removable (sr_*) routines are renamed to ssr_* in the ssd build */
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi

#endif	/* #if (defined(__fibre)) */


/*
 * Loadable module entry points; see _init(9E)/_fini(9E)/_info(9E).
 */
int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*
 * Internal logging routines; PRINTFLIKE3 tells lint/cc that "fmt" is a
 * printf-style format string with arguments starting at position 3.
 */
/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

static int sd_spin_up_unit(struct sd_lun *un);
static void sd_enable_descr_sense(struct sd_lun *un);
static void sd_set_mmc_caps(struct sd_lun *un);

/*
 * Routines that read per-unit configuration (sd.conf / built-in table).
 */
static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);
static int sd_validate_geometry(struct sd_lun *un, int path_flag);

#if defined(_SUNOS_VTOC_16)
static void sd_convert_geometry(uint64_t capacity, struct dk_geom *un_g);
#endif

/* Disk geometry / label (VTOC and EFI/GPT) support routines */
static void sd_resync_geom_caches(struct sd_lun *un, int capacity, int lbasize,
    int path_flag);
static int sd_read_fdisk(struct sd_lun *un, uint_t capacity, int lbasize,
    int path_flag);
static void sd_get_physical_geometry(struct sd_lun *un,
    struct geom_cache *pgeom_p, int capacity, int lbasize, int path_flag);
static void sd_get_virtual_geometry(struct sd_lun *un, int capacity,
    int lbasize);
static int sd_uselabel(struct sd_lun *un, struct dk_label *l, int path_flag);
static void sd_swap_efi_gpt(efi_gpt_t *);
static void sd_swap_efi_gpe(int nparts, efi_gpe_t *);
static int sd_validate_efi(efi_gpt_t *);
static int sd_use_efi(struct sd_lun *, int);
static void sd_build_default_label(struct sd_lun *un);

#if defined(_FIRMWARE_NEEDS_FDISK)
static int sd_has_max_chs_vals(struct ipart *fdp);
#endif
static void sd_inq_fill(char *p, int l, char *s);

/* Device ID (devid) support routines */
static void sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static daddr_t sd_get_devid_block(struct sd_lun *un);
static int sd_get_devid(struct sd_lun *un);
static int sd_get_serialnum(struct sd_lun *un, uchar_t *wwn, int *len);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int sd_write_deviceid(struct sd_lun *un);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(struct sd_lun *un);

/* Power management (PM) setup */
static void sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

/* DDI suspend/resume and power entry points */
static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static int sd_create_minor_nodes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif


static int sd_disable_caching(struct sd_lun *un);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(struct sd_lun *un);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd,
    enum uio_seg cdbspace, enum uio_seg dataspace, enum uio_seg rqbufspace,
    int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);

/* Retry/recovery support */
static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
    int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static diskaddr_t sd_extract_sense_info_descr(
    struct scsi_descr_sense_hdr *sdsp);

/* One handler per SCSI sense key */
static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t asc,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t asc, uint8_t ascq,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    int sense_key, uint8_t asc,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t asc,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    int sense_key,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

/* One handler per pkt_reason transport-error code */
static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_media_change_task(void *arg);

/*
 * Helpers that issue specific SCSI commands; "path_flag" selects the
 * transport path behavior on the commands that take it.
 */
static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
    int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
    size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
    uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
    uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
/* Convenience wrappers around sd_send_scsi_RDWR() for READ and WRITE */
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,\
	path_flag)

static int sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
    uint16_t buflen, uchar_t page_code, uchar_t page_control,
    uint16_t param_ptr, int path_flag);

static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);

static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
    uchar_t *data, int len, int fmt);

/*
 * Disk Ioctl Function Prototypes
 */
static int sd_uscsi_ioctl(dev_t dev, caddr_t arg, int flag);
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_geometry(dev_t dev, caddr_t arg, int flag,
    int geom_validated);
static int sd_dkio_set_geometry(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_partition(dev_t dev, caddr_t arg, int flag,
    int geom_validated);
static int sd_dkio_set_partition(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_vtoc(dev_t dev, caddr_t arg, int flag,
    int geom_validated);
static int sd_dkio_get_efi(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_partition(dev_t dev, caddr_t arg, int flag);
static void sd_build_user_vtoc(struct sd_lun *un, struct vtoc *user_vtoc);
static int sd_dkio_set_vtoc(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_set_efi(dev_t dev, caddr_t arg, int flag);
static int sd_build_label_vtoc(struct sd_lun *un, struct vtoc *user_vtoc);
static int sd_write_label(dev_t dev);
static int sd_set_vtoc(struct sd_lun *un, struct dk_label *dkl);
static void sd_clear_vtoc(struct sd_lun *un);
static void sd_clear_efi(struct sd_lun *un);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_mboot(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_set_mboot(dev_t dev, caddr_t arg, int flag);
static void sd_setup_default_geometry(struct sd_lun *un);
#if defined(__i386) || defined(__amd64)
static int sd_update_fdisk_and_vtoc(struct sd_lun *un);
#endif

/*
 * Multi-host Ioctl Prototypes
 */
static int sd_check_mhd(dev_t dev, int interval);
static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
static char *sd_sname(uchar_t status);
static void sd_mhd_resvd_recover(void *arg);
/*
 * NOTE(review): old-style declaration — empty parens declare unspecified
 * arguments, not "no arguments"; consider (void).
 */
static void sd_resv_reclaim_thread();
static int sd_take_ownership(dev_t dev, struct mhioctkown *p);
static int sd_reserve_release(dev_t dev, int cmd);
static void sd_rmv_resv_reclaim_req(dev_t dev);
static void sd_mhd_reset_notify_cb(caddr_t arg);
static int sd_persistent_reservation_in_read_keys(struct sd_lun *un,
    mhioc_inkeys_t *usrp, int flag);
static int sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag);
static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
static int sd_mhdioc_release(dev_t dev);
static int sd_mhdioc_register_devid(dev_t dev);
static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag);
static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag);

/*
 * SCSI removable prototypes
 */
static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag);
static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
static int sr_pause_resume(dev_t dev, int mode);
static int sr_play_msf(dev_t dev, caddr_t data, int flag);
static int sr_play_trkind(dev_t dev, caddr_t data, int flag);
static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag);
static int sr_read_subchannel(dev_t dev, caddr_t data, int flag);
static int sr_read_tocentry(dev_t dev, caddr_t data, int flag);
static int sr_read_tochdr(dev_t dev, caddr_t data, int flag);
static int sr_read_cdda(dev_t dev, caddr_t data, int flag);
static int sr_read_cdxa(dev_t dev, caddr_t data, int flag);
static int sr_read_mode1(dev_t dev, caddr_t data, int flag);
static int sr_read_mode2(dev_t dev, caddr_t data, int flag);
static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag);
static int sr_sector_mode(dev_t dev, uint32_t blksize);
static int sr_eject(dev_t dev);
static void sr_ejected(register struct sd_lun *un);
static int sr_check_wp(dev_t dev);
static int sd_check_media(dev_t dev, enum dkio_state state);
static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_delayed_cv_broadcast(void *arg);
static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag);
static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag);

static int sd_log_page_supported(struct sd_lun *un, int log_page);

/*
 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions.
 */
static void sd_check_for_writable_cd(struct sd_lun *un);
static int sd_wm_cache_constructor(void *wm, void *un, int flags);
static void sd_wm_cache_destructor(void *wm, void *un);
static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb,
    daddr_t endb, ushort_t typ);
static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb,
    daddr_t endb);
static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp);
static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm);
static void sd_read_modify_write_task(void * arg);
static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
    struct buf **bpp);


/*
 * Function prototypes for failfast support.
 */
static void sd_failfast_flushq(struct sd_lun *un);
static int sd_failfast_flushq_callback(struct buf *bp);

/*
 * Function prototypes to check for lsi devices
 */
static void sd_is_lsi(struct sd_lun *un);

/*
 * Function prototypes for x86 support
 */
#if defined(__i386) || defined(__amd64)
static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp);
#endif

/*
 * Constants for failfast support:
 *
 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO
 * failfast processing being performed.
 *
 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing
 * failfast processing on all bufs with B_FAILFAST set.
 */

#define	SD_FAILFAST_INACTIVE		0
#define	SD_FAILFAST_ACTIVE		1

/*
 * Bitmask to control behavior of buf(9S) flushes when a transition to
 * the failfast state occurs. Optional bits include:
 *
 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that
 * do NOT have B_FAILFAST set.
 * When clear, only bufs with B_FAILFAST will
 * be flushed.
 *
 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the
 * driver, in addition to the regular wait queue. This includes the xbuf
 * queues. When clear, only the driver's wait queue will be flushed.
 */
#define	SD_FAILFAST_FLUSH_ALL_BUFS	0x01
#define	SD_FAILFAST_FLUSH_ALL_QUEUES	0x02

/*
 * The default behavior is to only flush bufs that have B_FAILFAST set, but
 * to flush all queues within the driver.
 * (SD_FAILFAST_FLUSH_ALL_BUFS is deliberately NOT set here.)
 */
static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES;


/*
 * SD Testing Fault Injection
 * (Compiled in only when SD_FAULT_INJECTION is defined.)
 */
#ifdef SD_FAULT_INJECTION
static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
static void sd_faultinjection(struct scsi_pkt *pktp);
static void sd_injection_log(char *buf, struct sd_lun *un);
#endif

/*
 * Device driver ops vector
 * (Positional initializers; field order must match cb_ops(9S)/dev_ops(9S).)
 */
static struct cb_ops sd_cb_ops = {
	sdopen,			/* open */
	sdclose,		/* close */
	sdstrategy,		/* strategy */
	nodev,			/* print */
	sddump,			/* dump */
	sdread,			/* read */
	sdwrite,		/* write */
	sdioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	sd_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_64BIT | D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flags */
	CB_REV,			/* cb_rev */
	sdaread,		/* async I/O read entry point */
	sdawrite		/* async I/O write entry point */
};

static struct dev_ops sd_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	sdinfo,			/* info */
	nulldev,		/* identify */
	sdprobe,		/* probe */
	sdattach,		/* attach */
	sddetach,		/* detach */
	nodev,			/* reset */
	&sd_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	sdpower			/* power */
};


/*
 * This is the loadable module
wrapper. 1607 */ 1608 #include <sys/modctl.h> 1609 1610 static struct modldrv modldrv = { 1611 &mod_driverops, /* Type of module. This one is a driver */ 1612 SD_MODULE_NAME, /* Module name. */ 1613 &sd_ops /* driver ops */ 1614 }; 1615 1616 1617 static struct modlinkage modlinkage = { 1618 MODREV_1, 1619 &modldrv, 1620 NULL 1621 }; 1622 1623 1624 static struct scsi_asq_key_strings sd_additional_codes[] = { 1625 0x81, 0, "Logical Unit is Reserved", 1626 0x85, 0, "Audio Address Not Valid", 1627 0xb6, 0, "Media Load Mechanism Failed", 1628 0xB9, 0, "Audio Play Operation Aborted", 1629 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1630 0x53, 2, "Medium removal prevented", 1631 0x6f, 0, "Authentication failed during key exchange", 1632 0x6f, 1, "Key not present", 1633 0x6f, 2, "Key not established", 1634 0x6f, 3, "Read without proper authentication", 1635 0x6f, 4, "Mismatched region to this logical unit", 1636 0x6f, 5, "Region reset count error", 1637 0xffff, 0x0, NULL 1638 }; 1639 1640 1641 /* 1642 * Struct for passing printing information for sense data messages 1643 */ 1644 struct sd_sense_info { 1645 int ssi_severity; 1646 int ssi_pfa_flag; 1647 }; 1648 1649 /* 1650 * Table of function pointers for iostart-side routines. Seperate "chains" 1651 * of layered function calls are formed by placing the function pointers 1652 * sequentially in the desired order. Functions are called according to an 1653 * incrementing table index ordering. The last function in each chain must 1654 * be sd_core_iostart(). The corresponding iodone-side routines are expected 1655 * in the sd_iodone_chain[] array. 1656 * 1657 * Note: It may seem more natural to organize both the iostart and iodone 1658 * functions together, into an array of structures (or some similar 1659 * organization) with a common index, rather than two seperate arrays which 1660 * must be maintained in synchronization. 
 * The purpose of this division is
 * to achieve improved performance: individual arrays allow for more
 * effective cache line utilization on certain platforms.
 */

typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp);


static sd_chain_t sd_iostart_chain[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_mapblockaddr_iostart,	/* Index: 0 */
	sd_pm_iostart,			/* Index: 1 */
	sd_core_iostart,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_mapblockaddr_iostart,	/* Index: 3 */
	sd_core_iostart,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_mapblockaddr_iostart,	/* Index: 5 */
	sd_mapblocksize_iostart,	/* Index: 6 */
	sd_pm_iostart,			/* Index: 7 */
	sd_core_iostart,		/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_mapblockaddr_iostart,	/* Index: 9 */
	sd_mapblocksize_iostart,	/* Index: 10 */
	sd_core_iostart,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_mapblockaddr_iostart,	/* Index: 12 */
	sd_checksum_iostart,		/* Index: 13 */
	sd_pm_iostart,			/* Index: 14 */
	sd_core_iostart,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_mapblockaddr_iostart,	/* Index: 16 */
	sd_checksum_iostart,		/* Index: 17 */
	sd_core_iostart,		/* Index: 18 */

	/* Chain for USCSI commands (all targets) */
	sd_pm_iostart,			/* Index: 19 */
	sd_core_iostart,		/* Index: 20 */

	/* Chain for checksumming USCSI commands (all targets) */
	sd_checksum_uscsi_iostart,	/* Index: 21 */
	sd_pm_iostart,			/* Index: 22 */
	sd_core_iostart,		/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_core_iostart,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_core_iostart,		/* Index: 25 */
};

/*
 * Macros to locate the first function of each iostart chain in the
 * sd_iostart_chain[] array. These are located by the index in the array.
 */
#define	SD_CHAIN_DISK_IOSTART			0
#define	SD_CHAIN_DISK_IOSTART_NO_PM		3
#define	SD_CHAIN_RMMEDIA_IOSTART		5
#define	SD_CHAIN_RMMEDIA_IOSTART_NO_PM		9
#define	SD_CHAIN_CHKSUM_IOSTART			12
#define	SD_CHAIN_CHKSUM_IOSTART_NO_PM		16
#define	SD_CHAIN_USCSI_CMD_IOSTART		19
#define	SD_CHAIN_USCSI_CHKSUM_IOSTART		21
#define	SD_CHAIN_DIRECT_CMD_IOSTART		24
#define	SD_CHAIN_PRIORITY_CMD_IOSTART		25


/*
 * Table of function pointers for the iodone-side routines for the driver-
 * internal layering mechanism. The calling sequence for iodone routines
 * uses a decrementing table index, so the last routine called in a chain
 * must be at the lowest array index location for that chain. The last
 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering
 * of the functions in an iodone side chain must correspond to the ordering
 * of the iostart routines for that chain. Note that there is no iodone
 * side routine that corresponds to sd_core_iostart(), so there is no
 * entry in the table for this.
 */

static sd_chain_t sd_iodone_chain[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_buf_iodone,			/* Index: 0 */
	sd_mapblockaddr_iodone,		/* Index: 1 */
	sd_pm_iodone,			/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_buf_iodone,			/* Index: 3 */
	sd_mapblockaddr_iodone,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_buf_iodone,			/* Index: 5 */
	sd_mapblockaddr_iodone,		/* Index: 6 */
	sd_mapblocksize_iodone,		/* Index: 7 */
	sd_pm_iodone,			/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_buf_iodone,			/* Index: 9 */
	sd_mapblockaddr_iodone,		/* Index: 10 */
	sd_mapblocksize_iodone,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_buf_iodone,			/* Index: 12 */
	sd_mapblockaddr_iodone,		/* Index: 13 */
	sd_checksum_iodone,		/* Index: 14 */
	sd_pm_iodone,			/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_buf_iodone,			/* Index: 16 */
	sd_mapblockaddr_iodone,		/* Index: 17 */
	sd_checksum_iodone,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_uscsi_iodone,		/* Index: 19 */
	sd_pm_iodone,			/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_uscsi_iodone,		/* Index: 21 */
	sd_checksum_uscsi_iodone,	/* Index: 22 */
	sd_pm_iodone,			/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_uscsi_iodone,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_uscsi_iodone,		/* Index: 25 */
};


/*
 * Macros to locate the "first" function in the sd_iodone_chain[] array for
 * each iodone-side chain.
 * These are located by the array index, but as the
 * iodone side functions are called in a decrementing-index order, the
 * highest index number in each chain must be specified (as these correspond
 * to the first function in the iodone chain that will be called by the core
 * at IO completion time).
 */

#define	SD_CHAIN_DISK_IODONE		2
#define	SD_CHAIN_DISK_IODONE_NO_PM	4
#define	SD_CHAIN_RMMEDIA_IODONE		8
#define	SD_CHAIN_RMMEDIA_IODONE_NO_PM	11
#define	SD_CHAIN_CHKSUM_IODONE		15
#define	SD_CHAIN_CHKSUM_IODONE_NO_PM	18
#define	SD_CHAIN_USCSI_CMD_IODONE	20
/*
 * NOTE(review): the checksum-USCSI iodone chain occupies indices 21..23 of
 * sd_iodone_chain[] (sd_pm_iodone is the entry at index 23, mislabeled
 * "Index: 22" in some copies of the table).  Starting iodone processing at
 * 22 means sd_pm_iodone would not run for this chain; confirm whether this
 * value should be 23.
 */
#define	SD_CHAIN_USCSI_CHKSUM_IODONE	22
#define	SD_CHAIN_DIRECT_CMD_IODONE	24
#define	SD_CHAIN_PRIORITY_CMD_IODONE	25




/*
 * Array to map a layering chain index to the appropriate initpkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */
typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);

static sd_initpkt_t	sd_initpkt_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 0 */
	sd_initpkt_for_buf,		/* Index: 1 */
	sd_initpkt_for_buf,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 3 */
	sd_initpkt_for_buf,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 5 */
	sd_initpkt_for_buf,		/* Index: 6 */
	sd_initpkt_for_buf,		/* Index: 7 */
	sd_initpkt_for_buf,		/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 9 */
	sd_initpkt_for_buf,		/* Index: 10 */
	sd_initpkt_for_buf,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_initpkt_for_buf,		/* Index: 12 */
	sd_initpkt_for_buf,		/* Index: 13 */
	sd_initpkt_for_buf,		/* Index: 14 */
	sd_initpkt_for_buf,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_initpkt_for_buf,		/* Index: 16 */
	sd_initpkt_for_buf,		/* Index: 17 */
	sd_initpkt_for_buf,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_initpkt_for_uscsi,		/* Index: 19 */
	sd_initpkt_for_uscsi,		/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_initpkt_for_uscsi,		/* Index: 21 */
	sd_initpkt_for_uscsi,		/* Index: 22 */
	sd_initpkt_for_uscsi,		/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_initpkt_for_uscsi,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_initpkt_for_uscsi,		/* Index: 25 */

};


/*
 * Array to map a layering chain index to the appropriate
 * destroypkt routine.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */
typedef void (*sd_destroypkt_t)(struct buf *);

static sd_destroypkt_t	sd_destroypkt_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 0 */
	sd_destroypkt_for_buf,		/* Index: 1 */
	sd_destroypkt_for_buf,		/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 3 */
	sd_destroypkt_for_buf,		/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 5 */
	sd_destroypkt_for_buf,		/* Index: 6 */
	sd_destroypkt_for_buf,		/* Index: 7 */
	sd_destroypkt_for_buf,		/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 9 */
	sd_destroypkt_for_buf,		/* Index: 10 */
	sd_destroypkt_for_buf,		/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	sd_destroypkt_for_buf,		/* Index: 12 */
	sd_destroypkt_for_buf,		/* Index: 13 */
	sd_destroypkt_for_buf,		/* Index: 14 */
	sd_destroypkt_for_buf,		/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	sd_destroypkt_for_buf,		/* Index: 16 */
	sd_destroypkt_for_buf,		/* Index: 17 */
	sd_destroypkt_for_buf,		/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	sd_destroypkt_for_uscsi,	/* Index: 19 */
	sd_destroypkt_for_uscsi,	/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	sd_destroypkt_for_uscsi,	/* Index: 21 */
	sd_destroypkt_for_uscsi,	/* Index: 22 */
	sd_destroypkt_for_uscsi,	/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	sd_destroypkt_for_uscsi,	/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	sd_destroypkt_for_uscsi,	/* Index: 25 */

};



/*
 * Array to map a layering chain index to the appropriate chain "type".
 * The chain type indicates a specific property/usage of the chain.
 * The redundant entries are present so that the index used for accessing
 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
 * with this table as well.
 */

#define	SD_CHAIN_NULL			0	/* for the special RQS cmd */
#define	SD_CHAIN_BUFIO			1	/* regular buf IO */
#define	SD_CHAIN_USCSI			2	/* regular USCSI commands */
#define	SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
#define	SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
						/* (for error recovery) */

static int sd_chain_type_map[] = {

	/* Chain for buf IO for disk drive targets (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 0 */
	SD_CHAIN_BUFIO,			/* Index: 1 */
	SD_CHAIN_BUFIO,			/* Index: 2 */

	/* Chain for buf IO for disk drive targets (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 3 */
	SD_CHAIN_BUFIO,			/* Index: 4 */

	/* Chain for buf IO for removable-media targets (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 5 */
	SD_CHAIN_BUFIO,			/* Index: 6 */
	SD_CHAIN_BUFIO,			/* Index: 7 */
	SD_CHAIN_BUFIO,			/* Index: 8 */

	/* Chain for buf IO for removable-media targets (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 9 */
	SD_CHAIN_BUFIO,			/* Index: 10 */
	SD_CHAIN_BUFIO,			/* Index: 11 */

	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
	SD_CHAIN_BUFIO,			/* Index: 12 */
	SD_CHAIN_BUFIO,			/* Index: 13 */
	SD_CHAIN_BUFIO,			/* Index: 14 */
	SD_CHAIN_BUFIO,			/* Index: 15 */

	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
	SD_CHAIN_BUFIO,			/* Index: 16 */
	SD_CHAIN_BUFIO,			/* Index: 17 */
	SD_CHAIN_BUFIO,			/* Index: 18 */

	/* Chain for USCSI commands (non-checksum targets) */
	SD_CHAIN_USCSI,			/* Index: 19 */
	SD_CHAIN_USCSI,			/* Index: 20 */

	/* Chain for USCSI commands (checksum targets) */
	SD_CHAIN_USCSI,			/* Index: 21 */
	SD_CHAIN_USCSI,			/* Index: 22 */
	SD_CHAIN_USCSI,			/* Index: 23 */

	/* Chain for "direct" USCSI commands (all targets) */
	SD_CHAIN_DIRECT,		/* Index: 24 */

	/* Chain for "direct priority" USCSI commands (all targets) */
	SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */
};


/* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
#define	SD_IS_BUFIO(xp)			\
	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)

/* Macro to return TRUE if the IO has come from the "direct priority" chain. */
#define	SD_IS_DIRECT_PRIORITY(xp)	\
	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)



/*
 * Struct, array, and macros to map a specific chain to the appropriate
 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
 *
 * The sd_chain_index_map[] array is used at attach time to set the various
 * un_xxx_chain type members of the sd_lun softstate to the specific layering
 * chain to be used with the instance. This allows different instances to use
 * different chain for buf IO, uscsi IO, etc.. Also, since the xb_chain_iostart
 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
 * values at sd_xbuf init time, this allows (1) layering chains may be changed
 * dynamically & without the use of locking; and (2) a layer may update the
 * xb_chain_io[start|done] member in a given xbuf with its current index value,
 * to allow for deferred processing of an IO within the same chain from a
 * different execution context.
 */

struct sd_chain_index {
	int	sci_iostart_index;	/* entry index into sd_iostart_chain[] */
	int	sci_iodone_index;	/* entry index into sd_iodone_chain[] */
};

static struct sd_chain_index	sd_chain_index_map[] = {
	{ SD_CHAIN_DISK_IOSTART,		SD_CHAIN_DISK_IODONE },
	{ SD_CHAIN_DISK_IOSTART_NO_PM,		SD_CHAIN_DISK_IODONE_NO_PM },
	{ SD_CHAIN_RMMEDIA_IOSTART,		SD_CHAIN_RMMEDIA_IODONE },
	{ SD_CHAIN_RMMEDIA_IOSTART_NO_PM,	SD_CHAIN_RMMEDIA_IODONE_NO_PM },
	{ SD_CHAIN_CHKSUM_IOSTART,		SD_CHAIN_CHKSUM_IODONE },
	{ SD_CHAIN_CHKSUM_IOSTART_NO_PM,	SD_CHAIN_CHKSUM_IODONE_NO_PM },
	{ SD_CHAIN_USCSI_CMD_IOSTART,		SD_CHAIN_USCSI_CMD_IODONE },
	{ SD_CHAIN_USCSI_CHKSUM_IOSTART,	SD_CHAIN_USCSI_CHKSUM_IODONE },
	{ SD_CHAIN_DIRECT_CMD_IOSTART,		SD_CHAIN_DIRECT_CMD_IODONE },
	{ SD_CHAIN_PRIORITY_CMD_IOSTART,	SD_CHAIN_PRIORITY_CMD_IODONE },
};


/*
 * The following are indexes into the sd_chain_index_map[] array.
 */

/* un->un_buf_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_DISK		0
#define	SD_CHAIN_INFO_DISK_NO_PM	1
#define	SD_CHAIN_INFO_RMMEDIA		2
#define	SD_CHAIN_INFO_RMMEDIA_NO_PM	3
#define	SD_CHAIN_INFO_CHKSUM		4
#define	SD_CHAIN_INFO_CHKSUM_NO_PM	5

/* un->un_uscsi_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_USCSI_CMD		6
/* USCSI with PM disabled is the same as DIRECT */
#define	SD_CHAIN_INFO_USCSI_CMD_NO_PM	8
#define	SD_CHAIN_INFO_USCSI_CHKSUM	7

/* un->un_direct_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_DIRECT_CMD	8

/* un->un_priority_chain_type must be set to one of these */
#define	SD_CHAIN_INFO_PRIORITY_CMD	9

/* size for devid inquiries */
#define	MAX_INQUIRY_SIZE	0xF0

/*
 * Macros used by functions to pass a given buf(9S) struct along to the
 * next function in the layering chain for further processing.
 *
 * In the following macros, passing more than three arguments to the called
 * routines causes the optimizer for the SPARC compiler to stop doing tail
 * call elimination which results in significant performance degradation.
 */
#define	SD_BEGIN_IOSTART(index, un, bp)	\
	((*(sd_iostart_chain[index]))(index, un, bp))

#define	SD_BEGIN_IODONE(index, un, bp)	\
	((*(sd_iodone_chain[index]))(index, un, bp))

#define	SD_NEXT_IOSTART(index, un, bp)				\
	((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))

#define	SD_NEXT_IODONE(index, un, bp)				\
	((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))


/*
 * Function: _init
 *
 * Description: This is the driver _init(9E) entry point.
 *
 * Return Code: Returns the value from mod_install(9F) or
 *		ddi_soft_state_init(9F) as appropriate.
 *
 * Context: Called when driver module loaded.
 */

int
_init(void)
{
	int err;

	/* establish driver name from module name */
	sd_label = mod_modname(&modlinkage);

	err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
		SD_MAXUNIT);

	if (err != 0) {
		return (err);
	}

	mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);

	mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);

	/*
	 * it's ok to init here even for fibre device
	 */
	sd_scsi_probe_cache_init();

	/*
	 * Creating taskq before mod_install ensures that all callers (threads)
	 * that enter the module after a successful mod_install encounter
	 * a valid taskq.
	 */
	sd_taskq_create();

	err = mod_install(&modlinkage);
	if (err != 0) {
		/*
		 * Install failed: tear down everything created above, in
		 * reverse order, before returning the error to the caller.
		 */
		/* delete taskq if install fails */
		sd_taskq_delete();

		mutex_destroy(&sd_detach_mutex);
		mutex_destroy(&sd_log_mutex);
		mutex_destroy(&sd_label_mutex);

		mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
		cv_destroy(&sd_tr.srq_resv_reclaim_cv);
		cv_destroy(&sd_tr.srq_inprocess_cv);

		sd_scsi_probe_cache_fini();

		ddi_soft_state_fini(&sd_state);
		return (err);
	}

	return (err);
}


/*
 * Function: _fini
 *
 * Description: This is the driver _fini(9E) entry point.
 *
 * Return Code: Returns the value from mod_remove(9F)
 *
 * Context: Called when driver module is unloaded.
 */

int
_fini(void)
{
	int err;

	/* Refuse to tear down if the module is still in use. */
	if ((err = mod_remove(&modlinkage)) != 0) {
		return (err);
	}

	sd_taskq_delete();

	mutex_destroy(&sd_detach_mutex);
	mutex_destroy(&sd_log_mutex);
	mutex_destroy(&sd_label_mutex);
	mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);

	sd_scsi_probe_cache_fini();

	cv_destroy(&sd_tr.srq_resv_reclaim_cv);
	cv_destroy(&sd_tr.srq_inprocess_cv);

	ddi_soft_state_fini(&sd_state);

	return (err);
}


/*
 * Function: _info
 *
 * Description: This is the driver _info(9E) entry point.
 *
 * Arguments: modinfop - pointer to the driver modinfo structure
 *
 * Return Code: Returns the value from mod_info(9F).
 *
 * Context: Kernel thread context
 */

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}


/*
 * The following routines implement the driver message logging facility.
 * They provide component- and level- based debug output filtering.
 * Output may also be restricted to messages for a single instance by
 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
 * to NULL, then messages for all instances are printed.
 *
 * These routines have been cloned from each other due to the language
 * constraints of macros and variable argument list processing.
 */


/*
 * Function: sd_log_err
 *
 * Description: This routine is called by the SD_ERROR macro for debug
 *		logging of error conditions.
 *
 * Arguments: comp - driver component being logged
 *	dev - pointer to driver info structure
 *	fmt - error string and format to be logged
 */

static void
sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
{
	va_list	ap;
	dev_info_t *dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		/*
		 * NOTE(review): vsprintf() does not bound the write into
		 * sd_log_buf (declared elsewhere in this file); callers must
		 * keep formatted messages within that buffer's size —
		 * consider vsnprintf(). Same pattern in sd_log_info() and
		 * sd_log_trace().
		 */
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & comp) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}


/*
 * Function: sd_log_info
 *
 * Description: This routine is called by the SD_INFO macro for debug
 *		logging of general purpose informational conditions.
 *
 * Arguments: comp - driver component being logged
 *	dev - pointer to driver info structure
 *	fmt - info string and format to be logged
 */

static void
sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
	va_list	ap;
	dev_info_t *dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & component) &&
	    (sd_level_mask & SD_LOGMASK_INFO) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & component) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}


/*
 * Function: sd_log_trace
 *
 * Description: This routine is called by the SD_TRACE macro for debug
 *		logging of trace conditions (i.e. function entry/exit).
 *
 * Arguments: comp - driver component being logged
 *	dev - pointer to driver info structure
 *	fmt - trace string and format to be logged
 */

static void
sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
	va_list	ap;
	dev_info_t *dev;

	ASSERT(un != NULL);
	dev = SD_DEVINFO(un);
	ASSERT(dev != NULL);

	/*
	 * Filter messages based on the global component and level masks.
	 * Also print if un matches the value of sd_debug_un, or if
	 * sd_debug_un is set to NULL.
	 */
	if ((sd_component_mask & component) &&
	    (sd_level_mask & SD_LOGMASK_TRACE) &&
	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
		mutex_exit(&sd_log_mutex);
	}
#ifdef SD_FAULT_INJECTION
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
	if (un->sd_injection_mask & component) {
		mutex_enter(&sd_log_mutex);
		va_start(ap, fmt);
		(void) vsprintf(sd_log_buf, fmt, ap);
		va_end(ap);
		sd_injection_log(sd_log_buf, un);
		mutex_exit(&sd_log_mutex);
	}
#endif
}


/*
 * Function: sdprobe
 *
 * Description: This is the driver probe(9e) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *
 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
 *		DDI_PROBE_FAILURE: If the probe failed.
 *		DDI_PROBE_PARTIAL: If the instance is not present now,
 *			but may be present in the future.
 */

static int
sdprobe(dev_info_t *devi)
{
	struct scsi_device *devp;
	int rval;
	int instance;

	/*
	 * if it wasn't for pln, sdprobe could actually be nulldev
	 * in the "__fibre" case.
	 */
	if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
		return (DDI_PROBE_DONTCARE);
	}

	devp = ddi_get_driver_private(devi);

	if (devp == NULL) {
		/* Ooops... nexus driver is mis-configured... */
		return (DDI_PROBE_FAILURE);
	}

	instance = ddi_get_instance(devi);

	/* Already have soft state for this instance: nothing more to probe. */
	if (ddi_get_soft_state(sd_state, instance) != NULL) {
		return (DDI_PROBE_PARTIAL);
	}

	/*
	 * Call the SCSA utility probe routine to see if we actually
	 * have a target at this SCSI nexus.
	 */
	switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) {
	case SCSIPROBE_EXISTS:
		switch (devp->sd_inq->inq_dtype) {
		case DTYPE_DIRECT:
			rval = DDI_PROBE_SUCCESS;
			break;
		case DTYPE_RODIRECT:
			/* CDs etc. Can be removable media */
			rval = DDI_PROBE_SUCCESS;
			break;
		case DTYPE_OPTICAL:
			/*
			 * Rewritable optical driver HP115AA
			 * Can also be removable media
			 */

			/*
			 * Do not attempt to bind to DTYPE_OPTICAL if
			 * pre solaris 9 sparc sd behavior is required
			 *
			 * If first time through and sd_dtype_optical_bind
			 * has not been set in /etc/system check properties
			 */

			if (sd_dtype_optical_bind < 0) {
				sd_dtype_optical_bind = ddi_prop_get_int
				    (DDI_DEV_T_ANY, devi, 0,
				    "optical-device-bind", 1);
			}

			if (sd_dtype_optical_bind == 0) {
				rval = DDI_PROBE_FAILURE;
			} else {
				rval = DDI_PROBE_SUCCESS;
			}
			break;

		case DTYPE_NOTPRESENT:
		default:
			rval = DDI_PROBE_FAILURE;
			break;
		}
		break;
	default:
		rval = DDI_PROBE_PARTIAL;
		break;
	}

	/*
	 * This routine checks for resource allocation prior to freeing,
	 * so it will take care of the "smart probing" case where a
	 * scsi_probe() may or may not have been issued and will *not*
	 * free previously-freed resources.
	 */
	scsi_unprobe(devp);
	return (rval);
}


/*
 * Function: sdinfo
 *
 * Description: This is the driver getinfo(9e) entry point function.
 *		Given the device number, return the devinfo pointer from
 *		the scsi_device structure or the instance number
 *		associated with the dev_t.
2493 * 2494 * Arguments: dip - pointer to device info structure 2495 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2496 * DDI_INFO_DEVT2INSTANCE) 2497 * arg - driver dev_t 2498 * resultp - user buffer for request response 2499 * 2500 * Return Code: DDI_SUCCESS 2501 * DDI_FAILURE 2502 */ 2503 /* ARGSUSED */ 2504 static int 2505 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2506 { 2507 struct sd_lun *un; 2508 dev_t dev; 2509 int instance; 2510 int error; 2511 2512 switch (infocmd) { 2513 case DDI_INFO_DEVT2DEVINFO: 2514 dev = (dev_t)arg; 2515 instance = SDUNIT(dev); 2516 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2517 return (DDI_FAILURE); 2518 } 2519 *result = (void *) SD_DEVINFO(un); 2520 error = DDI_SUCCESS; 2521 break; 2522 case DDI_INFO_DEVT2INSTANCE: 2523 dev = (dev_t)arg; 2524 instance = SDUNIT(dev); 2525 *result = (void *)(uintptr_t)instance; 2526 error = DDI_SUCCESS; 2527 break; 2528 default: 2529 error = DDI_FAILURE; 2530 } 2531 return (error); 2532 } 2533 2534 /* 2535 * Function: sd_prop_op 2536 * 2537 * Description: This is the driver prop_op(9e) entry point function. 2538 * Return the number of blocks for the partition in question 2539 * or forward the request to the property facilities. 
2540 * 2541 * Arguments: dev - device number 2542 * dip - pointer to device info structure 2543 * prop_op - property operator 2544 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2545 * name - pointer to property name 2546 * valuep - pointer or address of the user buffer 2547 * lengthp - property length 2548 * 2549 * Return Code: DDI_PROP_SUCCESS 2550 * DDI_PROP_NOT_FOUND 2551 * DDI_PROP_UNDEFINED 2552 * DDI_PROP_NO_MEMORY 2553 * DDI_PROP_BUF_TOO_SMALL 2554 */ 2555 2556 static int 2557 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2558 char *name, caddr_t valuep, int *lengthp) 2559 { 2560 int instance = ddi_get_instance(dip); 2561 struct sd_lun *un; 2562 uint64_t nblocks64; 2563 2564 /* 2565 * Our dynamic properties are all device specific and size oriented. 2566 * Requests issued under conditions where size is valid are passed 2567 * to ddi_prop_op_nblocks with the size information, otherwise the 2568 * request is passed to ddi_prop_op. Size depends on valid geometry. 2569 */ 2570 un = ddi_get_soft_state(sd_state, instance); 2571 if ((dev == DDI_DEV_T_ANY) || (un == NULL) || 2572 (un->un_f_geometry_is_valid == FALSE)) { 2573 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2574 name, valuep, lengthp)); 2575 } else { 2576 /* get nblocks value */ 2577 ASSERT(!mutex_owned(SD_MUTEX(un))); 2578 mutex_enter(SD_MUTEX(un)); 2579 nblocks64 = (ulong_t)un->un_map[SDPART(dev)].dkl_nblk; 2580 mutex_exit(SD_MUTEX(un)); 2581 2582 return (ddi_prop_op_nblocks(dev, dip, prop_op, mod_flags, 2583 name, valuep, lengthp, nblocks64)); 2584 } 2585 } 2586 2587 /* 2588 * The following functions are for smart probing: 2589 * sd_scsi_probe_cache_init() 2590 * sd_scsi_probe_cache_fini() 2591 * sd_scsi_clear_probe_cache() 2592 * sd_scsi_probe_with_cache() 2593 */ 2594 2595 /* 2596 * Function: sd_scsi_probe_cache_init 2597 * 2598 * Description: Initializes the probe response cache mutex and head pointer. 
2599 * 2600 * Context: Kernel thread context 2601 */ 2602 2603 static void 2604 sd_scsi_probe_cache_init(void) 2605 { 2606 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2607 sd_scsi_probe_cache_head = NULL; 2608 } 2609 2610 2611 /* 2612 * Function: sd_scsi_probe_cache_fini 2613 * 2614 * Description: Frees all resources associated with the probe response cache. 2615 * 2616 * Context: Kernel thread context 2617 */ 2618 2619 static void 2620 sd_scsi_probe_cache_fini(void) 2621 { 2622 struct sd_scsi_probe_cache *cp; 2623 struct sd_scsi_probe_cache *ncp; 2624 2625 /* Clean up our smart probing linked list */ 2626 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2627 ncp = cp->next; 2628 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2629 } 2630 sd_scsi_probe_cache_head = NULL; 2631 mutex_destroy(&sd_scsi_probe_cache_mutex); 2632 } 2633 2634 2635 /* 2636 * Function: sd_scsi_clear_probe_cache 2637 * 2638 * Description: This routine clears the probe response cache. This is 2639 * done when open() returns ENXIO so that when deferred 2640 * attach is attempted (possibly after a device has been 2641 * turned on) we will retry the probe. Since we don't know 2642 * which target we failed to open, we just clear the 2643 * entire cache. 2644 * 2645 * Context: Kernel thread context 2646 */ 2647 2648 static void 2649 sd_scsi_clear_probe_cache(void) 2650 { 2651 struct sd_scsi_probe_cache *cp; 2652 int i; 2653 2654 mutex_enter(&sd_scsi_probe_cache_mutex); 2655 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2656 /* 2657 * Reset all entries to SCSIPROBE_EXISTS. This will 2658 * force probing to be performed the next time 2659 * sd_scsi_probe_with_cache is called. 
2660 */ 2661 for (i = 0; i < NTARGETS_WIDE; i++) { 2662 cp->cache[i] = SCSIPROBE_EXISTS; 2663 } 2664 } 2665 mutex_exit(&sd_scsi_probe_cache_mutex); 2666 } 2667 2668 2669 /* 2670 * Function: sd_scsi_probe_with_cache 2671 * 2672 * Description: This routine implements support for a scsi device probe 2673 * with cache. The driver maintains a cache of the target 2674 * responses to scsi probes. If we get no response from a 2675 * target during a probe inquiry, we remember that, and we 2676 * avoid additional calls to scsi_probe on non-zero LUNs 2677 * on the same target until the cache is cleared. By doing 2678 * so we avoid the 1/4 sec selection timeout for nonzero 2679 * LUNs. lun0 of a target is always probed. 2680 * 2681 * Arguments: devp - Pointer to a scsi_device(9S) structure 2682 * waitfunc - indicates what the allocator routines should 2683 * do when resources are not available. This value 2684 * is passed on to scsi_probe() when that routine 2685 * is called. 2686 * 2687 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2688 * otherwise the value returned by scsi_probe(9F). 
 *
 * Context: Kernel thread context
 */

static int
sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
{
	struct sd_scsi_probe_cache	*cp;
	dev_info_t	*pdip = ddi_get_parent(devp->sd_dev);
	int		lun, tgt;

	/* Target/LUN addressing comes from the child node's properties. */
	lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_LUN, 0);
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_TARGET, -1);

	/* Make sure caching enabled and target in range */
	if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
		/* do it the old way (no cache) */
		return (scsi_probe(devp, waitfn));
	}

	mutex_enter(&sd_scsi_probe_cache_mutex);

	/* Find the cache for this scsi bus instance (keyed by parent dip) */
	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == pdip) {
			break;
		}
	}

	/* If we can't find a cache for this pdip, create one */
	if (cp == NULL) {
		int i;

		cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
		    KM_SLEEP);
		cp->pdip = pdip;
		cp->next = sd_scsi_probe_cache_head;
		sd_scsi_probe_cache_head = cp;
		for (i = 0; i < NTARGETS_WIDE; i++) {
			cp->cache[i] = SCSIPROBE_EXISTS;
		}
	}

	mutex_exit(&sd_scsi_probe_cache_mutex);

	/*
	 * NOTE(review): the per-target cache entry below is read and
	 * written after the mutex is dropped; entries are never freed
	 * while in use, but concurrent probes of the same target could
	 * interleave here -- presumably the worst case is a redundant
	 * scsi_probe() call. Confirm this is the intended trade-off.
	 */

	/* Recompute the cache for this target if LUN zero */
	if (lun == 0) {
		cp->cache[tgt] = SCSIPROBE_EXISTS;
	}

	/* Don't probe if cache remembers a NORESP from a previous LUN. */
	if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
		return (SCSIPROBE_NORESP);
	}

	/* Do the actual probe; save & return the result */
	return (cp->cache[tgt] = scsi_probe(devp, waitfn));
}


/*
 * Function: sd_spin_up_unit
 *
 * Description: Issues the following commands to spin-up the device:
 *		START STOP UNIT, and INQUIRY.
 *
 * Arguments:	un - driver soft state (unit) structure
 *
 * Return Code: 0 - success
 *		EIO - failure
 *		EACCES - reservation conflict
 *
 * Context: Kernel thread context
 */

static int
sd_spin_up_unit(struct sd_lun *un)
{
	size_t	resid		= 0;
	int	has_conflict	= FALSE;
	uchar_t *bufaddr;

	ASSERT(un != NULL);

	/*
	 * Send a throwaway START UNIT command.
	 *
	 * If we fail on this, we don't care presently what precisely
	 * is wrong. EMC's arrays will also fail this with a check
	 * condition (0x2/0x4/0x3) if the device is "inactive," but
	 * we don't want to fail the attach because it may become
	 * "active" later.  Only a reservation conflict is remembered
	 * (reported to the caller at the end).
	 */
	if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT)
	    == EACCES)
		has_conflict = TRUE;

	/*
	 * Send another INQUIRY command to the target. This is necessary for
	 * non-removable media direct access devices because their INQUIRY data
	 * may not be fully qualified until they are spun up (perhaps via the
	 * START command above). Note: This seems to be needed for some
	 * legacy devices only.) The INQUIRY command should succeed even if a
	 * Reservation Conflict is present.
	 */
	bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
	if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) {
		kmem_free(bufaddr, SUN_INQSIZE);
		return (EIO);
	}

	/*
	 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
	 * Note that this routine does not return a failure here even if the
	 * INQUIRY command did not return any data. This is a legacy behavior.
	 */
	if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
		bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
	}

	kmem_free(bufaddr, SUN_INQSIZE);

	/* If we hit a reservation conflict above, tell the caller. */
	if (has_conflict == TRUE) {
		return (EACCES);
	}

	return (0);
}

/*
 * Function: sd_enable_descr_sense
 *
 * Description: This routine attempts to select descriptor sense format
 *		using the Control mode page.  Devices that support 64 bit
 *		LBAs (for >2TB luns) should also implement descriptor
 *		sense data so we will call this function whenever we see
 *		a lun larger than 2TB.  If for some reason the device
 *		supports 64 bit LBAs but doesn't support descriptor sense
 *		presumably the mode select will fail.  Everything will
 *		continue to work normally except that we will not get
 *		complete sense data for commands that fail with an LBA
 *		larger than 32 bits.
 *
 * Arguments:	un - driver soft state (unit) structure
 *
 * Context: Kernel thread context only
 */

static void
sd_enable_descr_sense(struct sd_lun *un)
{
	uchar_t			*header;
	struct mode_control_scsi3 *ctrl_bufp;
	size_t			buflen;
	size_t			bd_len;

	/*
	 * Read MODE SENSE page 0xA, Control Mode Page.  The buffer holds
	 * the mode header, (optional) block descriptors, and the page
	 * itself; the same buffer is re-used for the MODE SELECT below.
	 */
	buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
	    sizeof (struct mode_control_scsi3);
	header = kmem_zalloc(buflen, KM_SLEEP);
	if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen,
	    MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) {
		SD_ERROR(SD_LOG_COMMON, un,
		    "sd_enable_descr_sense: mode sense ctrl page failed\n");
		goto eds_exit;
	}

	/*
	 * Determine size of Block Descriptors in order to locate
	 * the mode page data. ATAPI devices return 0, SCSI devices
	 * should return MODE_BLK_DESC_LENGTH.
	 */
	bd_len  = ((struct mode_header *)header)->bdesc_length;

	ctrl_bufp = (struct mode_control_scsi3 *)
	    (header + MODE_HEADER_LENGTH + bd_len);

	/*
	 * Clear PS bit for MODE SELECT (PS is reserved/zero in the
	 * MODE SELECT parameter list).
	 */
	ctrl_bufp->mode_page.ps = 0;

	/*
	 * Set D_SENSE to enable descriptor sense format.
	 */
	ctrl_bufp->d_sense = 1;

	/*
	 * Use MODE SELECT to commit the change to the D_SENSE bit
	 */
	if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header,
	    buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) {
		SD_INFO(SD_LOG_COMMON, un,
		    "sd_enable_descr_sense: mode select ctrl page failed\n");
		goto eds_exit;
	}

eds_exit:
	kmem_free(header, buflen);
}


/*
 * Function: sd_set_mmc_caps
 *
 * Description: This routine determines if the device is MMC compliant and if
 *		the device supports CDDA via a mode sense of the CDVD
 *		capabilities mode page. Also checks if the device is a
 *		dvdram writable device.
 *
 * Arguments:	un - driver soft state (unit) structure
 *
 * Context: Kernel thread context only
 */

static void
sd_set_mmc_caps(struct sd_lun *un)
{
	struct mode_header_grp2	*sense_mhp;
	uchar_t			*sense_page;
	caddr_t			buf;
	int			bd_len;
	int			status;
	struct uscsi_cmd	com;
	int			rtn;
	uchar_t			*out_data_rw, *out_data_hd;
	uchar_t			*rqbuf_rw, *rqbuf_hd;

	ASSERT(un != NULL);

	/*
	 * The flags which will be set in this function are - mmc compliant,
	 * dvdram writable device, cdda support. Initialize them to FALSE
	 * and if a capability is detected - it will be set to TRUE.
	 */
	un->un_f_mmc_cap = FALSE;
	un->un_f_dvdram_writable_device = FALSE;
	un->un_f_cfg_cdda = FALSE;

	buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
	status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf,
	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);

	if (status != 0) {
		/* command failed; just return */
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}
	/*
	 * If the mode sense request for the CDROM CAPABILITIES
	 * page (0x2A) succeeds the device is assumed to be MMC.
	 */
	un->un_f_mmc_cap = TRUE;

	/* Get to the page data */
	sense_mhp = (struct mode_header_grp2 *)buf;
	bd_len = (sense_mhp->bdesc_length_hi << 8) |
	    sense_mhp->bdesc_length_lo;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		/*
		 * We did not get back the expected block descriptor
		 * length so we cannot determine if the device supports
		 * CDDA. However, we still indicate the device is MMC
		 * according to the successful response to the page
		 * 0x2A mode sense request.
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_set_mmc_caps: Mode Sense returned "
		    "invalid block descriptor length\n");
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/* See if read CDDA is supported */
	sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 +
	    bd_len);
	un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE;

	/* See if writing DVD RAM is supported. */
	un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE;
	if (un->un_f_dvdram_writable_device == TRUE) {
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/*
	 * If the device presents DVD or CD capabilities in the mode
	 * page, we can return here since a RRD will not have
	 * these capabilities.
	 */
	if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}
	kmem_free(buf, BUFLEN_MODE_CDROM_CAP);

	/*
	 * If un->un_f_dvdram_writable_device is still FALSE,
	 * check for a Removable Rigid Disk (RRD). A RRD
	 * device is identified by the features RANDOM_WRITABLE and
	 * HARDWARE_DEFECT_MANAGEMENT.
	 */
	out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw,
	    SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
	    RANDOM_WRITABLE);
	if (rtn != 0) {
		kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
		kmem_free(rqbuf_rw, SENSE_LENGTH);
		return;
	}

	out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd,
	    SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
	    HARDWARE_DEFECT_MANAGEMENT);
	if (rtn == 0) {
		/*
		 * We have good information, check for random writable
		 * and hardware defect features.
		 */
		if ((out_data_rw[9] & RANDOM_WRITABLE) &&
		    (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
			un->un_f_dvdram_writable_device = TRUE;
		}
	}

	kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_rw, SENSE_LENGTH);
	kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_hd, SENSE_LENGTH);
}

/*
 * Function: sd_check_for_writable_cd
 *
 * Description: This routine determines if the media in the device is
 *		writable or not. It uses the get configuration command (0x46)
 *		to determine if the media is writable
 *
 * Arguments:	un - driver soft state (unit) structure
 *
 * Context: Never called at interrupt context.
 */

/*
 * Lock contract: must be entered with SD_MUTEX held; the mutex is
 * dropped and re-acquired around each blocking SCSI command, and is
 * held again on every return path.
 */
static void
sd_check_for_writable_cd(struct sd_lun *un)
{
	struct uscsi_cmd		com;
	uchar_t				*out_data;
	uchar_t				*rqbuf;
	int				rtn;
	uchar_t				*out_data_rw, *out_data_hd;
	uchar_t				*rqbuf_rw, *rqbuf_hd;
	struct mode_header_grp2		*sense_mhp;
	uchar_t				*sense_page;
	caddr_t				buf;
	int				bd_len;
	int				status;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/*
	 * Initialize the writable media to false, if configuration info.
	 * tells us otherwise then only we will set it.
	 */
	un->un_f_mmc_writable_media = FALSE;
	mutex_exit(SD_MUTEX(un));

	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH,
	    out_data, SD_PROFILE_HEADER_LEN);

	mutex_enter(SD_MUTEX(un));
	if (rtn == 0) {
		/*
		 * We have good information, check for writable DVD.
		 * (bytes 6/7 of the profile header are the current
		 * profile; 0x12 is presumably DVD-RAM -- confirm against
		 * the MMC GET CONFIGURATION profile list.)
		 */
		if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
			un->un_f_mmc_writable_media = TRUE;
			kmem_free(out_data, SD_PROFILE_HEADER_LEN);
			kmem_free(rqbuf, SENSE_LENGTH);
			return;
		}
	}

	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);

	/*
	 * Determine if this is a RRD type device.
	 */
	mutex_exit(SD_MUTEX(un));
	buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
	status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf,
	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);
	mutex_enter(SD_MUTEX(un));
	if (status != 0) {
		/* command failed; just return */
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/* Get to the page data */
	sense_mhp = (struct mode_header_grp2 *)buf;
	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		/*
		 * We did not get back the expected block descriptor length so
		 * we cannot check the mode page.
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_check_for_writable_cd: Mode Sense returned "
		    "invalid block descriptor length\n");
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}

	/*
	 * If the device presents DVD or CD capabilities in the mode
	 * page, we can return here since a RRD device will not have
	 * these capabilities.
	 */
	sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
	if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
		return;
	}
	kmem_free(buf, BUFLEN_MODE_CDROM_CAP);

	/*
	 * If un->un_f_mmc_writable_media is still FALSE,
	 * check for RRD type media. A RRD device is identified
	 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
	 */
	mutex_exit(SD_MUTEX(un));
	out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw,
	    SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
	    RANDOM_WRITABLE);
	if (rtn != 0) {
		kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
		kmem_free(rqbuf_rw, SENSE_LENGTH);
		mutex_enter(SD_MUTEX(un));
		return;
	}

	out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
	rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);

	rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd,
	    SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
	    HARDWARE_DEFECT_MANAGEMENT);
	mutex_enter(SD_MUTEX(un));
	if (rtn == 0) {
		/*
		 * We have good information, check for random writable
		 * and hardware defect features as current.
		 */
		if ((out_data_rw[9] & RANDOM_WRITABLE) &&
		    (out_data_rw[10] & 0x1) &&
		    (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
		    (out_data_hd[10] & 0x1)) {
			un->un_f_mmc_writable_media = TRUE;
		}
	}

	kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_rw, SENSE_LENGTH);
	kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
	kmem_free(rqbuf_hd, SENSE_LENGTH);
}

/*
 * Function: sd_read_unit_properties
 *
 * Description: The following implements a property lookup mechanism.
 *		Properties for particular disks (keyed on vendor, model
 *		and rev numbers) are sought in the sd.conf file via
 *		sd_process_sdconf_file(), and if not found there, are
 *		looked for in a list hardcoded in this driver via
 *		sd_process_sdconf_table() Once located the properties
 *		are used to update the driver unit structure.
3186 * 3187 * Arguments: un - driver soft state (unit) structure 3188 */ 3189 3190 static void 3191 sd_read_unit_properties(struct sd_lun *un) 3192 { 3193 /* 3194 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3195 * the "sd-config-list" property (from the sd.conf file) or if 3196 * there was not a match for the inquiry vid/pid. If this event 3197 * occurs the static driver configuration table is searched for 3198 * a match. 3199 */ 3200 ASSERT(un != NULL); 3201 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3202 sd_process_sdconf_table(un); 3203 } 3204 3205 /* check for LSI device */ 3206 sd_is_lsi(un); 3207 3208 /* 3209 * Set this in sd.conf to 0 in order to disable kstats. The default 3210 * is 1, so they are enabled by default. 3211 */ 3212 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 3213 SD_DEVINFO(un), DDI_PROP_DONTPASS, "enable-partition-kstats", 1)); 3214 } 3215 3216 3217 /* 3218 * Function: sd_process_sdconf_file 3219 * 3220 * Description: Use ddi_getlongprop to obtain the properties from the 3221 * driver's config file (ie, sd.conf) and update the driver 3222 * soft state structure accordingly. 3223 * 3224 * Arguments: un - driver soft state (unit) structure 3225 * 3226 * Return Code: SD_SUCCESS - The properties were successfully set according 3227 * to the driver configuration file. 3228 * SD_FAILURE - The driver config list was not obtained or 3229 * there was no vid/pid match. This indicates that 3230 * the static config table should be used. 3231 * 3232 * The config file has a property, "sd-config-list", which consists of 3233 * one or more duplets as follows: 3234 * 3235 * sd-config-list= 3236 * <duplet>, 3237 * [<duplet>,] 3238 * [<duplet>]; 3239 * 3240 * The structure of each duplet is as follows: 3241 * 3242 * <duplet>:= <vid+pid>,<data-property-name_list> 3243 * 3244 * The first entry of the duplet is the device ID string (the concatenated 3245 * vid & pid; not to be confused with a device_id). 
 * This is defined in
 * the same way as in the sd_disk_table.
 *
 * The second part of the duplet is a string that identifies a
 * data-property-name-list. The data-property-name-list is defined as
 * follows:
 *
 *	<data-property-name-list>:=<data-property-name> [<data-property-name>]
 *
 * The syntax of <data-property-name> depends on the <version> field.
 *
 * If version = SD_CONF_VERSION_1 we have the following syntax:
 *
 *	<data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
 *
 * where the prop0 value will be used to set prop0 if bit0 set in the
 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1
 *
 */

static int
sd_process_sdconf_file(struct sd_lun *un)
{
	char	*config_list = NULL;
	int	config_list_len;
	int	len;
	int	dupletlen = 0;
	char	*vidptr;
	int	vidlen;
	char	*dnlist_ptr;
	char	*dataname_ptr;
	int	dnlist_len;
	int	dataname_len;
	int	*data_list;
	int	data_list_len;
	int	rval = SD_FAILURE;
	int	i;

	ASSERT(un != NULL);

	/* Obtain the configuration list associated with the .conf file */
	if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS,
	    sd_config_list, (caddr_t)&config_list, &config_list_len)
	    != DDI_PROP_SUCCESS) {
		return (SD_FAILURE);
	}

	/*
	 * Compare vids in each duplet to the inquiry vid - if a match is
	 * made, get the data value and update the soft state structure
	 * accordingly.
	 *
	 * Note: This algorithm is complex and difficult to maintain. It should
	 * be replaced with a more robust implementation.
	 */
	for (len = config_list_len, vidptr = config_list; len > 0;
	    vidptr += dupletlen, len -= dupletlen) {
		/*
		 * Note: The assumption here is that each vid entry is on
		 * a unique line from its associated duplet.  Each entry in
		 * the packed property buffer is a NUL-terminated string;
		 * dupletlen accumulates how far to advance to the next vid.
		 */
		vidlen = dupletlen = (int)strlen(vidptr);
		if ((vidlen == 0) ||
		    (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) {
			/* skip this string (and its NUL terminator) */
			dupletlen++;
			continue;
		}

		/*
		 * dnlist contains 1 or more blank separated
		 * data-property-name entries
		 */
		dnlist_ptr = vidptr + vidlen + 1;
		dnlist_len = (int)strlen(dnlist_ptr);
		/* advance past both strings and both NUL terminators */
		dupletlen += dnlist_len + 2;

		/*
		 * Set a pointer for the first data-property-name
		 * entry in the list
		 */
		dataname_ptr = dnlist_ptr;
		dataname_len = 0;

		/*
		 * Loop through all data-property-name entries in the
		 * data-property-name-list setting the properties for each.
		 */
		while (dataname_len < dnlist_len) {
			int version;

			/*
			 * Determine the length of the current
			 * data-property-name entry by indexing until a
			 * blank or NULL is encountered. When the space is
			 * encountered reset it to a NULL for compliance
			 * with ddi_getlongprop().
			 */
			for (i = 0; ((dataname_ptr[i] != ' ') &&
			    (dataname_ptr[i] != '\0')); i++) {
				;
			}

			dataname_len += i;
			/* If not null terminated, Make it so */
			if (dataname_ptr[i] == ' ') {
				dataname_ptr[i] = '\0';
			}
			dataname_len++;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_process_sdconf_file: disk:%s, data:%s\n",
			    vidptr, dataname_ptr);

			/* Get the data list */
			if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0,
			    dataname_ptr, (caddr_t)&data_list, &data_list_len)
			    != DDI_PROP_SUCCESS) {
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_process_sdconf_file: data property (%s)"
				    " has no value\n", dataname_ptr);
				dataname_ptr = dnlist_ptr + dataname_len;
				continue;
			}

			/* data_list[0] is the version word of the list */
			version = data_list[0];

			if (version == SD_CONF_VERSION_1) {
				sd_tunables values;

				/* Set the properties */
				if (sd_chk_vers1_data(un, data_list[1],
				    &data_list[2], data_list_len, dataname_ptr)
				    == SD_SUCCESS) {
					sd_get_tunables_from_conf(un,
					    data_list[1], &data_list[2],
					    &values);
					sd_set_vers1_properties(un,
					    data_list[1], &values);
					rval = SD_SUCCESS;
				} else {
					rval = SD_FAILURE;
				}
			} else {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "data property %s version 0x%x is invalid.",
				    dataname_ptr, version);
				rval = SD_FAILURE;
			}
			kmem_free(data_list, data_list_len);
			dataname_ptr = dnlist_ptr + dataname_len;
		}
	}

	/* free up the memory allocated by ddi_getlongprop */
	if (config_list) {
		kmem_free(config_list, config_list_len);
	}

	return (rval);
}

/*
 * Function: sd_get_tunables_from_conf()
 *
 *
 * This function reads the data list from the sd.conf file and pulls
 * the values that can have numeric values as arguments and places
 * the values in the appropriate sd_tunables member.
 * Since the order of the data list members varies across platforms
 * This function reads them from the data list in a platform specific
 * order and places them into the correct sd_tunable member that is
 * consistent across all platforms.
 */
static void
sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
    sd_tunables *values)
{
	int i;
	int mask;

	/* Start from all-zero tunables; only flagged entries are filled in. */
	bzero(values, sizeof (sd_tunables));

	/*
	 * The property values are positional: the value for flag bit i
	 * is read from data_list[i].
	 */
	for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {

		mask = 1 << i;
		if (mask > flags) {
			/* no higher bits can be set; done */
			break;
		}

		switch (mask & flags) {
		case 0:	/* This mask bit not set in flags */
			continue;
		case SD_CONF_BSET_THROTTLE:
			values->sdt_throttle = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: throttle = %d\n",
			    values->sdt_throttle);
			break;
		case SD_CONF_BSET_CTYPE:
			values->sdt_ctype = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: ctype = %d\n",
			    values->sdt_ctype);
			break;
		case SD_CONF_BSET_NRR_COUNT:
			values->sdt_not_rdy_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
			    values->sdt_not_rdy_retries);
			break;
		case SD_CONF_BSET_BSY_RETRY_COUNT:
			values->sdt_busy_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: busy_retries = %d\n",
			    values->sdt_busy_retries);
			break;
		case SD_CONF_BSET_RST_RETRIES:
			values->sdt_reset_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: reset_retries = %d\n",
			    values->sdt_reset_retries);
			break;
		case SD_CONF_BSET_RSV_REL_TIME:
			values->sdt_reserv_rel_time = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
			    values->sdt_reserv_rel_time);
			break;
		case SD_CONF_BSET_MIN_THROTTLE:
			values->sdt_min_throttle = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: min_throttle = %d\n",
			    values->sdt_min_throttle);
			break;
		case SD_CONF_BSET_DISKSORT_DISABLED:
			values->sdt_disk_sort_dis = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
			    values->sdt_disk_sort_dis);
			break;
		case SD_CONF_BSET_LUN_RESET_ENABLED:
			values->sdt_lun_reset_enable = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: lun_reset_enable = %d"
			    "\n", values->sdt_lun_reset_enable);
			break;
		}
	}
}

/*
 * Function: sd_process_sdconf_table
 *
 * Description: Search the static configuration table for a match on the
 *		inquiry vid/pid and update the driver soft state structure
 *		according to the table property values for the device.
 *
 *		The form of a configuration table entry is:
 *		  <vid+pid>,<flags>,<property-data>
 *		  "SEAGATE ST42400N",1,63,0,0			(Fibre)
 *		  "SEAGATE ST42400N",1,63,0,0,0,0		(Sparc)
 *		  "SEAGATE ST42400N",1,63,0,0,0,0,0,0,0,0,0,0	(Intel)
 *
 * Arguments:	un - driver soft state (unit) structure
 */

static void
sd_process_sdconf_table(struct sd_lun *un)
{
	char	*id = NULL;
	int	table_index;
	int	idlen;

	ASSERT(un != NULL);
	for (table_index = 0; table_index < sd_disk_table_size;
	    table_index++) {
		id = sd_disk_table[table_index].device_id;
		idlen = strlen(id);
		if (idlen == 0) {
			continue;
		}

		/*
		 * The static configuration table currently does not
		 * implement version 10 properties. Additionally,
		 * multiple data-property-name entries are not
		 * implemented in the static configuration table.
		 */
		if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_process_sdconf_table: disk %s\n", id);
			sd_set_vers1_properties(un,
			    sd_disk_table[table_index].flags,
			    sd_disk_table[table_index].properties);
			/* first match wins */
			break;
		}
	}
}


/*
 * Function: sd_sdconf_id_match
 *
 * Description: This local function implements a case sensitive vid/pid
 *		comparison as well as the boundary cases of wild card and
 *		multiple blanks.
 *
 *		Note: An implicit assumption made here is that the scsi
 *		inquiry structure will always keep the vid, pid and
 *		revision strings in consecutive sequence, so they can be
 *		read as a single string. If this assumption is not the
 *		case, a separate string, to be used for the check, needs
 *		to be built with these strings concatenated.
 *
 * Arguments:	un - driver soft state (unit) structure
 *		id - table or config file vid/pid
 *		idlen  - length of the vid/pid (bytes)
 *
 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
 *		SD_FAILURE - Indicates no match with the inquiry vid/pid
 */

static int
sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
{
	struct scsi_inquiry	*sd_inq;
	int 			rval = SD_SUCCESS;

	ASSERT(un != NULL);
	sd_inq = un->un_sd->sd_inq;
	ASSERT(id != NULL);

	/*
	 * We use the inq_vid as a pointer to a buffer containing the
	 * vid and pid and use the entire vid/pid length of the table
	 * entry for the comparison. This works because the inq_pid
	 * data member follows inq_vid in the scsi_inquiry structure.
	 */
	if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
		/*
		 * The user id string is compared to the inquiry vid/pid
		 * using a case insensitive comparison and ignoring
		 * multiple spaces.
		 */
		rval = sd_blank_cmp(un, id, idlen);
		if (rval != SD_SUCCESS) {
			/*
			 * User id strings that start and end with a "*"
			 * are a special case. These do not have a
			 * specific vendor, and the product string can
			 * appear anywhere in the 16 byte PID portion of
			 * the inquiry data. This is a simple strstr()
			 * type search for the user id in the inquiry data.
			 */
			if ((id[0] == '*') && (id[idlen - 1] == '*')) {
				char	*pidptr = &id[1];
				int	i;
				int	j;
				int	pidstrlen = idlen - 2;
				j = sizeof (SD_INQUIRY(un)->inq_pid) -
				    pidstrlen;

				if (j < 0) {
					return (SD_FAILURE);
				}
				/*
				 * NOTE(review): positions 0..j inclusive
				 * are valid start offsets, but the loop
				 * bound is "i < j", so the final position
				 * (pattern flush against the end of
				 * inq_pid) is never tried -- looks like an
				 * off-by-one; confirm intent before
				 * changing.
				 */
				for (i = 0; i < j; i++) {
					if (bcmp(&SD_INQUIRY(un)->inq_pid[i],
					    pidptr, pidstrlen) == 0) {
						rval = SD_SUCCESS;
						break;
					}
				}
			}
		}
	}
	return (rval);
}


/*
 * Function: sd_blank_cmp
 *
 * Description: If the id string starts and ends with a space, treat
 *		multiple consecutive spaces as equivalent to a single
 *		space. For example, this causes a sd_disk_table entry
 *		of " NEC CDROM " to match a device's id string of
 *		"NEC CDROM".
 *
 *		Note: The success exit condition for this routine is if
 *		the pointer to the table entry is '\0' and the cnt of
 *		the inquiry length is zero. This will happen if the inquiry
 *		string returned by the device is padded with spaces to be
 *		exactly 24 bytes in length (8 byte vid + 16 byte pid). The
 *		SCSI spec states that the inquiry string is to be padded with
 *		spaces.
3639 * 3640 * Arguments: un - driver soft state (unit) structure 3641 * id - table or config file vid/pid 3642 * idlen - length of the vid/pid (bytes) 3643 * 3644 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3645 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3646 */ 3647 3648 static int 3649 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3650 { 3651 char *p1; 3652 char *p2; 3653 int cnt; 3654 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3655 sizeof (SD_INQUIRY(un)->inq_pid); 3656 3657 ASSERT(un != NULL); 3658 p2 = un->un_sd->sd_inq->inq_vid; 3659 ASSERT(id != NULL); 3660 p1 = id; 3661 3662 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3663 /* 3664 * Note: string p1 is terminated by a NUL but string p2 3665 * isn't. The end of p2 is determined by cnt. 3666 */ 3667 for (;;) { 3668 /* skip over any extra blanks in both strings */ 3669 while ((*p1 != '\0') && (*p1 == ' ')) { 3670 p1++; 3671 } 3672 while ((cnt != 0) && (*p2 == ' ')) { 3673 p2++; 3674 cnt--; 3675 } 3676 3677 /* compare the two strings */ 3678 if ((cnt == 0) || 3679 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3680 break; 3681 } 3682 while ((cnt > 0) && 3683 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3684 p1++; 3685 p2++; 3686 cnt--; 3687 } 3688 } 3689 } 3690 3691 /* return SD_SUCCESS if both strings match */ 3692 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 3693 } 3694 3695 3696 /* 3697 * Function: sd_chk_vers1_data 3698 * 3699 * Description: Verify the version 1 device properties provided by the 3700 * user via the configuration file 3701 * 3702 * Arguments: un - driver soft state (unit) structure 3703 * flags - integer mask indicating properties to be set 3704 * prop_list - integer list of property values 3705 * list_len - length of user provided data 3706 * 3707 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 3708 * SD_FAILURE - Indicates the user provided data is invalid 3709 */ 3710 3711 static int 3712 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 3713 int list_len, char *dataname_ptr) 3714 { 3715 int i; 3716 int mask = 1; 3717 int index = 0; 3718 3719 ASSERT(un != NULL); 3720 3721 /* Check for a NULL property name and list */ 3722 if (dataname_ptr == NULL) { 3723 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3724 "sd_chk_vers1_data: NULL data property name."); 3725 return (SD_FAILURE); 3726 } 3727 if (prop_list == NULL) { 3728 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3729 "sd_chk_vers1_data: %s NULL data property list.", 3730 dataname_ptr); 3731 return (SD_FAILURE); 3732 } 3733 3734 /* Display a warning if undefined bits are set in the flags */ 3735 if (flags & ~SD_CONF_BIT_MASK) { 3736 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3737 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 3738 "Properties not set.", 3739 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 3740 return (SD_FAILURE); 3741 } 3742 3743 /* 3744 * Verify the length of the list by identifying the highest bit set 3745 * in the flags and validating that the property list has a length 3746 * up to the index of this bit. 
3747 */ 3748 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3749 if (flags & mask) { 3750 index++; 3751 } 3752 mask = 1 << i; 3753 } 3754 if ((list_len / sizeof (int)) < (index + 2)) { 3755 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3756 "sd_chk_vers1_data: " 3757 "Data property list %s size is incorrect. " 3758 "Properties not set.", dataname_ptr); 3759 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 3760 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 3761 return (SD_FAILURE); 3762 } 3763 return (SD_SUCCESS); 3764 } 3765 3766 3767 /* 3768 * Function: sd_set_vers1_properties 3769 * 3770 * Description: Set version 1 device properties based on a property list 3771 * retrieved from the driver configuration file or static 3772 * configuration table. Version 1 properties have the format: 3773 * 3774 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3775 * 3776 * where the prop0 value will be used to set prop0 if bit0 3777 * is set in the flags 3778 * 3779 * Arguments: un - driver soft state (unit) structure 3780 * flags - integer mask indicating properties to be set 3781 * prop_list - integer list of property values 3782 */ 3783 3784 static void 3785 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 3786 { 3787 ASSERT(un != NULL); 3788 3789 /* 3790 * Set the flag to indicate cache is to be disabled. An attempt 3791 * to disable the cache via sd_disable_caching() will be made 3792 * later during attach once the basic initialization is complete. 
3793 */ 3794 if (flags & SD_CONF_BSET_NOCACHE) { 3795 un->un_f_opt_disable_cache = TRUE; 3796 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3797 "sd_set_vers1_properties: caching disabled flag set\n"); 3798 } 3799 3800 /* CD-specific configuration parameters */ 3801 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 3802 un->un_f_cfg_playmsf_bcd = TRUE; 3803 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3804 "sd_set_vers1_properties: playmsf_bcd set\n"); 3805 } 3806 if (flags & SD_CONF_BSET_READSUB_BCD) { 3807 un->un_f_cfg_readsub_bcd = TRUE; 3808 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3809 "sd_set_vers1_properties: readsub_bcd set\n"); 3810 } 3811 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 3812 un->un_f_cfg_read_toc_trk_bcd = TRUE; 3813 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3814 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 3815 } 3816 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 3817 un->un_f_cfg_read_toc_addr_bcd = TRUE; 3818 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3819 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 3820 } 3821 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 3822 un->un_f_cfg_no_read_header = TRUE; 3823 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3824 "sd_set_vers1_properties: no_read_header set\n"); 3825 } 3826 if (flags & SD_CONF_BSET_READ_CD_XD4) { 3827 un->un_f_cfg_read_cd_xd4 = TRUE; 3828 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3829 "sd_set_vers1_properties: read_cd_xd4 set\n"); 3830 } 3831 3832 /* Support for devices which do not have valid/unique serial numbers */ 3833 if (flags & SD_CONF_BSET_FAB_DEVID) { 3834 un->un_f_opt_fab_devid = TRUE; 3835 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3836 "sd_set_vers1_properties: fab_devid bit set\n"); 3837 } 3838 3839 /* Support for user throttle configuration */ 3840 if (flags & SD_CONF_BSET_THROTTLE) { 3841 ASSERT(prop_list != NULL); 3842 un->un_saved_throttle = un->un_throttle = 3843 prop_list->sdt_throttle; 3844 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3845 "sd_set_vers1_properties: throttle set to %d\n", 3846 prop_list->sdt_throttle); 3847 } 3848 3849 /* Set the 
per disk retry count according to the conf file or table. */ 3850 if (flags & SD_CONF_BSET_NRR_COUNT) { 3851 ASSERT(prop_list != NULL); 3852 if (prop_list->sdt_not_rdy_retries) { 3853 un->un_notready_retry_count = 3854 prop_list->sdt_not_rdy_retries; 3855 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3856 "sd_set_vers1_properties: not ready retry count" 3857 " set to %d\n", un->un_notready_retry_count); 3858 } 3859 } 3860 3861 /* The controller type is reported for generic disk driver ioctls */ 3862 if (flags & SD_CONF_BSET_CTYPE) { 3863 ASSERT(prop_list != NULL); 3864 switch (prop_list->sdt_ctype) { 3865 case CTYPE_CDROM: 3866 un->un_ctype = prop_list->sdt_ctype; 3867 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3868 "sd_set_vers1_properties: ctype set to " 3869 "CTYPE_CDROM\n"); 3870 break; 3871 case CTYPE_CCS: 3872 un->un_ctype = prop_list->sdt_ctype; 3873 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3874 "sd_set_vers1_properties: ctype set to " 3875 "CTYPE_CCS\n"); 3876 break; 3877 case CTYPE_ROD: /* RW optical */ 3878 un->un_ctype = prop_list->sdt_ctype; 3879 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3880 "sd_set_vers1_properties: ctype set to " 3881 "CTYPE_ROD\n"); 3882 break; 3883 default: 3884 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3885 "sd_set_vers1_properties: Could not set " 3886 "invalid ctype value (%d)", 3887 prop_list->sdt_ctype); 3888 } 3889 } 3890 3891 /* Purple failover timeout */ 3892 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 3893 ASSERT(prop_list != NULL); 3894 un->un_busy_retry_count = 3895 prop_list->sdt_busy_retries; 3896 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3897 "sd_set_vers1_properties: " 3898 "busy retry count set to %d\n", 3899 un->un_busy_retry_count); 3900 } 3901 3902 /* Purple reset retry count */ 3903 if (flags & SD_CONF_BSET_RST_RETRIES) { 3904 ASSERT(prop_list != NULL); 3905 un->un_reset_retry_count = 3906 prop_list->sdt_reset_retries; 3907 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3908 "sd_set_vers1_properties: " 3909 "reset retry count set to %d\n", 3910 
un->un_reset_retry_count); 3911 } 3912 3913 /* Purple reservation release timeout */ 3914 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 3915 ASSERT(prop_list != NULL); 3916 un->un_reserve_release_time = 3917 prop_list->sdt_reserv_rel_time; 3918 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3919 "sd_set_vers1_properties: " 3920 "reservation release timeout set to %d\n", 3921 un->un_reserve_release_time); 3922 } 3923 3924 /* 3925 * Driver flag telling the driver to verify that no commands are pending 3926 * for a device before issuing a Test Unit Ready. This is a workaround 3927 * for a firmware bug in some Seagate eliteI drives. 3928 */ 3929 if (flags & SD_CONF_BSET_TUR_CHECK) { 3930 un->un_f_cfg_tur_check = TRUE; 3931 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3932 "sd_set_vers1_properties: tur queue check set\n"); 3933 } 3934 3935 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 3936 un->un_min_throttle = prop_list->sdt_min_throttle; 3937 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3938 "sd_set_vers1_properties: min throttle set to %d\n", 3939 un->un_min_throttle); 3940 } 3941 3942 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 3943 un->un_f_disksort_disabled = 3944 (prop_list->sdt_disk_sort_dis != 0) ? 3945 TRUE : FALSE; 3946 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3947 "sd_set_vers1_properties: disksort disabled " 3948 "flag set to %d\n", 3949 prop_list->sdt_disk_sort_dis); 3950 } 3951 3952 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 3953 un->un_f_lun_reset_enabled = 3954 (prop_list->sdt_lun_reset_enable != 0) ? 3955 TRUE : FALSE; 3956 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3957 "sd_set_vers1_properties: lun reset enabled " 3958 "flag set to %d\n", 3959 prop_list->sdt_lun_reset_enable); 3960 } 3961 3962 /* 3963 * Validate the throttle values. 3964 * If any of the numbers are invalid, set everything to defaults. 
3965 */ 3966 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 3967 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 3968 (un->un_min_throttle > un->un_throttle)) { 3969 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 3970 un->un_min_throttle = sd_min_throttle; 3971 } 3972 } 3973 3974 /* 3975 * Function: sd_is_lsi() 3976 * 3977 * Description: Check for lsi devices, step throught the static device 3978 * table to match vid/pid. 3979 * 3980 * Args: un - ptr to sd_lun 3981 * 3982 * Notes: When creating new LSI property, need to add the new LSI property 3983 * to this function. 3984 */ 3985 static void 3986 sd_is_lsi(struct sd_lun *un) 3987 { 3988 char *id = NULL; 3989 int table_index; 3990 int idlen; 3991 void *prop; 3992 3993 ASSERT(un != NULL); 3994 for (table_index = 0; table_index < sd_disk_table_size; 3995 table_index++) { 3996 id = sd_disk_table[table_index].device_id; 3997 idlen = strlen(id); 3998 if (idlen == 0) { 3999 continue; 4000 } 4001 4002 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4003 prop = sd_disk_table[table_index].properties; 4004 if (prop == &lsi_properties || 4005 prop == &lsi_oem_properties || 4006 prop == &lsi_properties_scsi || 4007 prop == &symbios_properties) { 4008 un->un_f_cfg_is_lsi = TRUE; 4009 } 4010 break; 4011 } 4012 } 4013 } 4014 4015 4016 /* 4017 * The following routines support reading and interpretation of disk labels, 4018 * including Solaris BE (8-slice) vtoc's, Solaris LE (16-slice) vtoc's, and 4019 * fdisk tables. 4020 */ 4021 4022 /* 4023 * Function: sd_validate_geometry 4024 * 4025 * Description: Read the label from the disk (if present). Update the unit's 4026 * geometry and vtoc information from the data in the label. 4027 * Verify that the label is valid. 
 *
 * Arguments: un - driver soft state (unit) structure
 *	path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0 - Successful completion
 *		EINVAL  - Invalid value in un->un_tgt_blocksize or
 *			  un->un_blockcount; or label on disk is corrupted
 *			  or unreadable.
 *		EACCES  - Reservation conflict at the device.
 *		ENOMEM  - Resource allocation error
 *		ENOTSUP - geometry not applicable
 *
 * Context: Kernel thread only (can sleep).
 */

static int
sd_validate_geometry(struct sd_lun *un, int path_flag)
{
	static	char		labelstring[128];
	static	char		buf[256];
	char	*label		= NULL;
	int	label_error	= 0;
	int	gvalid		= un->un_f_geometry_is_valid;
	int	lbasize;
	uint_t	capacity;
	int	count;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/*
	 * If the required values are not valid, then try getting them
	 * once via read capacity. If that fails, then fail this call.
	 * This is necessary with the new mpxio failover behavior in
	 * the T300 where we can get an attach for the inactive path
	 * before the active path. The inactive path fails commands with
	 * sense data of 02,04,88 which happens to the read capacity
	 * before mpxio has had sufficient knowledge to know if it should
	 * force a fail over or not. (Which it won't do at attach anyhow).
	 * If the read capacity at attach time fails, un_tgt_blocksize and
	 * un_blockcount won't be valid.
	 */
	if ((un->un_f_tgt_blocksize_is_valid != TRUE) ||
	    (un->un_f_blockcount_is_valid != TRUE)) {
		uint64_t	cap;
		uint32_t	lbasz;
		int		rval;

		/*
		 * NOTE(review): this retry deliberately uses SD_PATH_DIRECT
		 * rather than the caller's path_flag — presumably because the
		 * priority chain is not needed for this one-shot probe;
		 * confirm before changing.
		 */
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(un, &cap,
		    &lbasz, SD_PATH_DIRECT);
		mutex_enter(SD_MUTEX(un));
		if (rval == 0) {
			/*
			 * The following relies on
			 * sd_send_scsi_READ_CAPACITY never
			 * returning 0 for capacity and/or lbasize.
			 */
			sd_update_block_info(un, lbasz, cap);
		}

		if ((un->un_f_tgt_blocksize_is_valid != TRUE) ||
		    (un->un_f_blockcount_is_valid != TRUE)) {
			return (EINVAL);
		}
	}

	/*
	 * Copy the lbasize and capacity so that if they're reset while we're
	 * not holding the SD_MUTEX, we will continue to use valid values
	 * after the SD_MUTEX is reacquired. (4119659)
	 */
	lbasize  = un->un_tgt_blocksize;
	capacity = un->un_blockcount;

#if defined(_SUNOS_VTOC_16)
	/*
	 * Set up the "whole disk" fdisk partition; this should always
	 * exist, regardless of whether the disk contains an fdisk table
	 * or vtoc.
	 */
	un->un_map[P0_RAW_DISK].dkl_cylno = 0;
	un->un_map[P0_RAW_DISK].dkl_nblk  = capacity;
#endif

	/*
	 * Refresh the logical and physical geometry caches.
	 * (data from MODE SENSE format/rigid disk geometry pages,
	 * and scsi_ifgetcap("geometry").
	 */
	sd_resync_geom_caches(un, capacity, lbasize, path_flag);

	label_error = sd_use_efi(un, path_flag);
	if (label_error == 0) {
		/* found a valid EFI label */
		SD_TRACE(SD_LOG_IO_PARTITION, un,
		    "sd_validate_geometry: found EFI label\n");
		un->un_solaris_offset = 0;
		un->un_solaris_size = capacity;
		/* EFI labels are handled elsewhere; VTOC geometry N/A */
		return (ENOTSUP);
	}
	if (un->un_blockcount > DK_MAX_BLOCKS) {
		if (label_error == ESRCH) {
			/*
			 * they've configured a LUN over 1TB, but used
			 * format.dat to restrict format's view of the
			 * capacity to be under 1TB
			 */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"is >1TB and has a VTOC label: use format(1M) to either decrease the");
			scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
"size to be < 1TB or relabel the disk with an EFI label");
		} else {
			/* unlabeled disk over 1TB */
			return (ENOTSUP);
		}
	}
	label_error = 0;

	/*
	 * at this point it is either labeled with a VTOC or it is
	 * under 1TB
	 */

	/*
	 * Only DIRECT ACCESS devices will have Sun labels.
	 * CD's supposedly have a Sun label, too
	 */
	if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT || ISREMOVABLE(un)) {
		struct	dk_label *dkl;
		offset_t dkl1;
		offset_t label_addr, real_addr;
		int	rval;
		size_t	buffer_size;

		/*
		 * Note: This will set up un->un_solaris_size and
		 * un->un_solaris_offset.
		 */
		switch (sd_read_fdisk(un, capacity, lbasize, path_flag)) {
		case SD_CMD_RESERVATION_CONFLICT:
			ASSERT(mutex_owned(SD_MUTEX(un)));
			return (EACCES);
		case SD_CMD_FAILURE:
			ASSERT(mutex_owned(SD_MUTEX(un)));
			return (ENOMEM);
		}

		if (un->un_solaris_size <= DK_LABEL_LOC) {
			/*
			 * Found fdisk table but no Solaris partition entry,
			 * so don't call sd_uselabel() and don't create
			 * a default label.
			 */
			label_error = 0;
			un->un_f_geometry_is_valid = TRUE;
			goto no_solaris_partition;
		}
		label_addr = (daddr_t)(un->un_solaris_offset + DK_LABEL_LOC);

		/*
		 * sys_blocksize != tgt_blocksize, need to re-adjust
		 * blkno and save the index to beginning of dk_label
		 */
		real_addr = SD_SYS2TGTBLOCK(un, label_addr);
		buffer_size = SD_REQBYTES2TGTBYTES(un,
		    sizeof (struct dk_label));

		SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_validate_geometry: "
		    "label_addr: 0x%x allocation size: 0x%x\n",
		    label_addr, buffer_size);
		/* KM_NOSLEEP: caller holds SD_MUTEX, so don't block here */
		dkl = kmem_zalloc(buffer_size, KM_NOSLEEP);
		if (dkl == NULL) {
			return (ENOMEM);
		}

		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ(un, dkl, buffer_size, real_addr,
		    path_flag);
		mutex_enter(SD_MUTEX(un));

		switch (rval) {
		case 0:
			/*
			 * sd_uselabel will establish that the geometry
			 * is valid.
			 * For sys_blocksize != tgt_blocksize, need
			 * to index into the beginning of dk_label
			 */
			dkl1 = (daddr_t)dkl
			    + SD_TGTBYTEOFFSET(un, label_addr, real_addr);
			if (sd_uselabel(un, (struct dk_label *)(uintptr_t)dkl1,
			    path_flag) != SD_LABEL_IS_VALID) {
				label_error = EINVAL;
			}
			break;
		case EACCES:
			label_error = EACCES;
			break;
		default:
			label_error = EINVAL;
			break;
		}

		kmem_free(dkl, buffer_size);

#if defined(_SUNOS_VTOC_8)
		label = (char *)un->un_asciilabel;
#elif defined(_SUNOS_VTOC_16)
		label = (char *)un->un_vtoc.v_asciilabel;
#else
#error "No VTOC format defined."
#endif
	}

	/*
	 * If a valid label was not found, AND if no reservation conflict
	 * was detected, then go ahead and create a default label (4069506).
	 *
	 * Note: currently, for VTOC_8 devices, the default label is created
	 * for removables only.  For VTOC_16 devices, the default label will
	 * be created for both removables and non-removables alike.
	 * (see sd_build_default_label)
	 */
#if defined(_SUNOS_VTOC_8)
	if (ISREMOVABLE(un) && (label_error != EACCES)) {
#elif defined(_SUNOS_VTOC_16)
	if (label_error != EACCES) {
#endif
		if (un->un_f_geometry_is_valid == FALSE) {
			sd_build_default_label(un);
		}
		label_error = 0;
	}

no_solaris_partition:
	if ((!ISREMOVABLE(un) ||
	    (ISREMOVABLE(un) && un->un_mediastate == DKIO_EJECTED)) &&
	    (un->un_state == SD_STATE_NORMAL && gvalid == FALSE)) {
		/*
		 * Print out a message indicating who and what we are.
		 * We do this only when we happen to really validate the
		 * geometry. We may call sd_validate_geometry() at other
		 * times, e.g., ioctl()'s like Get VTOC in which case we
		 * don't want to print the label.
		 * If the geometry is valid, print the label string,
		 * else print vendor and product info, if available
		 */
		if ((un->un_f_geometry_is_valid == TRUE) && (label != NULL)) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un, "?<%s>\n", label);
		} else {
			/* sd_label_mutex guards the static labelstring/buf */
			mutex_enter(&sd_label_mutex);
			sd_inq_fill(SD_INQUIRY(un)->inq_vid, VIDMAX,
			    labelstring);
			sd_inq_fill(SD_INQUIRY(un)->inq_pid, PIDMAX,
			    &labelstring[64]);
			(void) sprintf(buf, "?Vendor '%s', product '%s'",
			    labelstring, &labelstring[64]);
			if (un->un_f_blockcount_is_valid == TRUE) {
				(void) sprintf(&buf[strlen(buf)],
				    ", %llu %u byte blocks\n",
				    (longlong_t)un->un_blockcount,
				    un->un_tgt_blocksize);
			} else {
				(void) sprintf(&buf[strlen(buf)],
				    ", (unknown capacity)\n");
			}
			SD_INFO(SD_LOG_ATTACH_DETACH, un, buf);
			mutex_exit(&sd_label_mutex);
		}
	}

#if defined(_SUNOS_VTOC_16)
	/*
	 * If we have valid geometry, set up the remaining fdisk partitions.
	 * Note that dkl_cylno is not used for the fdisk map entries, so
	 * we set it to an entirely bogus value.
	 */
	for (count = 0; count < FD_NUMPART; count++) {
		un->un_map[FDISK_P1 + count].dkl_cylno = -1;
		un->un_map[FDISK_P1 + count].dkl_nblk =
		    un->un_fmap[count].fmap_nblk;

		un->un_offset[FDISK_P1 + count] =
		    un->un_fmap[count].fmap_start;
	}
#endif

	for (count = 0; count < NDKMAP; count++) {
#if defined(_SUNOS_VTOC_8)
		struct dk_map *lp  = &un->un_map[count];
		un->un_offset[count] =
		    un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno;
#elif defined(_SUNOS_VTOC_16)
		struct dkl_partition *vp = &un->un_vtoc.v_part[count];

		un->un_offset[count] = vp->p_start + un->un_solaris_offset;
#else
#error "No VTOC format defined."
#endif
	}

	return (label_error);
}


#if defined(_SUNOS_VTOC_16)
/*
 * Macro: MAX_BLKS
 *
 *	This macro is used for table entries where we need to have the largest
 *	possible sector value for that head & SPT (sectors per track)
 *	combination.  Other entries for some smaller disk sizes are set by
 *	convention to match those used by X86 BIOS usage.
 */
#define	MAX_BLKS(heads, spt)	UINT16_MAX * heads * spt, heads, spt

/*
 * Function: sd_convert_geometry
 *
 * Description: Convert physical geometry into a dk_geom structure. In
 *		other words, make sure we don't wrap 16-bit values.
 *		e.g. converting from geom_cache to dk_geom
 *
 * Context: Kernel thread only
 */
static void
sd_convert_geometry(uint64_t capacity, struct dk_geom *un_g)
{
	int i;
	static const struct chs_values {
		uint_t max_cap;		/* Max Capacity for this HS. */
		uint_t nhead;		/* Heads to use. */
		uint_t nsect;		/* SPT to use. */
	} CHS_values[] = {
		{0x00200000, 64, 32},		/* 1GB or smaller disk.
 */
		{0x01000000, 128, 32},		/* 8GB or smaller disk. */
		{MAX_BLKS(255, 63)},		/* 502.02GB or smaller disk. */
		{MAX_BLKS(255, 126)},		/* .98TB or smaller disk. */
		{DK_MAX_BLOCKS, 255, 189}	/* Max size is just under 1TB */
	};

	/* Unlabeled SCSI floppy device */
	if (capacity <= 0x1000) {
		un_g->dkg_nhead = 2;
		un_g->dkg_ncyl = 80;
		un_g->dkg_nsect = capacity / (un_g->dkg_nhead * un_g->dkg_ncyl);
		return;
	}

	/*
	 * For all devices we calculate cylinders using the
	 * heads and sectors we assign based on capacity of the
	 * device.  The table is designed to be compatible with the
	 * way other operating systems lay out fdisk tables for X86
	 * and to insure that the cylinders never exceed 65535 to
	 * prevent problems with X86 ioctls that report geometry.
	 * We use SPT that are multiples of 63, since other OSes that
	 * are not limited to 16-bits for cylinders stop at 63 SPT
	 * we make do by using multiples of 63 SPT.
	 *
	 * Note than capacities greater than or equal to 1TB will simply
	 * get the largest geometry from the table. This should be okay
	 * since disks this large shouldn't be using CHS values anyway.
	 */
	/* find first table entry whose max_cap covers this capacity */
	for (i = 0; CHS_values[i].max_cap < capacity &&
	    CHS_values[i].max_cap != DK_MAX_BLOCKS; i++)
		;

	un_g->dkg_nhead = CHS_values[i].nhead;
	un_g->dkg_nsect = CHS_values[i].nsect;
}
#endif


/*
 * Function: sd_resync_geom_caches
 *
 * Description: (Re)initialize both geometry caches: the virtual geometry
 *		information is extracted from the HBA (the "geometry"
 *		capability), and the physical geometry cache data is
 *		generated by issuing MODE SENSE commands.
 *
 * Arguments: un - driver soft state (unit) structure
 *	capacity - disk capacity in #blocks
 *	lbasize - disk block size in bytes
 *	path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Context: Kernel thread only (can sleep).
 */

static void
sd_resync_geom_caches(struct sd_lun *un, int capacity, int lbasize,
    int path_flag)
{
	struct	geom_cache	pgeom;
	struct	geom_cache	*pgeom_p = &pgeom;
	int	spc;
	unsigned short nhead;
	unsigned short nsect;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/*
	 * Ask the controller for its logical geometry.
	 * Note: if the HBA does not support scsi_ifgetcap("geometry"),
	 * then the lgeom cache will be invalid.
	 */
	sd_get_virtual_geometry(un, capacity, lbasize);

	/*
	 * Initialize the pgeom cache from lgeom, so that if MODE SENSE
	 * doesn't work, DKIOCG_PHYSGEOM can return reasonable values.
	 */
	if (un->un_lgeom.g_nsect == 0 || un->un_lgeom.g_nhead == 0) {
		/*
		 * Note: Perhaps this needs to be more adaptive? The rationale
		 * is that, if there's no HBA geometry from the HBA driver, any
		 * guess is good, since this is the physical geometry. If MODE
		 * SENSE fails this gives a max cylinder size for non-LBA access
		 */
		nhead = 255;
		nsect = 63;
	} else {
		nhead = un->un_lgeom.g_nhead;
		nsect = un->un_lgeom.g_nsect;
	}

	if (ISCD(un)) {
		/* CDs are modeled as a single "cylinder surface" */
		pgeom_p->g_nhead = 1;
		pgeom_p->g_nsect = nsect * nhead;
	} else {
		pgeom_p->g_nhead = nhead;
		pgeom_p->g_nsect = nsect;
	}

	spc = pgeom_p->g_nhead * pgeom_p->g_nsect;
	pgeom_p->g_capacity = capacity;
	pgeom_p->g_ncyl = pgeom_p->g_capacity / spc;
	pgeom_p->g_acyl = 0;

	/*
	 * Retrieve fresh geometry data from the hardware, stash it
	 * here temporarily before we rebuild the incore label.
	 *
	 * We want to use the MODE SENSE commands to derive the
	 * physical geometry of the device, but if either command
	 * fails, the logical geometry is used as the fallback for
	 * disk label geometry.
	 */
	mutex_exit(SD_MUTEX(un));
	sd_get_physical_geometry(un, pgeom_p, capacity, lbasize, path_flag);
	mutex_enter(SD_MUTEX(un));

	/*
	 * Now update the real copy while holding the mutex. This
	 * way the global copy is never in an inconsistent state.
	 */
	bcopy(pgeom_p, &un->un_pgeom,  sizeof (un->un_pgeom));

	SD_INFO(SD_LOG_COMMON, un, "sd_resync_geom_caches: "
	    "(cached from lgeom)\n");
	SD_INFO(SD_LOG_COMMON, un,
	    "   ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n",
	    un->un_pgeom.g_ncyl, un->un_pgeom.g_acyl,
	    un->un_pgeom.g_nhead, un->un_pgeom.g_nsect);
	SD_INFO(SD_LOG_COMMON, un, "   lbasize: %d; capacity: %ld; "
	    "intrlv: %d; rpm: %d\n", un->un_pgeom.g_secsize,
	    un->un_pgeom.g_capacity, un->un_pgeom.g_intrlv,
	    un->un_pgeom.g_rpm);
}


/*
 * Function: sd_read_fdisk
 *
 * Description: utility routine to read the fdisk table.
 *
 * Arguments: un - driver soft state (unit) structure
 *	path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: SD_CMD_SUCCESS
 *		SD_CMD_FAILURE
 *
 * Context: Kernel thread only (can sleep).
 */
/* ARGSUSED */
static int
sd_read_fdisk(struct sd_lun *un, uint_t capacity, int lbasize, int path_flag)
{
#if defined(_NO_FDISK_PRESENT)

	/* platform has no fdisk: whole disk is the Solaris partition */
	un->un_solaris_offset = 0;
	un->un_solaris_size = capacity;
	bzero(un->un_fmap, sizeof (struct fmap) * FD_NUMPART);
	return (SD_CMD_SUCCESS);

#elif defined(_FIRMWARE_NEEDS_FDISK)

	struct ipart	*fdp;
	struct mboot	*mbp;
	struct ipart	fdisk[FD_NUMPART];
	int		i;
	char		sigbuf[2];
	caddr_t		bufp;
	int		uidx;
	int		rval;
	int		lba = 0;
	uint_t		solaris_offset;	/* offset to solaris part. */
	daddr_t		solaris_size;	/* size of solaris partition */
	uint32_t	blocksize;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_f_tgt_blocksize_is_valid == TRUE);

	blocksize = un->un_tgt_blocksize;

	/*
	 * Start off assuming no fdisk table
	 */
	solaris_offset = 0;
	solaris_size   = capacity;

	/* read block 0 (the MBR) from the device */
	mutex_exit(SD_MUTEX(un));
	bufp = kmem_zalloc(blocksize, KM_SLEEP);
	rval = sd_send_scsi_READ(un, bufp, blocksize, 0, path_flag);
	mutex_enter(SD_MUTEX(un));

	if (rval != 0) {
		SD_ERROR(SD_LOG_ATTACH_DETACH, un,
		    "sd_read_fdisk: fdisk read err\n");
		kmem_free(bufp, blocksize);
		return (SD_CMD_FAILURE);
	}

	mbp = (struct mboot *)bufp;

	/*
	 * The fdisk table does not begin on a 4-byte boundary within the
	 * master boot record, so we copy it to an aligned structure to avoid
	 * alignment exceptions on some processors.
	 */
	bcopy(&mbp->parts[0], fdisk, sizeof (fdisk));

	/*
	 * Check for lba support before verifying sig; sig might not be
	 * there, say on a blank disk, but the max_chs mark may still
	 * be present.
	 *
	 * Note: LBA support and BEFs are an x86-only concept but this
	 * code should work OK on SPARC as well.
	 */

	/*
	 * First, check for lba-access-ok on root node (or prom root node)
	 * if present there, don't need to search fdisk table.
	 */
	if (ddi_getprop(DDI_DEV_T_ANY, ddi_root_node(), 0,
	    "lba-access-ok", 0) != 0) {
		/* All drives do LBA; don't search fdisk table */
		lba = 1;
	} else {
		/* Okay, look for mark in fdisk table */
		for (fdp = fdisk, i = 0; i < FD_NUMPART; i++, fdp++) {
			/* accumulate "lba" value from all partitions */
			lba = (lba || sd_has_max_chs_vals(fdp));
		}
	}

	/*
	 * Next, look for 'no-bef-lba-access' prop on parent.
	 * Its presence means the realmode driver doesn't support
	 * LBA, so the target driver shouldn't advertise it as ok.
	 * This should be a temporary condition; one day all
	 * BEFs should support the LBA access functions.
	 */
	if ((lba != 0) && (ddi_getprop(DDI_DEV_T_ANY,
	    ddi_get_parent(SD_DEVINFO(un)), DDI_PROP_DONTPASS,
	    "no-bef-lba-access", 0) != 0)) {
		/* BEF doesn't support LBA; don't advertise it as ok */
		lba = 0;
	}

	if (lba != 0) {
		dev_t dev = sd_make_device(SD_DEVINFO(un));

		if (ddi_getprop(dev, SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "lba-access-ok", 0) == 0) {
			/* not found; create it */
			if (ddi_prop_create(dev, SD_DEVINFO(un), 0,
			    "lba-access-ok", (caddr_t)NULL, 0) !=
			    DDI_PROP_SUCCESS) {
				SD_ERROR(SD_LOG_ATTACH_DETACH, un,
				    "sd_read_fdisk: Can't create lba property "
				    "for instance %d\n",
				    ddi_get_instance(SD_DEVINFO(un)));
			}
		}
	}

	bcopy(&mbp->signature, sigbuf, sizeof (sigbuf));

	/*
	 * Endian-independent signature check
	 */
	if (((sigbuf[1] & 0xFF) != ((MBB_MAGIC >> 8) & 0xFF)) ||
	    (sigbuf[0] != (MBB_MAGIC & 0xFF))) {
		SD_ERROR(SD_LOG_ATTACH_DETACH, un,
		    "sd_read_fdisk: no fdisk\n");
		bzero(un->un_fmap, sizeof (struct fmap) * FD_NUMPART);
		rval = SD_CMD_SUCCESS;
		goto done;
	}

#ifdef SDDEBUG
	if (sd_level_mask & SD_LOGMASK_INFO) {
		fdp = fdisk;
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_read_fdisk:\n");
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "         relsect    "
		    "numsect         sysid       bootid\n");
		for (i = 0; i < FD_NUMPART; i++, fdp++) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "    %d:  %8d   %8d     0x%08x     0x%08x\n",
			    i, fdp->relsect, fdp->numsect,
			    fdp->systid, fdp->bootid);
		}
	}
#endif

	/*
	 * Try to find the unix partition
	 */
	uidx = -1;
	solaris_offset = 0;
	solaris_size   = 0;

	for (fdp = fdisk, i = 0; i < FD_NUMPART; i++, fdp++) {
		int	relsect;
		int	numsect;

		if (fdp->numsect == 0) {
			un->un_fmap[i].fmap_start = 0;
			un->un_fmap[i].fmap_nblk  = 0;
			continue;
		}

		/*
		 * Data in the fdisk table is little-endian.
		 */
		relsect = LE_32(fdp->relsect);
		numsect = LE_32(fdp->numsect);

		un->un_fmap[i].fmap_start = relsect;
		un->un_fmap[i].fmap_nblk  = numsect;

		if (fdp->systid != SUNIXOS &&
		    fdp->systid != SUNIXOS2 &&
		    fdp->systid != EFI_PMBR) {
			continue;
		}

		/*
		 * use the last active solaris partition id found
		 * (there should only be 1 active partition id)
		 *
		 * if there are no active solaris partition id
		 * then use the first inactive solaris partition id
		 */
		if ((uidx == -1) || (fdp->bootid == ACTIVE)) {
			uidx = i;
			solaris_offset = relsect;
			solaris_size   = numsect;
		}
	}

	/*
	 * NOTE(review): this logs un_solaris_offset/un_solaris_size, which
	 * are the OLD values (the fields are only updated after the done:
	 * label below) — presumably intentional to show the prior state, but
	 * confirm; logging solaris_offset/solaris_size may have been meant.
	 */
	SD_INFO(SD_LOG_ATTACH_DETACH, un, "fdisk 0x%x 0x%lx",
	    un->un_solaris_offset, un->un_solaris_size);

	rval = SD_CMD_SUCCESS;

done:

	/*
	 * Clear the VTOC info, only if the Solaris partition entry
	 * has moved, changed size, been deleted, or if the size of
	 * the partition is too small to even fit the label sector.
	 */
	if ((un->un_solaris_offset != solaris_offset) ||
	    (un->un_solaris_size != solaris_size) ||
	    solaris_size <= DK_LABEL_LOC) {
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "fdisk moved 0x%x 0x%lx",
		    solaris_offset, solaris_size);
		bzero(&un->un_g, sizeof (struct dk_geom));
		bzero(&un->un_vtoc, sizeof (struct dk_vtoc));
		bzero(&un->un_map, NDKMAP * (sizeof (struct dk_map)));
		un->un_f_geometry_is_valid = FALSE;
	}
	un->un_solaris_offset = solaris_offset;
	un->un_solaris_size = solaris_size;
	kmem_free(bufp, blocksize);
	return (rval);

#else	/* #elif defined(_FIRMWARE_NEEDS_FDISK) */
#error "fdisk table presence undetermined for this platform."
#endif	/* #if defined(_NO_FDISK_PRESENT) */
}


/*
 * Function: sd_get_physical_geometry
 *
 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and
 *		MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the
 *		target, and use this information to initialize the physical
 *		geometry cache specified by pgeom_p.
 *
 *		MODE SENSE is an optional command, so failure in this case
 *		does not necessarily denote an error. We want to use the
 *		MODE SENSE commands to derive the physical geometry of the
 *		device, but if either command fails, the logical geometry is
 *		used as the fallback for disk label geometry.
 *
 *		This requires that un->un_blockcount and un->un_tgt_blocksize
 *		have already been initialized for the current target and
 *		that the current values be passed as args so that we don't
 *		end up ever trying to use -1 as a valid value. This could
 *		happen if either value is reset while we're not holding
 *		the mutex.
 *
 * Arguments: un - driver soft state (unit) structure
 *	path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Context: Kernel thread only (can sleep).
4775 */ 4776 4777 static void 4778 sd_get_physical_geometry(struct sd_lun *un, struct geom_cache *pgeom_p, 4779 int capacity, int lbasize, int path_flag) 4780 { 4781 struct mode_format *page3p; 4782 struct mode_geometry *page4p; 4783 struct mode_header *headerp; 4784 int sector_size; 4785 int nsect; 4786 int nhead; 4787 int ncyl; 4788 int intrlv; 4789 int spc; 4790 int modesense_capacity; 4791 int rpm; 4792 int bd_len; 4793 int mode_header_length; 4794 uchar_t *p3bufp; 4795 uchar_t *p4bufp; 4796 int cdbsize; 4797 4798 ASSERT(un != NULL); 4799 ASSERT(!(mutex_owned(SD_MUTEX(un)))); 4800 4801 if (un->un_f_blockcount_is_valid != TRUE) { 4802 return; 4803 } 4804 4805 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 4806 return; 4807 } 4808 4809 if (lbasize == 0) { 4810 if (ISCD(un)) { 4811 lbasize = 2048; 4812 } else { 4813 lbasize = un->un_sys_blocksize; 4814 } 4815 } 4816 pgeom_p->g_secsize = (unsigned short)lbasize; 4817 4818 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0; 4819 4820 /* 4821 * Retrieve MODE SENSE page 3 - Format Device Page 4822 */ 4823 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4824 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4825 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4826 != 0) { 4827 SD_ERROR(SD_LOG_COMMON, un, 4828 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4829 goto page3_exit; 4830 } 4831 4832 /* 4833 * Determine size of Block Descriptors in order to locate the mode 4834 * page data. ATAPI devices return 0, SCSI devices should return 4835 * MODE_BLK_DESC_LENGTH. 
4836 */ 4837 headerp = (struct mode_header *)p3bufp; 4838 if (un->un_f_cfg_is_atapi == TRUE) { 4839 struct mode_header_grp2 *mhp = 4840 (struct mode_header_grp2 *)headerp; 4841 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4842 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4843 } else { 4844 mode_header_length = MODE_HEADER_LENGTH; 4845 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4846 } 4847 4848 if (bd_len > MODE_BLK_DESC_LENGTH) { 4849 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4850 "received unexpected bd_len of %d, page3\n", bd_len); 4851 goto page3_exit; 4852 } 4853 4854 page3p = (struct mode_format *) 4855 ((caddr_t)headerp + mode_header_length + bd_len); 4856 4857 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4858 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4859 "mode sense pg3 code mismatch %d\n", 4860 page3p->mode_page.code); 4861 goto page3_exit; 4862 } 4863 4864 /* 4865 * Use this physical geometry data only if BOTH MODE SENSE commands 4866 * complete successfully; otherwise, revert to the logical geometry. 4867 * So, we need to save everything in temporary variables. 4868 */ 4869 sector_size = BE_16(page3p->data_bytes_sect); 4870 4871 /* 4872 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4873 */ 4874 if (sector_size == 0) { 4875 sector_size = (ISCD(un)) ? 
2048 : un->un_sys_blocksize; 4876 } else { 4877 sector_size &= ~(un->un_sys_blocksize - 1); 4878 } 4879 4880 nsect = BE_16(page3p->sect_track); 4881 intrlv = BE_16(page3p->interleave); 4882 4883 SD_INFO(SD_LOG_COMMON, un, 4884 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4885 SD_INFO(SD_LOG_COMMON, un, 4886 " mode page: %d; nsect: %d; sector size: %d;\n", 4887 page3p->mode_page.code, nsect, sector_size); 4888 SD_INFO(SD_LOG_COMMON, un, 4889 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4890 BE_16(page3p->track_skew), 4891 BE_16(page3p->cylinder_skew)); 4892 4893 4894 /* 4895 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4896 */ 4897 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4898 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4899 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4900 != 0) { 4901 SD_ERROR(SD_LOG_COMMON, un, 4902 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4903 goto page4_exit; 4904 } 4905 4906 /* 4907 * Determine size of Block Descriptors in order to locate the mode 4908 * page data. ATAPI devices return 0, SCSI devices should return 4909 * MODE_BLK_DESC_LENGTH. 
4910 */ 4911 headerp = (struct mode_header *)p4bufp; 4912 if (un->un_f_cfg_is_atapi == TRUE) { 4913 struct mode_header_grp2 *mhp = 4914 (struct mode_header_grp2 *)headerp; 4915 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4916 } else { 4917 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4918 } 4919 4920 if (bd_len > MODE_BLK_DESC_LENGTH) { 4921 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4922 "received unexpected bd_len of %d, page4\n", bd_len); 4923 goto page4_exit; 4924 } 4925 4926 page4p = (struct mode_geometry *) 4927 ((caddr_t)headerp + mode_header_length + bd_len); 4928 4929 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4930 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4931 "mode sense pg4 code mismatch %d\n", 4932 page4p->mode_page.code); 4933 goto page4_exit; 4934 } 4935 4936 /* 4937 * Stash the data now, after we know that both commands completed. 4938 */ 4939 4940 mutex_enter(SD_MUTEX(un)); 4941 4942 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4943 spc = nhead * nsect; 4944 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4945 rpm = BE_16(page4p->rpm); 4946 4947 modesense_capacity = spc * ncyl; 4948 4949 SD_INFO(SD_LOG_COMMON, un, 4950 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4951 SD_INFO(SD_LOG_COMMON, un, 4952 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4953 SD_INFO(SD_LOG_COMMON, un, 4954 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4955 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4956 (void *)pgeom_p, capacity); 4957 4958 /* 4959 * Compensate if the drive's geometry is not rectangular, i.e., 4960 * the product of C * H * S returned by MODE SENSE >= that returned 4961 * by read capacity. This is an idiosyncrasy of the original x86 4962 * disk subsystem. 
4963 */ 4964 if (modesense_capacity >= capacity) { 4965 SD_INFO(SD_LOG_COMMON, un, 4966 "sd_get_physical_geometry: adjusting acyl; " 4967 "old: %d; new: %d\n", pgeom_p->g_acyl, 4968 (modesense_capacity - capacity + spc - 1) / spc); 4969 if (sector_size != 0) { 4970 /* 1243403: NEC D38x7 drives don't support sec size */ 4971 pgeom_p->g_secsize = (unsigned short)sector_size; 4972 } 4973 pgeom_p->g_nsect = (unsigned short)nsect; 4974 pgeom_p->g_nhead = (unsigned short)nhead; 4975 pgeom_p->g_capacity = capacity; 4976 pgeom_p->g_acyl = 4977 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4978 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4979 } 4980 4981 pgeom_p->g_rpm = (unsigned short)rpm; 4982 pgeom_p->g_intrlv = (unsigned short)intrlv; 4983 4984 SD_INFO(SD_LOG_COMMON, un, 4985 "sd_get_physical_geometry: mode sense geometry:\n"); 4986 SD_INFO(SD_LOG_COMMON, un, 4987 " nsect: %d; sector size: %d; interlv: %d\n", 4988 nsect, sector_size, intrlv); 4989 SD_INFO(SD_LOG_COMMON, un, 4990 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4991 nhead, ncyl, rpm, modesense_capacity); 4992 SD_INFO(SD_LOG_COMMON, un, 4993 "sd_get_physical_geometry: (cached)\n"); 4994 SD_INFO(SD_LOG_COMMON, un, 4995 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4996 un->un_pgeom.g_ncyl, un->un_pgeom.g_acyl, 4997 un->un_pgeom.g_nhead, un->un_pgeom.g_nsect); 4998 SD_INFO(SD_LOG_COMMON, un, 4999 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 5000 un->un_pgeom.g_secsize, un->un_pgeom.g_capacity, 5001 un->un_pgeom.g_intrlv, un->un_pgeom.g_rpm); 5002 5003 mutex_exit(SD_MUTEX(un)); 5004 5005 page4_exit: 5006 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 5007 page3_exit: 5008 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 5009 } 5010 5011 5012 /* 5013 * Function: sd_get_virtual_geometry 5014 * 5015 * Description: Ask the controller to tell us about the target device. 
5016 * 5017 * Arguments: un - pointer to softstate 5018 * capacity - disk capacity in #blocks 5019 * lbasize - disk block size in bytes 5020 * 5021 * Context: Kernel thread only 5022 */ 5023 5024 static void 5025 sd_get_virtual_geometry(struct sd_lun *un, int capacity, int lbasize) 5026 { 5027 struct geom_cache *lgeom_p = &un->un_lgeom; 5028 uint_t geombuf; 5029 int spc; 5030 5031 ASSERT(un != NULL); 5032 ASSERT(mutex_owned(SD_MUTEX(un))); 5033 5034 mutex_exit(SD_MUTEX(un)); 5035 5036 /* Set sector size, and total number of sectors */ 5037 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 5038 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 5039 5040 /* Let the HBA tell us its geometry */ 5041 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 5042 5043 mutex_enter(SD_MUTEX(un)); 5044 5045 /* A value of -1 indicates an undefined "geometry" property */ 5046 if (geombuf == (-1)) { 5047 return; 5048 } 5049 5050 /* Initialize the logical geometry cache. */ 5051 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 5052 lgeom_p->g_nsect = geombuf & 0xffff; 5053 lgeom_p->g_secsize = un->un_sys_blocksize; 5054 5055 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 5056 5057 /* 5058 * Note: The driver originally converted the capacity value from 5059 * target blocks to system blocks. However, the capacity value passed 5060 * to this routine is already in terms of system blocks (this scaling 5061 * is done when the READ CAPACITY command is issued and processed). 5062 * This 'error' may have gone undetected because the usage of g_ncyl 5063 * (which is based upon g_capacity) is very limited within the driver 5064 */ 5065 lgeom_p->g_capacity = capacity; 5066 5067 /* 5068 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The 5069 * hba may return zero values if the device has been removed. 
5070 */ 5071 if (spc == 0) { 5072 lgeom_p->g_ncyl = 0; 5073 } else { 5074 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 5075 } 5076 lgeom_p->g_acyl = 0; 5077 5078 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 5079 SD_INFO(SD_LOG_COMMON, un, 5080 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 5081 un->un_lgeom.g_ncyl, un->un_lgeom.g_acyl, 5082 un->un_lgeom.g_nhead, un->un_lgeom.g_nsect); 5083 SD_INFO(SD_LOG_COMMON, un, " lbasize: %d; capacity: %ld; " 5084 "intrlv: %d; rpm: %d\n", un->un_lgeom.g_secsize, 5085 un->un_lgeom.g_capacity, un->un_lgeom.g_intrlv, un->un_lgeom.g_rpm); 5086 } 5087 5088 5089 /* 5090 * Function: sd_update_block_info 5091 * 5092 * Description: Calculate a byte count to sector count bitshift value 5093 * from sector size. 5094 * 5095 * Arguments: un: unit struct. 5096 * lbasize: new target sector size 5097 * capacity: new target capacity, ie. block count 5098 * 5099 * Context: Kernel thread context 5100 */ 5101 5102 static void 5103 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 5104 { 5105 if (lbasize != 0) { 5106 un->un_tgt_blocksize = lbasize; 5107 un->un_f_tgt_blocksize_is_valid = TRUE; 5108 } 5109 5110 if (capacity != 0) { 5111 un->un_blockcount = capacity; 5112 un->un_f_blockcount_is_valid = TRUE; 5113 } 5114 } 5115 5116 5117 static void 5118 sd_swap_efi_gpt(efi_gpt_t *e) 5119 { 5120 _NOTE(ASSUMING_PROTECTED(*e)) 5121 e->efi_gpt_Signature = LE_64(e->efi_gpt_Signature); 5122 e->efi_gpt_Revision = LE_32(e->efi_gpt_Revision); 5123 e->efi_gpt_HeaderSize = LE_32(e->efi_gpt_HeaderSize); 5124 e->efi_gpt_HeaderCRC32 = LE_32(e->efi_gpt_HeaderCRC32); 5125 e->efi_gpt_MyLBA = LE_64(e->efi_gpt_MyLBA); 5126 e->efi_gpt_AlternateLBA = LE_64(e->efi_gpt_AlternateLBA); 5127 e->efi_gpt_FirstUsableLBA = LE_64(e->efi_gpt_FirstUsableLBA); 5128 e->efi_gpt_LastUsableLBA = LE_64(e->efi_gpt_LastUsableLBA); 5129 UUID_LE_CONVERT(e->efi_gpt_DiskGUID, e->efi_gpt_DiskGUID); 5130 e->efi_gpt_PartitionEntryLBA = 
LE_64(e->efi_gpt_PartitionEntryLBA); 5131 e->efi_gpt_NumberOfPartitionEntries = 5132 LE_32(e->efi_gpt_NumberOfPartitionEntries); 5133 e->efi_gpt_SizeOfPartitionEntry = 5134 LE_32(e->efi_gpt_SizeOfPartitionEntry); 5135 e->efi_gpt_PartitionEntryArrayCRC32 = 5136 LE_32(e->efi_gpt_PartitionEntryArrayCRC32); 5137 } 5138 5139 static void 5140 sd_swap_efi_gpe(int nparts, efi_gpe_t *p) 5141 { 5142 int i; 5143 5144 _NOTE(ASSUMING_PROTECTED(*p)) 5145 for (i = 0; i < nparts; i++) { 5146 UUID_LE_CONVERT(p[i].efi_gpe_PartitionTypeGUID, 5147 p[i].efi_gpe_PartitionTypeGUID); 5148 p[i].efi_gpe_StartingLBA = LE_64(p[i].efi_gpe_StartingLBA); 5149 p[i].efi_gpe_EndingLBA = LE_64(p[i].efi_gpe_EndingLBA); 5150 /* PartitionAttrs */ 5151 } 5152 } 5153 5154 static int 5155 sd_validate_efi(efi_gpt_t *labp) 5156 { 5157 if (labp->efi_gpt_Signature != EFI_SIGNATURE) 5158 return (EINVAL); 5159 /* at least 96 bytes in this version of the spec. */ 5160 if (sizeof (efi_gpt_t) - sizeof (labp->efi_gpt_Reserved2) > 5161 labp->efi_gpt_HeaderSize) 5162 return (EINVAL); 5163 /* this should be 128 bytes */ 5164 if (labp->efi_gpt_SizeOfPartitionEntry != sizeof (efi_gpe_t)) 5165 return (EINVAL); 5166 return (0); 5167 } 5168 5169 static int 5170 sd_use_efi(struct sd_lun *un, int path_flag) 5171 { 5172 int i; 5173 int rval = 0; 5174 efi_gpe_t *partitions; 5175 uchar_t *buf; 5176 uint_t lbasize; 5177 uint64_t cap; 5178 uint_t nparts; 5179 diskaddr_t gpe_lba; 5180 5181 ASSERT(mutex_owned(SD_MUTEX(un))); 5182 lbasize = un->un_tgt_blocksize; 5183 5184 mutex_exit(SD_MUTEX(un)); 5185 5186 buf = kmem_zalloc(EFI_MIN_ARRAY_SIZE, KM_SLEEP); 5187 5188 if (un->un_tgt_blocksize != un->un_sys_blocksize) { 5189 rval = EINVAL; 5190 goto done_err; 5191 } 5192 5193 rval = sd_send_scsi_READ(un, buf, lbasize, 0, path_flag); 5194 if (rval) { 5195 goto done_err; 5196 } 5197 if (((struct dk_label *)buf)->dkl_magic == DKL_MAGIC) { 5198 /* not ours */ 5199 rval = ESRCH; 5200 goto done_err; 5201 } 5202 5203 rval = 
sd_send_scsi_READ(un, buf, lbasize, 1, path_flag); 5204 if (rval) { 5205 goto done_err; 5206 } 5207 sd_swap_efi_gpt((efi_gpt_t *)buf); 5208 5209 if ((rval = sd_validate_efi((efi_gpt_t *)buf)) != 0) { 5210 /* 5211 * Couldn't read the primary, try the backup. Our 5212 * capacity at this point could be based on CHS, so 5213 * check what the device reports. 5214 */ 5215 rval = sd_send_scsi_READ_CAPACITY(un, &cap, &lbasize, 5216 path_flag); 5217 if (rval) { 5218 goto done_err; 5219 } 5220 if ((rval = sd_send_scsi_READ(un, buf, lbasize, 5221 cap - 1, path_flag)) != 0) { 5222 goto done_err; 5223 } 5224 sd_swap_efi_gpt((efi_gpt_t *)buf); 5225 if ((rval = sd_validate_efi((efi_gpt_t *)buf)) != 0) 5226 goto done_err; 5227 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5228 "primary label corrupt; using backup\n"); 5229 } 5230 5231 nparts = ((efi_gpt_t *)buf)->efi_gpt_NumberOfPartitionEntries; 5232 gpe_lba = ((efi_gpt_t *)buf)->efi_gpt_PartitionEntryLBA; 5233 5234 rval = sd_send_scsi_READ(un, buf, EFI_MIN_ARRAY_SIZE, gpe_lba, 5235 path_flag); 5236 if (rval) { 5237 goto done_err; 5238 } 5239 partitions = (efi_gpe_t *)buf; 5240 5241 if (nparts > MAXPART) { 5242 nparts = MAXPART; 5243 } 5244 sd_swap_efi_gpe(nparts, partitions); 5245 5246 mutex_enter(SD_MUTEX(un)); 5247 5248 /* Fill in partition table. 
*/ 5249 for (i = 0; i < nparts; i++) { 5250 if (partitions->efi_gpe_StartingLBA != 0 || 5251 partitions->efi_gpe_EndingLBA != 0) { 5252 un->un_map[i].dkl_cylno = 5253 partitions->efi_gpe_StartingLBA; 5254 un->un_map[i].dkl_nblk = 5255 partitions->efi_gpe_EndingLBA - 5256 partitions->efi_gpe_StartingLBA + 1; 5257 un->un_offset[i] = 5258 partitions->efi_gpe_StartingLBA; 5259 } 5260 if (i == WD_NODE) { 5261 /* 5262 * minor number 7 corresponds to the whole disk 5263 */ 5264 un->un_map[i].dkl_cylno = 0; 5265 un->un_map[i].dkl_nblk = un->un_blockcount; 5266 un->un_offset[i] = 0; 5267 } 5268 partitions++; 5269 } 5270 un->un_solaris_offset = 0; 5271 un->un_solaris_size = cap; 5272 un->un_f_geometry_is_valid = TRUE; 5273 kmem_free(buf, EFI_MIN_ARRAY_SIZE); 5274 return (0); 5275 5276 done_err: 5277 kmem_free(buf, EFI_MIN_ARRAY_SIZE); 5278 mutex_enter(SD_MUTEX(un)); 5279 /* 5280 * if we didn't find something that could look like a VTOC 5281 * and the disk is over 1TB, we know there isn't a valid label. 5282 * Otherwise let sd_uselabel decide what to do. We only 5283 * want to invalidate this if we're certain the label isn't 5284 * valid because sd_prop_op will now fail, which in turn 5285 * causes things like opens and stats on the partition to fail. 5286 */ 5287 if ((un->un_blockcount > DK_MAX_BLOCKS) && (rval != ESRCH)) { 5288 un->un_f_geometry_is_valid = FALSE; 5289 } 5290 return (rval); 5291 } 5292 5293 5294 /* 5295 * Function: sd_uselabel 5296 * 5297 * Description: Validate the disk label and update the relevant data (geometry, 5298 * partition, vtoc, and capacity data) in the sd_lun struct. 5299 * Marks the geometry of the unit as being valid. 5300 * 5301 * Arguments: un: unit struct. 5302 * dk_label: disk label 5303 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 5304 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 5305 * to use the USCSI "direct" chain and bypass the normal 5306 * command waitq. 
5307 * 5308 * Return Code: SD_LABEL_IS_VALID: Label read from disk is OK; geometry, 5309 * partition, vtoc, and capacity data are good. 5310 * 5311 * SD_LABEL_IS_INVALID: Magic number or checksum error in the 5312 * label; or computed capacity does not jibe with capacity 5313 * reported from the READ CAPACITY command. 5314 * 5315 * Context: Kernel thread only (can sleep). 5316 */ 5317 5318 static int 5319 sd_uselabel(struct sd_lun *un, struct dk_label *labp, int path_flag) 5320 { 5321 short *sp; 5322 short sum; 5323 short count; 5324 int label_error = SD_LABEL_IS_VALID; 5325 int i; 5326 int capacity; 5327 int part_end; 5328 int track_capacity; 5329 int err; 5330 #if defined(_SUNOS_VTOC_16) 5331 struct dkl_partition *vpartp; 5332 #endif 5333 ASSERT(un != NULL); 5334 ASSERT(mutex_owned(SD_MUTEX(un))); 5335 5336 /* Validate the magic number of the label. */ 5337 if (labp->dkl_magic != DKL_MAGIC) { 5338 #if defined(__sparc) 5339 if ((un->un_state == SD_STATE_NORMAL) && 5340 !ISREMOVABLE(un)) { 5341 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5342 "Corrupt label; wrong magic number\n"); 5343 } 5344 #endif 5345 return (SD_LABEL_IS_INVALID); 5346 } 5347 5348 /* Validate the checksum of the label. */ 5349 sp = (short *)labp; 5350 sum = 0; 5351 count = sizeof (struct dk_label) / sizeof (short); 5352 while (count--) { 5353 sum ^= *sp++; 5354 } 5355 5356 if (sum != 0) { 5357 #if defined(_SUNOS_VTOC_16) 5358 if (un->un_state == SD_STATE_NORMAL && !ISCD(un)) { 5359 #elif defined(_SUNOS_VTOC_8) 5360 if (un->un_state == SD_STATE_NORMAL && !ISREMOVABLE(un)) { 5361 #endif 5362 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5363 "Corrupt label - label checksum failed\n"); 5364 } 5365 return (SD_LABEL_IS_INVALID); 5366 } 5367 5368 5369 /* 5370 * Fill in geometry structure with data from label. 
5371 */ 5372 bzero(&un->un_g, sizeof (struct dk_geom)); 5373 un->un_g.dkg_ncyl = labp->dkl_ncyl; 5374 un->un_g.dkg_acyl = labp->dkl_acyl; 5375 un->un_g.dkg_bcyl = 0; 5376 un->un_g.dkg_nhead = labp->dkl_nhead; 5377 un->un_g.dkg_nsect = labp->dkl_nsect; 5378 un->un_g.dkg_intrlv = labp->dkl_intrlv; 5379 5380 #if defined(_SUNOS_VTOC_8) 5381 un->un_g.dkg_gap1 = labp->dkl_gap1; 5382 un->un_g.dkg_gap2 = labp->dkl_gap2; 5383 un->un_g.dkg_bhead = labp->dkl_bhead; 5384 #endif 5385 #if defined(_SUNOS_VTOC_16) 5386 un->un_dkg_skew = labp->dkl_skew; 5387 #endif 5388 5389 #if defined(__i386) || defined(__amd64) 5390 un->un_g.dkg_apc = labp->dkl_apc; 5391 #endif 5392 5393 /* 5394 * Currently we rely on the values in the label being accurate. If 5395 * dlk_rpm or dlk_pcly are zero in the label, use a default value. 5396 * 5397 * Note: In the future a MODE SENSE may be used to retrieve this data, 5398 * although this command is optional in SCSI-2. 5399 */ 5400 un->un_g.dkg_rpm = (labp->dkl_rpm != 0) ? labp->dkl_rpm : 3600; 5401 un->un_g.dkg_pcyl = (labp->dkl_pcyl != 0) ? labp->dkl_pcyl : 5402 (un->un_g.dkg_ncyl + un->un_g.dkg_acyl); 5403 5404 /* 5405 * The Read and Write reinstruct values may not be valid 5406 * for older disks. 5407 */ 5408 un->un_g.dkg_read_reinstruct = labp->dkl_read_reinstruct; 5409 un->un_g.dkg_write_reinstruct = labp->dkl_write_reinstruct; 5410 5411 /* Fill in partition table. */ 5412 #if defined(_SUNOS_VTOC_8) 5413 for (i = 0; i < NDKMAP; i++) { 5414 un->un_map[i].dkl_cylno = labp->dkl_map[i].dkl_cylno; 5415 un->un_map[i].dkl_nblk = labp->dkl_map[i].dkl_nblk; 5416 } 5417 #endif 5418 #if defined(_SUNOS_VTOC_16) 5419 vpartp = labp->dkl_vtoc.v_part; 5420 track_capacity = labp->dkl_nhead * labp->dkl_nsect; 5421 5422 for (i = 0; i < NDKMAP; i++, vpartp++) { 5423 un->un_map[i].dkl_cylno = vpartp->p_start / track_capacity; 5424 un->un_map[i].dkl_nblk = vpartp->p_size; 5425 } 5426 #endif 5427 5428 /* Fill in VTOC Structure. 
*/ 5429 bcopy(&labp->dkl_vtoc, &un->un_vtoc, sizeof (struct dk_vtoc)); 5430 #if defined(_SUNOS_VTOC_8) 5431 /* 5432 * The 8-slice vtoc does not include the ascii label; save it into 5433 * the device's soft state structure here. 5434 */ 5435 bcopy(labp->dkl_asciilabel, un->un_asciilabel, LEN_DKL_ASCII); 5436 #endif 5437 5438 /* Mark the geometry as valid. */ 5439 un->un_f_geometry_is_valid = TRUE; 5440 5441 /* Now look for a valid capacity. */ 5442 track_capacity = (un->un_g.dkg_nhead * un->un_g.dkg_nsect); 5443 capacity = (un->un_g.dkg_ncyl * track_capacity); 5444 5445 if (un->un_g.dkg_acyl) { 5446 #if defined(__i386) || defined(__amd64) 5447 /* we may have > 1 alts cylinder */ 5448 capacity += (track_capacity * un->un_g.dkg_acyl); 5449 #else 5450 capacity += track_capacity; 5451 #endif 5452 } 5453 5454 /* 5455 * At this point, un->un_blockcount should contain valid data from 5456 * the READ CAPACITY command. 5457 */ 5458 if (un->un_f_blockcount_is_valid != TRUE) { 5459 /* 5460 * We have a situation where the target didn't give us a good 5461 * READ CAPACITY value, yet there appears to be a valid label. 5462 * In this case, we'll fake the capacity. 5463 */ 5464 un->un_blockcount = capacity; 5465 un->un_f_blockcount_is_valid = TRUE; 5466 goto done; 5467 } 5468 5469 5470 if ((capacity <= un->un_blockcount) || 5471 (un->un_state != SD_STATE_NORMAL)) { 5472 #if defined(_SUNOS_VTOC_8) 5473 /* 5474 * We can't let this happen on drives that are subdivided 5475 * into logical disks (i.e., that have an fdisk table). 5476 * The un_blockcount field should always hold the full media 5477 * size in sectors, period. This code would overwrite 5478 * un_blockcount with the size of the Solaris fdisk partition. 
5479 */ 5480 SD_ERROR(SD_LOG_COMMON, un, 5481 "sd_uselabel: Label %d blocks; Drive %d blocks\n", 5482 capacity, un->un_blockcount); 5483 un->un_blockcount = capacity; 5484 un->un_f_blockcount_is_valid = TRUE; 5485 #endif /* defined(_SUNOS_VTOC_8) */ 5486 goto done; 5487 } 5488 5489 if (ISCD(un)) { 5490 /* For CDROMs, we trust that the data in the label is OK. */ 5491 #if defined(_SUNOS_VTOC_8) 5492 for (i = 0; i < NDKMAP; i++) { 5493 part_end = labp->dkl_nhead * labp->dkl_nsect * 5494 labp->dkl_map[i].dkl_cylno + 5495 labp->dkl_map[i].dkl_nblk - 1; 5496 5497 if ((labp->dkl_map[i].dkl_nblk) && 5498 (part_end > un->un_blockcount)) { 5499 un->un_f_geometry_is_valid = FALSE; 5500 break; 5501 } 5502 } 5503 #endif 5504 #if defined(_SUNOS_VTOC_16) 5505 vpartp = &(labp->dkl_vtoc.v_part[0]); 5506 for (i = 0; i < NDKMAP; i++, vpartp++) { 5507 part_end = vpartp->p_start + vpartp->p_size; 5508 if ((vpartp->p_size > 0) && 5509 (part_end > un->un_blockcount)) { 5510 un->un_f_geometry_is_valid = FALSE; 5511 break; 5512 } 5513 } 5514 #endif 5515 } else { 5516 uint64_t t_capacity; 5517 uint32_t t_lbasize; 5518 5519 mutex_exit(SD_MUTEX(un)); 5520 err = sd_send_scsi_READ_CAPACITY(un, &t_capacity, &t_lbasize, 5521 path_flag); 5522 ASSERT(t_capacity <= DK_MAX_BLOCKS); 5523 mutex_enter(SD_MUTEX(un)); 5524 5525 if (err == 0) { 5526 sd_update_block_info(un, t_lbasize, t_capacity); 5527 } 5528 5529 if (capacity > un->un_blockcount) { 5530 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5531 "Corrupt label - bad geometry\n"); 5532 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 5533 "Label says %u blocks; Drive says %llu blocks\n", 5534 capacity, (unsigned long long)un->un_blockcount); 5535 un->un_f_geometry_is_valid = FALSE; 5536 label_error = SD_LABEL_IS_INVALID; 5537 } 5538 } 5539 5540 done: 5541 5542 SD_INFO(SD_LOG_COMMON, un, "sd_uselabel: (label geometry)\n"); 5543 SD_INFO(SD_LOG_COMMON, un, 5544 " ncyl: %d; acyl: %d; nhead: %d; nsect: %d\n", 5545 un->un_g.dkg_ncyl, un->un_g.dkg_acyl, 5546 
un->un_g.dkg_nhead, un->un_g.dkg_nsect); 5547 SD_INFO(SD_LOG_COMMON, un, 5548 " lbasize: %d; capacity: %d; intrlv: %d; rpm: %d\n", 5549 un->un_tgt_blocksize, un->un_blockcount, 5550 un->un_g.dkg_intrlv, un->un_g.dkg_rpm); 5551 SD_INFO(SD_LOG_COMMON, un, " wrt_reinstr: %d; rd_reinstr: %d\n", 5552 un->un_g.dkg_write_reinstruct, un->un_g.dkg_read_reinstruct); 5553 5554 ASSERT(mutex_owned(SD_MUTEX(un))); 5555 5556 return (label_error); 5557 } 5558 5559 5560 /* 5561 * Function: sd_build_default_label 5562 * 5563 * Description: Generate a default label for those devices that do not have 5564 * one, e.g., new media, removable cartridges, etc.. 5565 * 5566 * Context: Kernel thread only 5567 */ 5568 5569 static void 5570 sd_build_default_label(struct sd_lun *un) 5571 { 5572 #if defined(_SUNOS_VTOC_16) 5573 uint_t phys_spc; 5574 uint_t disksize; 5575 struct dk_geom un_g; 5576 #endif 5577 5578 ASSERT(un != NULL); 5579 ASSERT(mutex_owned(SD_MUTEX(un))); 5580 5581 #if defined(_SUNOS_VTOC_8) 5582 /* 5583 * Note: This is a legacy check for non-removable devices on VTOC_8 5584 * only. This may be a valid check for VTOC_16 as well. 5585 */ 5586 if (!ISREMOVABLE(un)) { 5587 return; 5588 } 5589 #endif 5590 5591 bzero(&un->un_g, sizeof (struct dk_geom)); 5592 bzero(&un->un_vtoc, sizeof (struct dk_vtoc)); 5593 bzero(&un->un_map, NDKMAP * (sizeof (struct dk_map))); 5594 5595 #if defined(_SUNOS_VTOC_8) 5596 5597 /* 5598 * It's a REMOVABLE media, therefore no label (on sparc, anyway). 5599 * But it is still necessary to set up various geometry information, 5600 * and we are doing this here. 5601 */ 5602 5603 /* 5604 * For the rpm, we use the minimum for the disk. For the head, cyl, 5605 * and number of sector per track, if the capacity <= 1GB, head = 64, 5606 * sect = 32. else head = 255, sect 63 Note: the capacity should be 5607 * equal to C*H*S values. This will cause some truncation of size due 5608 * to round off errors. 
For CD-ROMs, this truncation can have adverse 5609 * side effects, so returning ncyl and nhead as 1. The nsect will 5610 * overflow for most of CD-ROMs as nsect is of type ushort. (4190569) 5611 */ 5612 if (ISCD(un)) { 5613 /* 5614 * Preserve the old behavior for non-writable 5615 * medias. Since dkg_nsect is a ushort, it 5616 * will lose bits as cdroms have more than 5617 * 65536 sectors. So if we recalculate 5618 * capacity, it will become much shorter. 5619 * But the dkg_* information is not 5620 * used for CDROMs so it is OK. But for 5621 * Writable CDs we need this information 5622 * to be valid (for newfs say). So we 5623 * make nsect and nhead > 1 that way 5624 * nsect can still stay within ushort limit 5625 * without losing any bits. 5626 */ 5627 if (un->un_f_mmc_writable_media == TRUE) { 5628 un->un_g.dkg_nhead = 64; 5629 un->un_g.dkg_nsect = 32; 5630 un->un_g.dkg_ncyl = un->un_blockcount / (64 * 32); 5631 un->un_blockcount = un->un_g.dkg_ncyl * 5632 un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5633 } else { 5634 un->un_g.dkg_ncyl = 1; 5635 un->un_g.dkg_nhead = 1; 5636 un->un_g.dkg_nsect = un->un_blockcount; 5637 } 5638 } else { 5639 if (un->un_blockcount <= 0x1000) { 5640 /* unlabeled SCSI floppy device */ 5641 un->un_g.dkg_nhead = 2; 5642 un->un_g.dkg_ncyl = 80; 5643 un->un_g.dkg_nsect = un->un_blockcount / (2 * 80); 5644 } else if (un->un_blockcount <= 0x200000) { 5645 un->un_g.dkg_nhead = 64; 5646 un->un_g.dkg_nsect = 32; 5647 un->un_g.dkg_ncyl = un->un_blockcount / (64 * 32); 5648 } else { 5649 un->un_g.dkg_nhead = 255; 5650 un->un_g.dkg_nsect = 63; 5651 un->un_g.dkg_ncyl = un->un_blockcount / (255 * 63); 5652 } 5653 un->un_blockcount = 5654 un->un_g.dkg_ncyl * un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5655 } 5656 5657 un->un_g.dkg_acyl = 0; 5658 un->un_g.dkg_bcyl = 0; 5659 un->un_g.dkg_rpm = 200; 5660 un->un_asciilabel[0] = '\0'; 5661 un->un_g.dkg_pcyl = un->un_g.dkg_ncyl; 5662 5663 un->un_map[0].dkl_cylno = 0; 5664 un->un_map[0].dkl_nblk = 
un->un_blockcount; 5665 un->un_map[2].dkl_cylno = 0; 5666 un->un_map[2].dkl_nblk = un->un_blockcount; 5667 5668 #elif defined(_SUNOS_VTOC_16) 5669 5670 if (un->un_solaris_size == 0) { 5671 /* 5672 * Got fdisk table but no solaris entry therefore 5673 * don't create a default label 5674 */ 5675 un->un_f_geometry_is_valid = TRUE; 5676 return; 5677 } 5678 5679 /* 5680 * For CDs we continue to use the physical geometry to calculate 5681 * number of cylinders. All other devices must convert the 5682 * physical geometry (geom_cache) to values that will fit 5683 * in a dk_geom structure. 5684 */ 5685 if (ISCD(un)) { 5686 phys_spc = un->un_pgeom.g_nhead * un->un_pgeom.g_nsect; 5687 } else { 5688 /* Convert physical geometry to disk geometry */ 5689 bzero(&un_g, sizeof (struct dk_geom)); 5690 sd_convert_geometry(un->un_blockcount, &un_g); 5691 bcopy(&un_g, &un->un_g, sizeof (un->un_g)); 5692 phys_spc = un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5693 } 5694 5695 un->un_g.dkg_pcyl = un->un_solaris_size / phys_spc; 5696 un->un_g.dkg_acyl = DK_ACYL; 5697 un->un_g.dkg_ncyl = un->un_g.dkg_pcyl - DK_ACYL; 5698 disksize = un->un_g.dkg_ncyl * phys_spc; 5699 5700 if (ISCD(un)) { 5701 /* 5702 * CD's don't use the "heads * sectors * cyls"-type of 5703 * geometry, but instead use the entire capacity of the media. 5704 */ 5705 disksize = un->un_solaris_size; 5706 un->un_g.dkg_nhead = 1; 5707 un->un_g.dkg_nsect = 1; 5708 un->un_g.dkg_rpm = 5709 (un->un_pgeom.g_rpm == 0) ? 200 : un->un_pgeom.g_rpm; 5710 5711 un->un_vtoc.v_part[0].p_start = 0; 5712 un->un_vtoc.v_part[0].p_size = disksize; 5713 un->un_vtoc.v_part[0].p_tag = V_BACKUP; 5714 un->un_vtoc.v_part[0].p_flag = V_UNMNT; 5715 5716 un->un_map[0].dkl_cylno = 0; 5717 un->un_map[0].dkl_nblk = disksize; 5718 un->un_offset[0] = 0; 5719 5720 } else { 5721 /* 5722 * Hard disks and removable media cartridges 5723 */ 5724 un->un_g.dkg_rpm = 5725 (un->un_pgeom.g_rpm == 0) ? 
3600: un->un_pgeom.g_rpm; 5726 un->un_vtoc.v_sectorsz = un->un_sys_blocksize; 5727 5728 /* Add boot slice */ 5729 un->un_vtoc.v_part[8].p_start = 0; 5730 un->un_vtoc.v_part[8].p_size = phys_spc; 5731 un->un_vtoc.v_part[8].p_tag = V_BOOT; 5732 un->un_vtoc.v_part[8].p_flag = V_UNMNT; 5733 5734 un->un_map[8].dkl_cylno = 0; 5735 un->un_map[8].dkl_nblk = phys_spc; 5736 un->un_offset[8] = 0; 5737 } 5738 5739 un->un_g.dkg_apc = 0; 5740 un->un_vtoc.v_nparts = V_NUMPAR; 5741 un->un_vtoc.v_version = V_VERSION; 5742 5743 /* Add backup slice */ 5744 un->un_vtoc.v_part[2].p_start = 0; 5745 un->un_vtoc.v_part[2].p_size = disksize; 5746 un->un_vtoc.v_part[2].p_tag = V_BACKUP; 5747 un->un_vtoc.v_part[2].p_flag = V_UNMNT; 5748 5749 un->un_map[2].dkl_cylno = 0; 5750 un->un_map[2].dkl_nblk = disksize; 5751 un->un_offset[2] = 0; 5752 5753 (void) sprintf(un->un_vtoc.v_asciilabel, "DEFAULT cyl %d alt %d" 5754 " hd %d sec %d", un->un_g.dkg_ncyl, un->un_g.dkg_acyl, 5755 un->un_g.dkg_nhead, un->un_g.dkg_nsect); 5756 5757 #else 5758 #error "No VTOC format defined." 5759 #endif 5760 5761 un->un_g.dkg_read_reinstruct = 0; 5762 un->un_g.dkg_write_reinstruct = 0; 5763 5764 un->un_g.dkg_intrlv = 1; 5765 5766 un->un_vtoc.v_sanity = VTOC_SANE; 5767 5768 un->un_f_geometry_is_valid = TRUE; 5769 5770 SD_INFO(SD_LOG_COMMON, un, 5771 "sd_build_default_label: Default label created: " 5772 "cyl: %d\tacyl: %d\tnhead: %d\tnsect: %d\tcap: %d\n", 5773 un->un_g.dkg_ncyl, un->un_g.dkg_acyl, un->un_g.dkg_nhead, 5774 un->un_g.dkg_nsect, un->un_blockcount); 5775 } 5776 5777 5778 #if defined(_FIRMWARE_NEEDS_FDISK) 5779 /* 5780 * Max CHS values, as they are encoded into bytes, for 1022/254/63 5781 */ 5782 #define LBA_MAX_SECT (63 | ((1022 & 0x300) >> 2)) 5783 #define LBA_MAX_CYL (1022 & 0xFF) 5784 #define LBA_MAX_HEAD (254) 5785 5786 5787 /* 5788 * Function: sd_has_max_chs_vals 5789 * 5790 * Description: Return TRUE if Cylinder-Head-Sector values are all at maximum. 
5791 * 5792 * Arguments: fdp - ptr to CHS info 5793 * 5794 * Return Code: True or false 5795 * 5796 * Context: Any. 5797 */ 5798 5799 static int 5800 sd_has_max_chs_vals(struct ipart *fdp) 5801 { 5802 return ((fdp->begcyl == LBA_MAX_CYL) && 5803 (fdp->beghead == LBA_MAX_HEAD) && 5804 (fdp->begsect == LBA_MAX_SECT) && 5805 (fdp->endcyl == LBA_MAX_CYL) && 5806 (fdp->endhead == LBA_MAX_HEAD) && 5807 (fdp->endsect == LBA_MAX_SECT)); 5808 } 5809 #endif 5810 5811 5812 /* 5813 * Function: sd_inq_fill 5814 * 5815 * Description: Print a piece of inquiry data, cleaned up for non-printable 5816 * characters and stopping at the first space character after 5817 * the beginning of the passed string; 5818 * 5819 * Arguments: p - source string 5820 * l - maximum length to copy 5821 * s - destination string 5822 * 5823 * Context: Any. 5824 */ 5825 5826 static void 5827 sd_inq_fill(char *p, int l, char *s) 5828 { 5829 unsigned i = 0; 5830 char c; 5831 5832 while (i++ < l) { 5833 if ((c = *p++) < ' ' || c >= 0x7F) { 5834 c = '*'; 5835 } else if (i != 1 && c == ' ') { 5836 break; 5837 } 5838 *s++ = c; 5839 } 5840 *s++ = 0; 5841 } 5842 5843 5844 /* 5845 * Function: sd_register_devid 5846 * 5847 * Description: This routine will obtain the device id information from the 5848 * target, obtain the serial number, and register the device 5849 * id with the ddi framework. 5850 * 5851 * Arguments: devi - the system's dev_info_t for the device. 
 *		un - driver soft state (unit) structure
 *		reservation_flag - indicates if a reservation conflict
 *			occurred during attach
 *
 * Context: Kernel Thread
 */
static void
sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag)
{
	int		rval		= 0;
	uchar_t		*inq80		= NULL;
	size_t		inq80_len	= MAX_INQUIRY_SIZE;
	size_t		inq80_resid	= 0;
	uchar_t		*inq83		= NULL;
	size_t		inq83_len	= MAX_INQUIRY_SIZE;
	size_t		inq83_resid	= 0;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT((SD_DEVINFO(un)) == devi);

	/*
	 * This is the case of antiquated Sun disk drives that have the
	 * FAB_DEVID property set in the disk_table.  These drives
	 * manage the devid's by storing them in last 2 available sectors
	 * on the drive and have them fabricated by the ddi layer by calling
	 * ddi_devid_init and passing the DEVID_FAB flag.
	 */
	if (un->un_f_opt_fab_devid == TRUE) {
		/*
		 * Depending on EINVAL isn't reliable, since a reserved disk
		 * may result in invalid geometry, so check to make sure a
		 * reservation conflict did not occur during attach.
		 */
		if ((sd_get_devid(un) == EINVAL) &&
		    (reservation_flag != SD_TARGET_IS_RESERVED)) {
			/*
			 * The devid is invalid AND there is no reservation
			 * conflict.  Fabricate a new devid.
			 */
			(void) sd_create_devid(un);
		}

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: Devid Fabricated\n");
		}
		return;
	}

	/*
	 * We check the availibility of the World Wide Name (0x83) and Unit
	 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
	 * un_vpd_page_mask from them, we decide which way to get the WWN.  If
	 * 0x83 is availible, that is the best choice.  Our next choice is
	 * 0x80.  If neither are availible, we munge the devid from the device
	 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
	 * to fabricate a devid for non-Sun qualified disks.
	 */
	if (sd_check_vpd_page_support(un) == 0) {
		/*
		 * Collect page 80 data if available.  SD_MUTEX is dropped
		 * around the INQUIRY since the command can sleep; un state
		 * may change while the mutex is released.
		 */
		if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {

			mutex_exit(SD_MUTEX(un));
			inq80 = kmem_zalloc(inq80_len, KM_SLEEP);
			rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len,
			    0x01, 0x80, &inq80_resid);

			if (rval != 0) {
				kmem_free(inq80, inq80_len);
				inq80 = NULL;
				inq80_len = 0;
			}
			mutex_enter(SD_MUTEX(un));
		}

		/* collect page 83 data if available */
		if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {

			mutex_exit(SD_MUTEX(un));
			inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
			rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len,
			    0x01, 0x83, &inq83_resid);

			if (rval != 0) {
				kmem_free(inq83, inq83_len);
				inq83 = NULL;
				inq83_len = 0;
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * Encode best devid possible based on data available.
	 * NOTE(review): when an INQUIRY above failed, its *_len was reset
	 * to 0 but its *_resid was left at whatever the failed command
	 * reported, so "len - resid" can be nonzero for a NULL buffer —
	 * presumably ddi_devid_scsi_encode ignores the length when the
	 * buffer pointer is NULL; confirm against its man page.
	 */
	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
	    (char *)ddi_driver_name(SD_DEVINFO(un)),
	    (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
	    inq80, inq80_len - inq80_resid, inq83, inq83_len -
	    inq83_resid, &un->un_devid) == DDI_SUCCESS) {

		/* devid successfully encoded, register devid */
		(void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);

	} else {
		/*
		 * Unable to encode a devid based on data available.
		 * This is not a Sun qualified disk.  Older Sun disk
		 * drives that have the SD_FAB_DEVID property
		 * set in the disk_table and non Sun qualified
		 * disks are treated in the same manner.  These
		 * drives manage the devid's by storing them in
		 * last 2 available sectors on the drive and
		 * have them fabricated by the ddi layer by
		 * calling ddi_devid_init and passing the
		 * DEVID_FAB flag.
		 * Create a fabricate devid only if there's no
		 * fabricate devid existed.
		 */
		if (sd_get_devid(un) == EINVAL) {
			(void) sd_create_devid(un);
			un->un_f_opt_fab_devid = TRUE;
		}

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: devid fabricated using "
			    "ddi framework\n");
		}
	}

	/* clean up resources */
	if (inq80 != NULL) {
		kmem_free(inq80, inq80_len);
	}
	if (inq83 != NULL) {
		kmem_free(inq83, inq83_len);
	}
}

/*
 * Function: sd_get_devid_block
 *
 * Description: Compute the block address where the on-disk devid lives:
 *		for small disks (<= DK_MAX_BLOCKS) it is derived from the
 *		VTOC geometry in the next-to-last alternate cylinder; for
 *		EFI-sized disks it follows the reserved partition.
 *
 * Return Code: block number, or -1 if the geometry provides no home
 *		for a devid.
 */
static daddr_t
sd_get_devid_block(struct sd_lun *un)
{
	daddr_t spc, blk, head, cyl;

	if (un->un_blockcount <= DK_MAX_BLOCKS) {
		/* this geometry doesn't allow us to write a devid */
		if (un->un_g.dkg_acyl < 2) {
			return (-1);
		}

		/*
		 * Subtract 2 guarantees that the next to last cylinder
		 * is used
		 */
		cyl = un->un_g.dkg_ncyl + un->un_g.dkg_acyl - 2;
		spc = un->un_g.dkg_nhead * un->un_g.dkg_nsect;
		head = un->un_g.dkg_nhead - 1;
		/* block 1 of the last track of the next-to-last cylinder */
		blk = (cyl * (spc - un->un_g.dkg_apc)) +
		    (head * un->un_g.dkg_nsect) + 1;
	} else {
		if (un->un_reserved != -1) {
			blk = un->un_map[un->un_reserved].dkl_cylno + 1;
		} else {
			return (-1);
		}
	}
	return (blk);
}

/*
 * Function: sd_get_devid
 *
 * Description: This routine will return 0 if a valid device id has been
 *		obtained from the target and stored in the soft state.  If a
 *		valid device id has not been previously read and stored, a
 *		read attempt will be made.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: 0 if we successfully get the device id
 *
 * Context: Kernel Thread
 */

static int
sd_get_devid(struct sd_lun *un)
{
	struct dk_devid		*dkdevid;
	ddi_devid_t		tmpid;
	uint_t			*ip;
	size_t			sz;
	daddr_t			blk;
	int			status;
	int			chksum;
	int			i;
	size_t			buffer_size;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n",
	    un);

	/* Already have a devid cached in the soft state; nothing to do. */
	if (un->un_devid != NULL) {
		return (0);
	}

	blk = sd_get_devid_block(un);
	if (blk < 0)
		return (EINVAL);

	/*
	 * Read and verify device id, stored in the reserved cylinders at the
	 * end of the disk. Backup label is on the odd sectors of the last
	 * track of the last cylinder. Device id will be on track of the next
	 * to last cylinder.
	 */
	buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid));
	/* Drop SD_MUTEX for the blocking allocation and disk read. */
	mutex_exit(SD_MUTEX(un));
	dkdevid = kmem_alloc(buffer_size, KM_SLEEP);
	status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk,
	    SD_PATH_DIRECT);
	if (status != 0) {
		goto error;
	}

	/* Validate the revision */
	if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
		status = EINVAL;
		goto error;
	}

	/*
	 * Calculate the checksum by XOR-ing the sector as words, excluding
	 * the trailing word which holds the stored checksum.
	 * NOTE(review): the loop bound uses un_sys_blocksize while the
	 * buffer was sized with SD_REQBYTES2TGTBYTES(); presumably the
	 * target-byte size is always >= un_sys_blocksize — confirm.
	 */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Compare the checksums */
	if (DKD_GETCHKSUM(dkdevid) != chksum) {
		status = EINVAL;
		goto error;
	}

	/* Validate the device id */
	if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
		status = EINVAL;
		goto error;
	}

	/*
	 * Store the device id in the driver soft state
	 */
	sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
	tmpid = kmem_alloc(sz, KM_SLEEP);

	mutex_enter(SD_MUTEX(un));

	un->un_devid = tmpid;
	bcopy(&dkdevid->dkd_devid, un->un_devid, sz);

	kmem_free(dkdevid, buffer_size);

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un);

	return (status);
error:
	/* Re-take SD_MUTEX (held on entry) before returning on all paths. */
	mutex_enter(SD_MUTEX(un));
	kmem_free(dkdevid, buffer_size);
	return (status);
}


/*
 * Function: sd_create_devid
 *
 * Description: This routine will fabricate the device id and write it
 *		to the disk.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: value of the fabricated device id
 *
 * Context: Kernel Thread
 */

static ddi_devid_t
sd_create_devid(struct sd_lun *un)
{
	ASSERT(un != NULL);

	/* Fabricate the devid */
	if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
	    == DDI_FAILURE) {
		return (NULL);
	}

	/*
	 * Write the devid to disk; if that fails, discard the fabricated
	 * devid so the caller sees NULL rather than an unpersisted id.
	 */
	if (sd_write_deviceid(un) != 0) {
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	return (un->un_devid);
}


/*
 * Function: sd_write_deviceid
 *
 * Description: This routine will write the device id to the disk
 *		reserved sector.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: EINVAL
 *		value returned by sd_send_scsi_cmd
 *
 * Context: Kernel Thread
 */

static int
sd_write_deviceid(struct sd_lun *un)
{
	struct dk_devid		*dkdevid;
	daddr_t			blk;
	uint_t			*ip, chksum;
	int			status;
	int			i;

	ASSERT(mutex_owned(SD_MUTEX(un)));

	blk = sd_get_devid_block(un);
	if (blk < 0)
		/*
		 * NOTE(review): returns -1 although the header above
		 * documents EINVAL; callers only test for nonzero, so
		 * both behave the same — confirm before changing either.
		 */
		return (-1);
	/* Drop SD_MUTEX around the blocking allocation and disk write. */
	mutex_exit(SD_MUTEX(un));

	/* Allocate the buffer */
	dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);

	/* Fill in the revision */
	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id (un_devid is protected by SD_MUTEX). */
	mutex_enter(SD_MUTEX(un));
	bcopy(un->un_devid, &dkdevid->dkd_devid,
	    ddi_devid_sizeof(un->un_devid));
	mutex_exit(SD_MUTEX(un));

	/*
	 * Calculate the checksum: XOR of the sector taken as words,
	 * excluding the trailing word that stores the checksum itself.
	 */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Fill-in checksum */
	DKD_FORMCHKSUM(chksum, dkdevid);

	/* Write the reserved sector */
	status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk,
	    SD_PATH_DIRECT);

	kmem_free(dkdevid, un->un_sys_blocksize);

	/* Re-take SD_MUTEX, which was held on entry. */
	mutex_enter(SD_MUTEX(un));
	return (status);
}


/*
 * Function: sd_check_vpd_page_support
 *
 * Description: This routine sends an inquiry command with the EVPD bit set and
 *		a page code of 0x00 to the device. It is used to determine which
 *		vital product pages are availible to find the devid. We are
 *		looking for pages 0x83 or 0x80.  If we return a negative 1, the
 *		device does not support that command.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: 0 - success
 *		1 - check condition
 *
 * Context: This routine can sleep.
 */

static int
sd_check_vpd_page_support(struct sd_lun *un)
{
	uchar_t	*page_list	= NULL;
	uchar_t	page_length	= 0xff;	/* Use max possible length */
	uchar_t	evpd		= 0x01;	/* Set the EVPD bit */
	uchar_t	page_code	= 0x00;	/* Supported VPD Pages */
	int	rval		= 0;
	int	counter;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* Drop SD_MUTEX across the blocking allocation and INQUIRY. */
	mutex_exit(SD_MUTEX(un));

	/*
	 * We'll set the page length to the maximum to save figuring it out
	 * with an additional call.
	 */
	page_list = kmem_zalloc(page_length, KM_SLEEP);

	rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd,
	    page_code, NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Now we must validate that the device accepted the command, as some
	 * drives do not support it.  If the drive does support it, we will
	 * return 0, and the supported pages will be in un_vpd_page_mask.  If
	 * not, we return -1.
	 */
	if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) {
		/* Loop to find one of the 2 pages we need */
		counter = 4;  /* Supported pages start at byte 4, with 0x00 */

		/*
		 * Pages are returned in ascending order, and 0x83 is what we
		 * are hoping for.
		 * NOTE(review): the loop stops at the first page code above
		 * 0x83 or at the reported page-list length, whichever comes
		 * first — presumably VPD_PAGE_LENGTH/VPD_HEAD_OFFSET keep
		 * "counter" within the 0xff buffer; confirm in sddef.h.
		 */
		while ((page_list[counter] <= 0x83) &&
		    (counter <= (page_list[VPD_PAGE_LENGTH] +
		    VPD_HEAD_OFFSET))) {
			/*
			 * Add 3 because page_list[3] is the number of
			 * pages minus 3
			 */

			switch (page_list[counter]) {
			case 0x00:
				un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG;
				break;
			case 0x80:
				un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG;
				break;
			case 0x81:
				un->un_vpd_page_mask |= SD_VPD_OPERATING_PG;
				break;
			case 0x82:
				un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG;
				break;
			case 0x83:
				un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG;
				break;
			}
			counter++;
		}

	} else {
		rval = -1;

		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_check_vpd_page_support: This drive does not implement "
		    "VPD pages.\n");
	}

	kmem_free(page_list, page_length);

	return (rval);
}


/*
 * Function: sd_setup_pm
 *
 * Description: Initialize Power Management on the device
 *
 * Context: Kernel Thread
 */

static void
sd_setup_pm(struct sd_lun *un, dev_info_t *devi)
{
	uint_t	log_page_size;
	uchar_t	*log_page_data;
	int	rval;

	/*
	 * Since we are called from attach, holding a mutex for
	 * un is unnecessary. Because some of the routines called
	 * from here require SD_MUTEX to not be held, assert this
	 * right up front.
	 */
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	/*
	 * Since the sd device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * DOES need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
	    "pm-hardware-state", "needs-suspend-resume");

	/*
	 * Check if HBA has set the "pm-capable" property.
	 * If "pm-capable" exists and is non-zero then we can
	 * power manage the device without checking the start/stop
	 * cycle count log sense page.
	 *
	 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0)
	 * then we should not power manage the device.
	 *
	 * If "pm-capable" doesn't exist then un->un_pm_capable_prop will
	 * be set to SD_PM_CAPABLE_UNDEFINED (-1).  In this case, sd will
	 * check the start/stop cycle count log sense page and power manage
	 * the device if the cycle count limit has not been exceeded.
	 */
	un->un_pm_capable_prop =
	    ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "pm-capable", SD_PM_CAPABLE_UNDEFINED);
	if (un->un_pm_capable_prop != SD_PM_CAPABLE_UNDEFINED) {
		/*
		 * pm-capable property exists.
		 *
		 * Convert "TRUE" values for un_pm_capable_prop to
		 * SD_PM_CAPABLE_TRUE (1) to make it easier to check later.
		 * "TRUE" values are any values except SD_PM_CAPABLE_FALSE (0)
		 * and SD_PM_CAPABLE_UNDEFINED (-1)
		 */
		if (un->un_pm_capable_prop != SD_PM_CAPABLE_FALSE) {
			un->un_pm_capable_prop = SD_PM_CAPABLE_TRUE;
		}

		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p pm-capable "
		    "property set to %d.\n", un, un->un_pm_capable_prop);
	}

	/*
	 * This complies with the new power management framework
	 * for certain desktop machines.  Create the pm_components
	 * property as a string array property.
	 *
	 * If this is a removable device or if the pm-capable property
	 * is SD_PM_CAPABLE_TRUE (1) then we should create the
	 * pm_components property without checking for the existance of
	 * the start-stop cycle counter log page
	 */
	if (ISREMOVABLE(un) ||
	    un->un_pm_capable_prop == SD_PM_CAPABLE_TRUE) {
		/*
		 * not all devices have a motor, try it first.
		 * some devices may return ILLEGAL REQUEST, some
		 * will hang
		 */
		un->un_f_start_stop_supported = TRUE;
		if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
		    SD_PATH_DIRECT) != 0) {
			un->un_f_start_stop_supported = FALSE;
		}

		/*
		 * create pm properties anyways otherwise the parent can't
		 * go to sleep
		 */
		(void) sd_create_pm_components(devi, un);
		un->un_f_pm_is_enabled = TRUE;

		/*
		 * Need to create a zero length (Boolean) property
		 * removable-media for the removable media devices.
		 * Note that the return value of the property is not being
		 * checked, since if unable to create the property
		 * then do not want the attach to fail altogether. Consistent
		 * with other property creation in attach.
		 */
		if (ISREMOVABLE(un)) {
			(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
			    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);
		}
		return;
	}

	rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE);

#ifdef	SDDEBUG
	if (sd_force_pm_supported) {
		/* Force a successful result */
		rval = 1;
	}
#endif

	/*
	 * If the start-stop cycle counter log page is not supported
	 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0)
	 * then we should not create the pm_components property.
	 */
	if (rval == -1 || un->un_pm_capable_prop == SD_PM_CAPABLE_FALSE) {
		/*
		 * Error.
		 * Reading log sense failed, most likely this is
		 * an older drive that does not support log sense.
		 * If this fails auto-pm is not supported.
		 */
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;

	} else if (rval == 0) {
		/*
		 * Page not found.
		 * The start stop cycle counter is implemented as page
		 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For
		 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE).
		 */
		if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) {
			/*
			 * Page found, use this one.
			 */
			un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
			un->un_f_pm_is_enabled = TRUE;
		} else {
			/*
			 * Error or page not found.
			 * auto-pm is not supported for this device.
			 */
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}
	} else {
		/*
		 * Page found, use it.
		 */
		un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
		un->un_f_pm_is_enabled = TRUE;
	}


	if (un->un_f_pm_is_enabled == TRUE) {
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
		    log_page_size, un->un_start_stop_cycle_page,
		    0x01, 0, SD_PATH_DIRECT);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif

		/*
		 * If the Log sense for Page( Start/stop cycle counter page)
		 * succeeds, then power managment is supported and we can
		 * enable auto-pm.
		 */
		if (rval == 0) {
			(void) sd_create_pm_components(devi, un);
		} else {
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}

		kmem_free(log_page_data, log_page_size);
	}
}


/*
 * Function: sd_create_pm_components
 *
 * Description: Initialize PM property.
 *
 * Context: Kernel thread context
 */

static void
sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
{
	/* Single power component: the spindle motor, off (0) or on (1). */
	char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL };

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
	    "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) {
		/*
		 * When components are initially created they are idle,
		 * power up any non-removables.
		 * Note: the return value of pm_raise_power can't be used
		 * for determining if PM should be enabled for this device.
		 * Even if you check the return values and remove this
		 * property created above, the PM framework will not honor the
		 * change after the first call to pm_raise_power. Hence,
		 * removal of that property does not help if pm_raise_power
		 * fails. In the case of removable media, the start/stop
		 * will fail if the media is not present.
		 */
		if ((!ISREMOVABLE(un)) && (pm_raise_power(SD_DEVINFO(un), 0,
		    SD_SPINDLE_ON) == DDI_SUCCESS)) {
			/* Lock order: SD_MUTEX first, then un_pm_mutex. */
			mutex_enter(SD_MUTEX(un));
			un->un_power_level = SD_SPINDLE_ON;
			mutex_enter(&un->un_pm_mutex);
			/* Set to on and not busy. */
			un->un_pm_count = 0;
		} else {
			mutex_enter(SD_MUTEX(un));
			un->un_power_level = SD_SPINDLE_OFF;
			mutex_enter(&un->un_pm_mutex);
			/* Set to off. */
			un->un_pm_count = -1;
		}
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
	} else {
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;
	}
}


/*
 * Function: sd_ddi_suspend
 *
 * Description: Performs system power-down operations.  This includes
 *		setting the drive state to indicate its suspended so
 *		that no new commands will be accepted.  Also, wait for
 *		all commands that are in transport or queued to a timer
 *		for retry to complete.  All timeout threads are cancelled.
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_suspend(dev_info_t *devi)
{
	struct	sd_lun	*un;
	clock_t		wait_cmds_complete;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");

	mutex_enter(SD_MUTEX(un));

	/* Return success if the device is already suspended. */
	if (un->un_state == SD_STATE_SUSPENDED) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device already suspended, exiting\n");
		return (DDI_SUCCESS);
	}

	/* Return failure if the device is being used by HA */
	if (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in use by HA, exiting\n");
		return (DDI_FAILURE);
	}

	/*
	 * Return failure if the device is in a resource wait
	 * or power changing state.
	 */
	if ((un->un_state == SD_STATE_RWAIT) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
		    "device in resource wait state, exiting\n");
		return (DDI_FAILURE);
	}


	un->un_save_state = un->un_last_state;
	New_state(un, SD_STATE_SUSPENDED);

	/*
	 * Wait for all commands that are in transport or queued to a timer
	 * for retry to complete.
	 *
	 * While waiting, no new commands will be accepted or sent because of
	 * the new state we set above.
	 *
	 * Wait till current operation has completed. If we are in the resource
	 * wait state (with an intr outstanding) then we need to wait till the
	 * intr completes and starts the next cmd. We want to wait for
	 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND.
	 */
	/*
	 * cv_timedwait() takes an absolute lbolt deadline, so the total
	 * time spent across loop iterations is bounded by this one value.
	 */
	wait_cmds_complete = ddi_get_lbolt() +
	    (sd_wait_cmds_complete * drv_usectohz(1000000));

	while (un->un_ncmds_in_transport != 0) {
		/*
		 * Fail if commands do not finish in the specified time.
		 */
		if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
		    wait_cmds_complete) == -1) {
			/*
			 * Undo the state changes made above. Everything
			 * must go back to it's original value.
			 */
			Restore_state(un);
			un->un_last_state = un->un_save_state;
			/* Wake up any threads that might be waiting. */
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_ERROR(SD_LOG_IO_PM, un,
			    "sd_ddi_suspend: failed due to outstanding cmds\n");
			SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
			return (DDI_FAILURE);
		}
	}

	/*
	 * Cancel SCSI watch thread and timeouts, if any are active.
	 * Each cancellation drops SD_MUTEX around the blocking call
	 * (scsi_watch_suspend / untimeout) after first clearing the
	 * timeout id under the mutex, so a concurrent firing handler
	 * sees NULL and does not rearm.
	 */

	if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
		opaque_t temp_token = un->un_swr_token;
		mutex_exit(SD_MUTEX(un));
		scsi_watch_suspend(temp_token);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;
		un->un_reset_throttle_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_dcvb_timeid != NULL) {
		timeout_id_t temp_id = un->un_dcvb_timeid;
		un->un_dcvb_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_enter(&un->un_pm_mutex);
	if (un->un_pm_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_timeid;
		un->un_pm_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	if (un->un_retry_timeid != NULL) {
		timeout_id_t temp_id = un->un_retry_timeid;
		un->un_retry_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_direct_priority_timeid != NULL) {
		timeout_id_t temp_id = un->un_direct_priority_timeid;
		un->un_direct_priority_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Remove callbacks for insert and remove events
		 */
		if (un->un_insert_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_insert_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_insert_event = NULL;
		}

		if (un->un_remove_event != NULL) {
			mutex_exit(SD_MUTEX(un));
			(void) ddi_remove_event_handler(un->un_remove_cb_id);
			mutex_enter(SD_MUTEX(un));
			un->un_remove_event = NULL;
		}
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_pm_suspend
 *
 * Description: Set the drive state to low power.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_FAILURE or DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_suspend(struct sd_lun *un)
{
	ASSERT(un != NULL);
	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n");

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	/*
	 * Exit if power management is not enabled for this device, or if
	 * the device is being used by HA.
	 */
	if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
	    (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
		mutex_exit(SD_MUTEX(un));
		SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n");
		return (DDI_SUCCESS);
	}

	SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * See if the device is not busy, ie.:
	 *    - we have no commands in the driver for this device
	 *    - not waiting for resources
	 */
	if ((un->un_ncmds_in_driver == 0) &&
	    (un->un_state != SD_STATE_RWAIT)) {
		/*
		 * The device is not busy, so it is OK to go to low power state.
		 * Indicate low power, but rely on someone else to actually
		 * change it.
		 */
		mutex_enter(&un->un_pm_mutex);
		un->un_pm_count = -1;
		mutex_exit(&un->un_pm_mutex);
		un->un_power_level = SD_SPINDLE_OFF;
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_resume
 *
 * Description: Performs system power-up operations..
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_resume(dev_info_t *devi)
{
	struct	sd_lun	*un;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");

	mutex_enter(SD_MUTEX(un));
	Restore_state(un);

	/*
	 * Restore the state which was saved to give the
	 * the right state in un_last_state
	 */
	un->un_last_state = un->un_save_state;
	/*
	 * Note: throttle comes back at full.
	 * Also note: this MUST be done before calling pm_raise_power
	 * otherwise the system can get hung in biowait. The scenario where
	 * this'll happen is under cpr suspend. Writing of the system
	 * state goes through sddump, which writes 0 to un_throttle. If
	 * writing the system state then fails, example if the partition is
	 * too small, then cpr attempts a resume. If throttle isn't restored
	 * from the saved value until after calling pm_raise_power then
	 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs
	 * in biowait.
	 */
	un->un_throttle = un->un_saved_throttle;

	/*
	 * The chance of failure is very rare as the only command done in power
	 * entry point is START command when you transition from 0->1 or
	 * unknown->1. Put it to SPINDLE ON state irrespective of the state at
	 * which suspend was done. Ignore the return value as the resume should
	 * not be failed. In the case of removable media the media need not be
	 * inserted and hence there is a chance that raise power will fail with
	 * media not present.
	 */
	if (!ISREMOVABLE(un)) {
		mutex_exit(SD_MUTEX(un));
		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * Don't broadcast to the suspend cv and therefore possibly
	 * start I/O until after power has been restored.
	 */
	cv_broadcast(&un->un_suspend_cv);
	cv_broadcast(&un->un_state_cv);

	/* restart thread */
	if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
		scsi_watch_resume(un->un_swr_token);
	}

#if (defined(__fibre))
	if (un->un_f_is_fibre == TRUE) {
		/*
		 * Add callbacks for insert and remove events
		 */
		if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
			sd_init_event_callbacks(un);
		}
	}
#endif

	/*
	 * Transport any pending commands to the target.
	 *
	 * If this is a low-activity device commands in queue will have to wait
	 * until new commands come in, which may take awhile. Also, we
	 * specifically don't check un_ncmds_in_transport because we know that
	 * there really are no commands in progress after the unit was
	 * suspended and we could have reached the throttle level, been
	 * suspended, and have no new commands coming in for awhile. Highly
	 * unlikely, but so is the low-activity disk scenario.
	 */
	ddi_xbuf_dispatch(un->un_xbuf_attr);

	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");

	return (DDI_SUCCESS);
}


/*
 * Function: sd_ddi_pm_resume
 *
 * Description: Set the drive state to powered on.
 *		Someone else is required to actually change the drive
 *		power level.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_SUCCESS
 *
 * Context: Kernel thread context
 */

static int
sd_ddi_pm_resume(struct sd_lun *un)
{
	ASSERT(un != NULL);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));
	un->un_power_level = SD_SPINDLE_ON;

	ASSERT(!mutex_owned(&un->un_pm_mutex));
	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		/*
		 * The increment followed by the ASSERT implies the low-power
		 * state is represented by un_pm_count == -1 — presumably
		 * what SD_DEVICE_IS_IN_LOW_POWER() tests; confirm in sddef.h.
		 */
		un->un_pm_count++;
		ASSERT(un->un_pm_count == 0);
		/*
		 * Note: no longer do the cv_broadcast on un_suspend_cv. The
		 * un_suspend_cv is for a system resume, not a power management
		 * device resume. (4297749)
		 *	 cv_broadcast(&un->un_suspend_cv);
		 */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));

	return (DDI_SUCCESS);
}


/*
 * Function: sd_pm_idletimeout_handler
 *
 * Description: A timer routine that's active only while a device is busy.
 *		The purpose is to extend slightly the pm framework's busy
 *		view of the device to prevent busy/idle thrashing for
 *		back-to-back commands.
Do this by comparing the current time
 *		to the time at which the last command completed and when the
 *		difference is greater than sd_pm_idletime, call
 *		pm_idle_component. In addition to indicating idle to the pm
 *		framework, update the chain type to again use the internal pm
 *		layers of the driver.
 *
 *   Arguments: arg - driver soft state (unit) structure
 *
 *     Context: Executes in a timeout(9F) thread context
 */

static void
sd_pm_idletimeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	time_t	now;

	/*
	 * Bail out if the instance is detaching; the soft state may be
	 * about to go away. NOTE(review): un itself is read before this
	 * check — assumes the timeout was cancelled/drained by detach
	 * before the soft state is freed; verify against sd_unit_detach.
	 */
	mutex_enter(&sd_detach_mutex);
	if (un->un_detach_count != 0) {
		/* Abort if the instance is detaching */
		mutex_exit(&sd_detach_mutex);
		return;
	}
	mutex_exit(&sd_detach_mutex);

	now = ddi_get_time();
	/*
	 * Grab both mutexes, in the proper order, since we're accessing
	 * both PM and softstate variables.
	 */
	mutex_enter(SD_MUTEX(un));
	mutex_enter(&un->un_pm_mutex);
	if (((now - un->un_pm_idle_time) > sd_pm_idletime) &&
	    (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
		/*
		 * Update the chain types.
		 * This takes effect on the next new command received.
		 */
		if (ISREMOVABLE(un)) {
			un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
		} else {
			un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
		}
		un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_idletimeout_handler: idling device\n");
		(void) pm_idle_component(SD_DEVINFO(un), 0);
		un->un_pm_idle_timeid = NULL;
	} else {
		/* Device still busy (or not idle long enough); re-arm. */
		un->un_pm_idle_timeid =
		    timeout(sd_pm_idletimeout_handler, un,
		    (drv_usectohz((clock_t)300000))); /* 300 ms. */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function:	sd_pm_timeout_handler
 *
 * Description: Callback to tell framework we are idle.
 *		Matches the pm_busy_component() issued when pm_trans_check()
 *		asked us to defer a power cycle (see sdpower, case 0).
 *
 *     Context: timeout(9f) thread context.
 */

static void
sd_pm_timeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	(void) pm_idle_component(SD_DEVINFO(un), 0);
	mutex_enter(&un->un_pm_mutex);
	un->un_pm_timeid = NULL;
	mutex_exit(&un->un_pm_mutex);
}


/*
 * Function:	sdpower
 *
 * Description: PM entry point. Raises or lowers the spindle power level
 *		(SD_SPINDLE_ON/SD_SPINDLE_OFF) for component 0, enforcing
 *		the global power-cycle policy via pm_trans_check() before
 *		a spin-down on devices whose cycle counts we can read.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 *     Context: Kernel thread context
 */

static int
sdpower(dev_info_t *devi, int component, int level)
{
	struct sd_lun	*un;
	int		instance;
	int		rval = DDI_SUCCESS;
	uint_t		i, log_page_size, maxcycles, ncycles;
	uchar_t		*log_page_data;
	int		log_sense_page;
	int		medium_present;
	time_t		intvlp;
	dev_t		dev;
	struct pm_trans_data	sd_pm_tran_data;
	uchar_t		save_state;
	int		sval;
	uchar_t		state_before_pm;
	int		got_semaphore_here;

	instance = ddi_get_instance(devi);

	/* Only component 0 exists; level must be a valid spindle level. */
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) ||
	    component != 0) {
		return (DDI_FAILURE);
	}

	dev = sd_make_device(SD_DEVINFO(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);

	/*
	 * Must synchronize power down with close.
	 * Attempt to decrement/acquire the open/close semaphore,
	 * but do NOT wait on it. If it's not greater than zero,
	 * ie. it can't be decremented without waiting, then
	 * someone else, either open or close, already has it
	 * and the try returns 0. Use that knowledge here to determine
	 * if it's OK to change the device power level.
	 * Also, only increment it on exit if it was decremented, ie. gotten,
	 * here.
	 */
	got_semaphore_here = sema_tryp(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * If un_ncmds_in_driver is non-zero it indicates commands are
	 * already being processed in the driver, or if the semaphore was
	 * not gotten here it indicates an open or close is being processed.
	 * At the same time somebody is requesting to go low power which
	 * can't happen, therefore we need to return failure.
	 */
	if ((level == SD_SPINDLE_OFF) &&
	    ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device has queued cmds.\n");
		return (DDI_FAILURE);
	}

	/*
	 * if it is OFFLINE that means the disk is completely dead
	 * in our case we have to put the disk in on or off by sending commands
	 * Of course that will fail anyway so return back here.
	 *
	 * Power changes to a device that's OFFLINE or SUSPENDED
	 * are not allowed.
	 */
	if ((un->un_state == SD_STATE_OFFLINE) ||
	    (un->un_state == SD_STATE_SUSPENDED)) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device is off-line.\n");
		return (DDI_FAILURE);
	}

	/*
	 * Change the device's state to indicate its power level
	 * is being changed. Do this to prevent a power off in the
	 * middle of commands, which is especially bad on devices
	 * that are really powered off instead of just spun down.
	 */
	state_before_pm = un->un_state;
	un->un_state = SD_STATE_PM_CHANGING;

	mutex_exit(SD_MUTEX(un));

	/*
	 * Bypass checking the log sense information for removables
	 * and devices for which the HBA set the pm-capable property.
	 * If un->un_pm_capable_prop is SD_PM_CAPABLE_UNDEFINED (-1)
	 * then the HBA did not create the property.
	 */
	if ((level == SD_SPINDLE_OFF) && (!ISREMOVABLE(un)) &&
	    un->un_pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) {
		/*
		 * Get the log sense information to understand whether the
		 * power-cycle counts have gone beyond the threshold.
		 */
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		mutex_enter(SD_MUTEX(un));
		log_sense_page = un->un_start_stop_cycle_page;
		mutex_exit(SD_MUTEX(un));

		rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
		    log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif
		if (rval != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Log Sense Failed\n");
			kmem_free(log_page_data, log_page_size);
			/* Cannot support power management on those drives */

			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, Log Sense Failed.\n");
			return (DDI_FAILURE);
		}

		/*
		 * From the page data - Convert the essential information to
		 * pm_trans_data (big-endian 32-bit counters at fixed offsets
		 * in the start/stop cycle counter log page).
		 */
		maxcycles =
		    (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
		    (log_page_data[0x1E] << 8)  | log_page_data[0x1F];

		sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;

		ncycles =
		    (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
		    (log_page_data[0x26] << 8)  | log_page_data[0x27];

		sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;

		for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
			sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
			    log_page_data[8+i];
		}

		kmem_free(log_page_data, log_page_size);

		/*
		 * Call pm_trans_check routine to get the Ok from
		 * the global policy
		 */

		sd_pm_tran_data.format = DC_SCSI_FORMAT;
		sd_pm_tran_data.un.scsi_cycles.flag = 0;

		rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 1;
		}
#endif
		switch (rval) {
		case 0:
			/*
			 * Not Ok to Power cycle or error in parameters passed
			 * Would have given the advised time to consider power
			 * cycle. Based on the new intvlp parameter we are
			 * supposed to pretend we are busy so that pm framework
			 * will never call our power entry point. Because of
			 * that install a timeout handler and wait for the
			 * recommended time to elapse so that power management
			 * can be effective again.
			 *
			 * To effect this behavior, call pm_busy_component to
			 * indicate to the framework this device is busy.
			 * By not adjusting un_pm_count the rest of PM in
			 * the driver will function normally, and independent
			 * of this but because the framework is told the device
			 * is busy it won't attempt powering down until it gets
			 * a matching idle. The timeout handler sends this.
			 * Note: sd_pm_entry can't be called here to do this
			 * because sdpower may have been called as a result
			 * of a call to pm_raise_power from within sd_pm_entry.
			 *
			 * If a timeout handler is already active then
			 * don't install another.
			 */
			mutex_enter(&un->un_pm_mutex);
			if (un->un_pm_timeid == NULL) {
				un->un_pm_timeid =
				    timeout(sd_pm_timeout_handler,
				    un, intvlp * drv_usectohz(1000000));
				mutex_exit(&un->un_pm_mutex);
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			} else {
				mutex_exit(&un->un_pm_mutex);
			}
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));

			SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
			    "trans check Failed, not ok to power cycle.\n");
			return (DDI_FAILURE);

		case -1:
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, trans check command Failed.\n");
			return (DDI_FAILURE);
		}
	}

	if (level == SD_SPINDLE_OFF) {
		/*
		 * Save the last state... if the STOP FAILS we need it
		 * for restoring
		 */
		mutex_enter(SD_MUTEX(un));
		save_state = un->un_last_state;
		/*
		 * There must not be any cmds. getting processed
		 * in the driver when we get here. Power to the
		 * device is potentially going off.
		 */
		ASSERT(un->un_ncmds_in_driver == 0);
		mutex_exit(SD_MUTEX(un));

		/*
		 * For now suspend the device completely before spindle is
		 * turned off
		 */
		if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) {
			if (got_semaphore_here != 0) {
				sema_v(&un->un_semoclose);
			}
			/*
			 * On exit put the state back to its original value
			 * and broadcast to anyone waiting for the power
			 * change completion.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_state = state_before_pm;
			cv_broadcast(&un->un_suspend_cv);
			mutex_exit(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sdpower: exit, PM suspend Failed.\n");
			return (DDI_FAILURE);
		}
	}

	/*
	 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
	 * close, or strategy. Dump no longer uses this routine, it uses its
	 * own code so it can be done in polled mode.
	 */

	medium_present = TRUE;

	/*
	 * When powering up, issue a TUR in case the device is at unit
	 * attention. Don't do retries. Bypass the PM layer, otherwise
	 * a deadlock on un_pm_busy_cv will occur.
	 */
	if (level == SD_SPINDLE_ON) {
		(void) sd_send_scsi_TEST_UNIT_READY(un,
		    SD_DONT_RETRY_TUR | SD_BYPASS_PM);
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
	    ((level == SD_SPINDLE_ON) ? "START" : "STOP"));

	sval = sd_send_scsi_START_STOP_UNIT(un,
	    ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP),
	    SD_PATH_DIRECT);
	/* Command failed, check for media present. */
	if ((sval == ENXIO) && ISREMOVABLE(un)) {
		medium_present = FALSE;
	}

	/*
	 * The conditions of interest here are:
	 *   if a spindle off with media present fails,
	 *	then restore the state and return an error.
	 *   else if a spindle on fails,
	 *	then return an error (there's no state to restore).
	 * In all other cases we setup for the new state
	 * and return success.
	 */
	switch (level) {
	case SD_SPINDLE_OFF:
		if ((medium_present == TRUE) && (sval != 0)) {
			/* The stop command from above failed */
			rval = DDI_FAILURE;
			/*
			 * The stop command failed, and we have media
			 * present. Put the level back by calling the
			 * sd_pm_resume() and set the state back to
			 * its previous value.
			 */
			(void) sd_ddi_pm_resume(un);
			mutex_enter(SD_MUTEX(un));
			un->un_last_state = save_state;
			mutex_exit(SD_MUTEX(un));
			break;
		}
		/*
		 * The stop command from above succeeded.
		 */
		if (ISREMOVABLE(un)) {
			/*
			 * Terminate watch thread in case of removable media
			 * devices going into low power state. This is as per
			 * the requirements of pm framework, otherwise commands
			 * will be generated for the device (through watch
			 * thread), even when the device is in low power state.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_f_watcht_stopped = FALSE;
			if (un->un_swr_token != NULL) {
				opaque_t temp_token = un->un_swr_token;
				un->un_f_watcht_stopped = TRUE;
				un->un_swr_token = NULL;
				mutex_exit(SD_MUTEX(un));
				(void) scsi_watch_request_terminate(temp_token,
				    SCSI_WATCH_TERMINATE_WAIT);
			} else {
				mutex_exit(SD_MUTEX(un));
			}
		}
		break;

	default:	/* The level requested is spindle on... */
		/*
		 * Legacy behavior: return success on a failed spinup
		 * if there is no media in the drive.
		 * Do this by looking at medium_present here.
		 */
		if ((sval != 0) && medium_present) {
			/* The start command from above failed */
			rval = DDI_FAILURE;
			break;
		}
		/*
		 * The start command from above succeeded
		 * Resume the devices now that we have
		 * started the disks
		 */
		(void) sd_ddi_pm_resume(un);

		/*
		 * Resume the watch thread since it was suspended
		 * when the device went into low power mode.
		 */
		if (ISREMOVABLE(un)) {
			mutex_enter(SD_MUTEX(un));
			if (un->un_f_watcht_stopped == TRUE) {
				opaque_t temp_token;

				un->un_f_watcht_stopped = FALSE;
				mutex_exit(SD_MUTEX(un));
				temp_token = scsi_watch_request_submit(
				    SD_SCSI_DEVP(un),
				    sd_check_media_time,
				    SENSE_LENGTH, sd_media_watch_cb,
				    (caddr_t)dev);
				mutex_enter(SD_MUTEX(un));
				un->un_swr_token = temp_token;
			}
			mutex_exit(SD_MUTEX(un));
		}
	}
	if (got_semaphore_here != 0) {
		sema_v(&un->un_semoclose);
	}
	/*
	 * On exit put the state back to its original value
	 * and broadcast to anyone waiting for the power
	 * change completion.
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_state = state_before_pm;
	cv_broadcast(&un->un_suspend_cv);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);

	return (rval);
}



/*
 * Function:	sdattach
 *
 * Description: Driver's attach(9e) entry point function.
 *
 *   Arguments: devi - opaque device info handle
 *		cmd  - attach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 *     Context: Kernel thread context
 */

static int
sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	/* Dispatch to the appropriate handler; reject unknown commands. */
	switch (cmd) {
	case DDI_ATTACH:
		return (sd_unit_attach(devi));
	case DDI_RESUME:
		return (sd_ddi_resume(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function:	sddetach
 *
 * Description: Driver's detach(9E) entry point function.
 *
 *   Arguments: devi - opaque device info handle
 *		cmd  - detach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 *     Context: Kernel thread context
 */

static int
sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	/* Dispatch to the appropriate handler; reject unknown commands. */
	switch (cmd) {
	case DDI_DETACH:
		return (sd_unit_detach(devi));
	case DDI_SUSPEND:
		return (sd_ddi_suspend(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function:	sd_sync_with_callback
 *
 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft
 *		state while the callback routine is active. Polls un_in_callback
 *		(with SD_MUTEX dropped between checks) until it drains to zero.
 *
 *   Arguments: un: softstate structure for the instance
 *
 *     Context: Kernel thread context
 */

static void
sd_sync_with_callback(struct sd_lun *un)
{
	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	ASSERT(un->un_in_callback >= 0);

	while (un->un_in_callback > 0) {
		/* Drop the mutex so the callback can make progress. */
		mutex_exit(SD_MUTEX(un));
		delay(2);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_exit(SD_MUTEX(un));
}

/*
 * Function:	sd_unit_attach
 *
 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates
 *		the soft state structure for the device and performs
 *		all necessary structure and device initializations.
7599 * 7600 * Arguments: devi: the system's dev_info_t for the device. 7601 * 7602 * Return Code: DDI_SUCCESS if attach is successful. 7603 * DDI_FAILURE if any part of the attach fails. 7604 * 7605 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 7606 * Kernel thread context only. Can sleep. 7607 */ 7608 7609 static int 7610 sd_unit_attach(dev_info_t *devi) 7611 { 7612 struct scsi_device *devp; 7613 struct sd_lun *un; 7614 char *variantp; 7615 int reservation_flag = SD_TARGET_IS_UNRESERVED; 7616 int instance; 7617 int rval; 7618 uint64_t capacity; 7619 uint_t lbasize; 7620 7621 /* 7622 * Retrieve the target driver's private data area. This was set 7623 * up by the HBA. 7624 */ 7625 devp = ddi_get_driver_private(devi); 7626 7627 /* 7628 * Since we have no idea what state things were left in by the last 7629 * user of the device, set up some 'default' settings, ie. turn 'em 7630 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 7631 * Do this before the scsi_probe, which sends an inquiry. 7632 * This is a fix for bug (4430280). 7633 * Of special importance is wide-xfer. The drive could have been left 7634 * in wide transfer mode by the last driver to communicate with it, 7635 * this includes us. If that's the case, and if the following is not 7636 * setup properly or we don't re-negotiate with the drive prior to 7637 * transferring data to/from the drive, it causes bus parity errors, 7638 * data overruns, and unexpected interrupts. This first occurred when 7639 * the fix for bug (4378686) was made. 7640 */ 7641 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 7642 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 7643 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 7644 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 7645 7646 /* 7647 * Use scsi_probe() to issue an INQUIRY command to the device. 
7648 * This call will allocate and fill in the scsi_inquiry structure 7649 * and point the sd_inq member of the scsi_device structure to it. 7650 * If the attach succeeds, then this memory will not be de-allocated 7651 * (via scsi_unprobe()) until the instance is detached. 7652 */ 7653 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 7654 goto probe_failed; 7655 } 7656 7657 /* 7658 * Check the device type as specified in the inquiry data and 7659 * claim it if it is of a type that we support. 7660 */ 7661 switch (devp->sd_inq->inq_dtype) { 7662 case DTYPE_DIRECT: 7663 break; 7664 case DTYPE_RODIRECT: 7665 break; 7666 case DTYPE_OPTICAL: 7667 break; 7668 case DTYPE_NOTPRESENT: 7669 default: 7670 /* Unsupported device type; fail the attach. */ 7671 goto probe_failed; 7672 } 7673 7674 /* 7675 * Allocate the soft state structure for this unit. 7676 * 7677 * We rely upon this memory being set to all zeroes by 7678 * ddi_soft_state_zalloc(). We assume that any member of the 7679 * soft state structure that is not explicitly initialized by 7680 * this routine will have a value of zero. 7681 */ 7682 instance = ddi_get_instance(devp->sd_dev); 7683 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 7684 goto probe_failed; 7685 } 7686 7687 /* 7688 * Retrieve a pointer to the newly-allocated soft state. 7689 * 7690 * This should NEVER fail if the ddi_soft_state_zalloc() call above 7691 * was successful, unless something has gone horribly wrong and the 7692 * ddi's soft state internals are corrupt (in which case it is 7693 * probably better to halt here than just fail the attach....) 7694 */ 7695 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 7696 panic("sd_unit_attach: NULL soft state on instance:0x%x", 7697 instance); 7698 /*NOTREACHED*/ 7699 } 7700 7701 /* 7702 * Link the back ptr of the driver soft state to the scsi_device 7703 * struct for this lun. 
7704 * Save a pointer to the softstate in the driver-private area of 7705 * the scsi_device struct. 7706 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 7707 * we first set un->un_sd below. 7708 */ 7709 un->un_sd = devp; 7710 devp->sd_private = (opaque_t)un; 7711 7712 /* 7713 * The following must be after devp is stored in the soft state struct. 7714 */ 7715 #ifdef SDDEBUG 7716 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7717 "%s_unit_attach: un:0x%p instance:%d\n", 7718 ddi_driver_name(devi), un, instance); 7719 #endif 7720 7721 /* 7722 * Set up the device type and node type (for the minor nodes). 7723 * By default we assume that the device can at least support the 7724 * Common Command Set. Call it a CD-ROM if it reports itself 7725 * as a RODIRECT device. 7726 */ 7727 switch (devp->sd_inq->inq_dtype) { 7728 case DTYPE_RODIRECT: 7729 un->un_node_type = DDI_NT_CD_CHAN; 7730 un->un_ctype = CTYPE_CDROM; 7731 break; 7732 case DTYPE_OPTICAL: 7733 un->un_node_type = DDI_NT_BLOCK_CHAN; 7734 un->un_ctype = CTYPE_ROD; 7735 break; 7736 default: 7737 un->un_node_type = DDI_NT_BLOCK_CHAN; 7738 un->un_ctype = CTYPE_CCS; 7739 break; 7740 } 7741 7742 /* 7743 * Try to read the interconnect type from the HBA. 7744 * 7745 * Note: This driver is currently compiled as two binaries, a parallel 7746 * scsi version (sd) and a fibre channel version (ssd). All functional 7747 * differences are determined at compile time. In the future a single 7748 * binary will be provided and the inteconnect type will be used to 7749 * differentiate between fibre and parallel scsi behaviors. At that time 7750 * it will be necessary for all fibre channel HBAs to support this 7751 * property. 
7752 * 7753 * set un_f_is_fiber to TRUE ( default fiber ) 7754 */ 7755 un->un_f_is_fibre = TRUE; 7756 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 7757 case INTERCONNECT_SSA: 7758 un->un_interconnect_type = SD_INTERCONNECT_SSA; 7759 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7760 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 7761 break; 7762 case INTERCONNECT_PARALLEL: 7763 un->un_f_is_fibre = FALSE; 7764 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7765 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7766 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 7767 break; 7768 case INTERCONNECT_FIBRE: 7769 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 7770 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7771 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 7772 break; 7773 case INTERCONNECT_FABRIC: 7774 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 7775 un->un_node_type = DDI_NT_BLOCK_FABRIC; 7776 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7777 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 7778 break; 7779 default: 7780 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 7781 /* 7782 * The HBA does not support the "interconnect-type" property 7783 * (or did not provide a recognized type). 7784 * 7785 * Note: This will be obsoleted when a single fibre channel 7786 * and parallel scsi driver is delivered. In the meantime the 7787 * interconnect type will be set to the platform default.If that 7788 * type is not parallel SCSI, it means that we should be 7789 * assuming "ssd" semantics. However, here this also means that 7790 * the FC HBA is not supporting the "interconnect-type" property 7791 * like we expect it to, so log this occurrence. 
7792 */ 7793 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 7794 if (!SD_IS_PARALLEL_SCSI(un)) { 7795 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7796 "sd_unit_attach: un:0x%p Assuming " 7797 "INTERCONNECT_FIBRE\n", un); 7798 } else { 7799 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7800 "sd_unit_attach: un:0x%p Assuming " 7801 "INTERCONNECT_PARALLEL\n", un); 7802 un->un_f_is_fibre = FALSE; 7803 } 7804 #else 7805 /* 7806 * Note: This source will be implemented when a single fibre 7807 * channel and parallel scsi driver is delivered. The default 7808 * will be to assume that if a device does not support the 7809 * "interconnect-type" property it is a parallel SCSI HBA and 7810 * we will set the interconnect type for parallel scsi. 7811 */ 7812 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7813 un->un_f_is_fibre = FALSE; 7814 #endif 7815 break; 7816 } 7817 7818 if (un->un_f_is_fibre == TRUE) { 7819 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7820 SCSI_VERSION_3) { 7821 switch (un->un_interconnect_type) { 7822 case SD_INTERCONNECT_FIBRE: 7823 case SD_INTERCONNECT_SSA: 7824 un->un_node_type = DDI_NT_BLOCK_WWN; 7825 break; 7826 default: 7827 break; 7828 } 7829 } 7830 } 7831 7832 /* 7833 * Initialize the Request Sense command for the target 7834 */ 7835 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7836 goto alloc_rqs_failed; 7837 } 7838 7839 /* 7840 * Set un_retry_count with SD_RETRY_COUNT, this is ok for Sparc 7841 * with seperate binary for sd and ssd. 7842 * 7843 * x86 has 1 binary, un_retry_count is set base on connection type. 7844 * The hardcoded values will go away when Sparc uses 1 binary 7845 * for sd and ssd. This hardcoded values need to match 7846 * SD_RETRY_COUNT in sddef.h 7847 * The value used is base on interconnect type. 7848 * fibre = 3, parallel = 5 7849 */ 7850 #if defined(__i386) || defined(__amd64) 7851 un->un_retry_count = un->un_f_is_fibre ? 
3 : 5; 7852 #else 7853 un->un_retry_count = SD_RETRY_COUNT; 7854 #endif 7855 7856 /* 7857 * Set the per disk retry count to the default number of retries 7858 * for disks and CDROMs. This value can be overridden by the 7859 * disk property list or an entry in sd.conf. 7860 */ 7861 un->un_notready_retry_count = 7862 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 7863 : DISK_NOT_READY_RETRY_COUNT(un); 7864 7865 /* 7866 * Set the busy retry count to the default value of un_retry_count. 7867 * This can be overridden by entries in sd.conf or the device 7868 * config table. 7869 */ 7870 un->un_busy_retry_count = un->un_retry_count; 7871 7872 /* 7873 * Init the reset threshold for retries. This number determines 7874 * how many retries must be performed before a reset can be issued 7875 * (for certain error conditions). This can be overridden by entries 7876 * in sd.conf or the device config table. 7877 */ 7878 un->un_reset_retry_count = (un->un_retry_count / 2); 7879 7880 /* 7881 * Set the victim_retry_count to the default un_retry_count 7882 */ 7883 un->un_victim_retry_count = (2 * un->un_retry_count); 7884 7885 /* 7886 * Set the reservation release timeout to the default value of 7887 * 5 seconds. This can be overridden by entries in ssd.conf or the 7888 * device config table. 7889 */ 7890 un->un_reserve_release_time = 5; 7891 7892 /* 7893 * Set up the default maximum transfer size. Note that this may 7894 * get updated later in the attach, when setting up default wide 7895 * operations for disks. 7896 */ 7897 #if defined(__i386) || defined(__amd64) 7898 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7899 #else 7900 un->un_max_xfer_size = (uint_t)maxphys; 7901 #endif 7902 7903 /* 7904 * Get "allow bus device reset" property (defaults to "enabled" if 7905 * the property was not defined). This is to disable bus resets for 7906 * certain kinds of error recovery. 
Note: In the future when a run-time 7907 * fibre check is available the soft state flag should default to 7908 * enabled. 7909 */ 7910 if (un->un_f_is_fibre == TRUE) { 7911 un->un_f_allow_bus_device_reset = TRUE; 7912 } else { 7913 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7914 "allow-bus-device-reset", 1) != 0) { 7915 un->un_f_allow_bus_device_reset = TRUE; 7916 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7917 "sd_unit_attach: un:0x%p Bus device reset enabled\n", 7918 un); 7919 } else { 7920 un->un_f_allow_bus_device_reset = FALSE; 7921 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7922 "sd_unit_attach: un:0x%p Bus device reset disabled\n", 7923 un); 7924 } 7925 } 7926 7927 /* 7928 * Check if this is an ATAPI device. ATAPI devices use Group 1 7929 * Read/Write commands and Group 2 Mode Sense/Select commands. 7930 * 7931 * Note: The "obsolete" way of doing this is to check for the "atapi" 7932 * property. The new "variant" property with a value of "atapi" has been 7933 * introduced so that future 'variants' of standard SCSI behavior (like 7934 * atapi) could be specified by the underlying HBA drivers by supplying 7935 * a new value for the "variant" property, instead of having to define a 7936 * new property. 7937 */ 7938 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7939 un->un_f_cfg_is_atapi = TRUE; 7940 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7941 "sd_unit_attach: un:0x%p Atapi device\n", un); 7942 } 7943 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7944 &variantp) == DDI_PROP_SUCCESS) { 7945 if (strcmp(variantp, "atapi") == 0) { 7946 un->un_f_cfg_is_atapi = TRUE; 7947 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7948 "sd_unit_attach: un:0x%p Atapi device\n", un); 7949 } 7950 ddi_prop_free(variantp); 7951 } 7952 7953 /* 7954 * Assume doorlock commands are supported. 
If not, the first 7955 * call to sd_send_scsi_DOORLOCK() will set to FALSE 7956 */ 7957 un->un_f_doorlock_supported = TRUE; 7958 7959 un->un_cmd_timeout = SD_IO_TIME; 7960 7961 /* Info on current states, statuses, etc. (Updated frequently) */ 7962 un->un_state = SD_STATE_NORMAL; 7963 un->un_last_state = SD_STATE_NORMAL; 7964 7965 /* Control & status info for command throttling */ 7966 un->un_throttle = sd_max_throttle; 7967 un->un_saved_throttle = sd_max_throttle; 7968 un->un_min_throttle = sd_min_throttle; 7969 7970 if (un->un_f_is_fibre == TRUE) { 7971 un->un_f_use_adaptive_throttle = TRUE; 7972 } else { 7973 un->un_f_use_adaptive_throttle = FALSE; 7974 } 7975 7976 /* Removable media support. */ 7977 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7978 un->un_mediastate = DKIO_NONE; 7979 un->un_specified_mediastate = DKIO_NONE; 7980 7981 /* CVs for suspend/resume (PM or DR) */ 7982 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7983 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7984 7985 /* Power management support. */ 7986 un->un_power_level = SD_SPINDLE_UNINIT; 7987 7988 /* 7989 * The open/close semaphore is used to serialize threads executing 7990 * in the driver's open & close entry point routines for a given 7991 * instance. 7992 */ 7993 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7994 7995 /* 7996 * The conf file entry and softstate variable is a forceful override, 7997 * meaning a non-zero value must be entered to change the default. 7998 */ 7999 un->un_f_disksort_disabled = FALSE; 8000 8001 /* 8002 * Retrieve the properties from the static driver table or the driver 8003 * configuration file (.conf) for this unit and update the soft state 8004 * for the device as needed for the indicated properties. 8005 * Note: the property configuration needs to occur here as some of the 8006 * following routines may have dependancies on soft state flags set 8007 * as part of the driver property configuration. 
8008 */ 8009 sd_read_unit_properties(un); 8010 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8011 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 8012 8013 /* 8014 * By default, we mark the capacity, lbazize, and geometry 8015 * as invalid. Only if we successfully read a valid capacity 8016 * will we update the un_blockcount and un_tgt_blocksize with the 8017 * valid values (the geometry will be validated later). 8018 */ 8019 un->un_f_blockcount_is_valid = FALSE; 8020 un->un_f_tgt_blocksize_is_valid = FALSE; 8021 un->un_f_geometry_is_valid = FALSE; 8022 8023 /* 8024 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 8025 * otherwise. 8026 */ 8027 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 8028 un->un_blockcount = 0; 8029 8030 /* 8031 * Set up the per-instance info needed to determine the correct 8032 * CDBs and other info for issuing commands to the target. 8033 */ 8034 sd_init_cdb_limits(un); 8035 8036 /* 8037 * Set up the IO chains to use, based upon the target type. 8038 */ 8039 if (ISREMOVABLE(un)) { 8040 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 8041 } else { 8042 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 8043 } 8044 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 8045 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 8046 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 8047 8048 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 8049 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 8050 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 8051 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 8052 8053 8054 if (ISCD(un)) { 8055 un->un_additional_codes = sd_additional_codes; 8056 } else { 8057 un->un_additional_codes = NULL; 8058 } 8059 8060 /* 8061 * Create the kstats here so they can be available for attach-time 8062 * routines that send commands to the unit (either polled or via 8063 * sd_send_scsi_cmd). 
8064 * 8065 * Note: This is a critical sequence that needs to be maintained: 8066 * 1) Instantiate the kstats here, before any routines using the 8067 * iopath (i.e. sd_send_scsi_cmd). 8068 * 2) Initialize the error stats (sd_set_errstats) and partition 8069 * stats (sd_set_pstats), following sd_validate_geometry(), 8070 * sd_register_devid(), and sd_disable_caching(). 8071 */ 8072 8073 un->un_stats = kstat_create(sd_label, instance, 8074 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 8075 if (un->un_stats != NULL) { 8076 un->un_stats->ks_lock = SD_MUTEX(un); 8077 kstat_install(un->un_stats); 8078 } 8079 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8080 "sd_unit_attach: un:0x%p un_stats created\n", un); 8081 8082 sd_create_errstats(un, instance); 8083 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8084 "sd_unit_attach: un:0x%p errstats created\n", un); 8085 8086 /* 8087 * The following if/else code was relocated here from below as part 8088 * of the fix for bug (4430280). However with the default setup added 8089 * on entry to this routine, it's no longer absolutely necessary for 8090 * this to be before the call to sd_spin_up_unit. 8091 */ 8092 if (SD_IS_PARALLEL_SCSI(un)) { 8093 /* 8094 * If SCSI-2 tagged queueing is supported by the target 8095 * and by the host adapter then we will enable it. 
8096 */ 8097 un->un_tagflags = 0; 8098 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && 8099 (devp->sd_inq->inq_cmdque) && 8100 (un->un_f_arq_enabled == TRUE)) { 8101 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 8102 1, 1) == 1) { 8103 un->un_tagflags = FLAG_STAG; 8104 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8105 "sd_unit_attach: un:0x%p tag queueing " 8106 "enabled\n", un); 8107 } else if (scsi_ifgetcap(SD_ADDRESS(un), 8108 "untagged-qing", 0) == 1) { 8109 un->un_f_opt_queueing = TRUE; 8110 un->un_saved_throttle = un->un_throttle = 8111 min(un->un_throttle, 3); 8112 } else { 8113 un->un_f_opt_queueing = FALSE; 8114 un->un_saved_throttle = un->un_throttle = 1; 8115 } 8116 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 8117 == 1) && (un->un_f_arq_enabled == TRUE)) { 8118 /* The Host Adapter supports internal queueing. */ 8119 un->un_f_opt_queueing = TRUE; 8120 un->un_saved_throttle = un->un_throttle = 8121 min(un->un_throttle, 3); 8122 } else { 8123 un->un_f_opt_queueing = FALSE; 8124 un->un_saved_throttle = un->un_throttle = 1; 8125 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8126 "sd_unit_attach: un:0x%p no tag queueing\n", un); 8127 } 8128 8129 8130 /* Setup or tear down default wide operations for disks */ 8131 8132 /* 8133 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 8134 * and "ssd_max_xfer_size" to exist simultaneously on the same 8135 * system and be set to different values. In the future this 8136 * code may need to be updated when the ssd module is 8137 * obsoleted and removed from the system. 
(4299588) 8138 */ 8139 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && 8140 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 8141 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 8142 1, 1) == 1) { 8143 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8144 "sd_unit_attach: un:0x%p Wide Transfer " 8145 "enabled\n", un); 8146 } 8147 8148 /* 8149 * If tagged queuing has also been enabled, then 8150 * enable large xfers 8151 */ 8152 if (un->un_saved_throttle == sd_max_throttle) { 8153 un->un_max_xfer_size = 8154 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8155 sd_max_xfer_size, SD_MAX_XFER_SIZE); 8156 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8157 "sd_unit_attach: un:0x%p max transfer " 8158 "size=0x%x\n", un, un->un_max_xfer_size); 8159 } 8160 } else { 8161 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 8162 0, 1) == 1) { 8163 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8164 "sd_unit_attach: un:0x%p " 8165 "Wide Transfer disabled\n", un); 8166 } 8167 } 8168 } else { 8169 un->un_tagflags = FLAG_STAG; 8170 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 8171 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 8172 } 8173 8174 /* 8175 * If this target supports LUN reset, try to enable it. 8176 */ 8177 if (un->un_f_lun_reset_enabled) { 8178 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 8179 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 8180 "un:0x%p lun_reset capability set\n", un); 8181 } else { 8182 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 8183 "un:0x%p lun-reset capability not set\n", un); 8184 } 8185 } 8186 8187 /* 8188 * At this point in the attach, we have enough info in the 8189 * soft state to be able to issue commands to the target. 8190 * 8191 * All command paths used below MUST issue their commands as 8192 * SD_PATH_DIRECT. This is important as intermediate layers 8193 * are not all initialized yet (such as PM). 8194 */ 8195 8196 /* 8197 * Send a TEST UNIT READY command to the device. This should clear 8198 * any outstanding UNIT ATTENTION that may be present. 
8199 * 8200 * Note: Don't check for success, just track if there is a reservation, 8201 * this is a throw away command to clear any unit attentions. 8202 * 8203 * Note: This MUST be the first command issued to the target during 8204 * attach to ensure power on UNIT ATTENTIONS are cleared. 8205 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 8206 * with attempts at spinning up a device with no media. 8207 */ 8208 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 8209 reservation_flag = SD_TARGET_IS_RESERVED; 8210 } 8211 8212 /* 8213 * If the device is NOT a removable media device, attempt to spin 8214 * it up (using the START_STOP_UNIT command) and read its capacity 8215 * (using the READ CAPACITY command). Note, however, that either 8216 * of these could fail and in some cases we would continue with 8217 * the attach despite the failure (see below). 8218 */ 8219 if (devp->sd_inq->inq_dtype == DTYPE_DIRECT && !ISREMOVABLE(un)) { 8220 switch (sd_spin_up_unit(un)) { 8221 case 0: 8222 /* 8223 * Spin-up was successful; now try to read the 8224 * capacity. If successful then save the results 8225 * and mark the capacity & lbasize as valid. 8226 */ 8227 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8228 "sd_unit_attach: un:0x%p spin-up successful\n", un); 8229 8230 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 8231 &lbasize, SD_PATH_DIRECT)) { 8232 case 0: { 8233 if (capacity > DK_MAX_BLOCKS) { 8234 #ifdef _LP64 8235 /* 8236 * Enable descriptor format sense data 8237 * so that we can get 64 bit sense 8238 * data fields. 8239 */ 8240 sd_enable_descr_sense(un); 8241 #else 8242 /* 32-bit kernels can't handle this */ 8243 scsi_log(SD_DEVINFO(un), 8244 sd_label, CE_WARN, 8245 "disk has %llu blocks, which " 8246 "is too large for a 32-bit " 8247 "kernel", capacity); 8248 goto spinup_failed; 8249 #endif 8250 } 8251 /* 8252 * The following relies on 8253 * sd_send_scsi_READ_CAPACITY never 8254 * returning 0 for capacity and/or lbasize. 
8255 */ 8256 sd_update_block_info(un, lbasize, capacity); 8257 8258 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8259 "sd_unit_attach: un:0x%p capacity = %ld " 8260 "blocks; lbasize= %ld.\n", un, 8261 un->un_blockcount, un->un_tgt_blocksize); 8262 8263 break; 8264 } 8265 case EACCES: 8266 /* 8267 * Should never get here if the spin-up 8268 * succeeded, but code it in anyway. 8269 * From here, just continue with the attach... 8270 */ 8271 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8272 "sd_unit_attach: un:0x%p " 8273 "sd_send_scsi_READ_CAPACITY " 8274 "returned reservation conflict\n", un); 8275 reservation_flag = SD_TARGET_IS_RESERVED; 8276 break; 8277 default: 8278 /* 8279 * Likewise, should never get here if the 8280 * spin-up succeeded. Just continue with 8281 * the attach... 8282 */ 8283 break; 8284 } 8285 break; 8286 case EACCES: 8287 /* 8288 * Device is reserved by another host. In this case 8289 * we could not spin it up or read the capacity, but 8290 * we continue with the attach anyway. 8291 */ 8292 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8293 "sd_unit_attach: un:0x%p spin-up reservation " 8294 "conflict.\n", un); 8295 reservation_flag = SD_TARGET_IS_RESERVED; 8296 break; 8297 default: 8298 /* Fail the attach if the spin-up failed. */ 8299 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8300 "sd_unit_attach: un:0x%p spin-up failed.", un); 8301 goto spinup_failed; 8302 } 8303 } 8304 8305 /* 8306 * Check to see if this is a MMC drive 8307 */ 8308 if (ISCD(un)) { 8309 sd_set_mmc_caps(un); 8310 } 8311 8312 /* 8313 * Create the minor nodes for the device. 8314 * Note: If we want to support fdisk on both sparc and intel, this will 8315 * have to separate out the notion that VTOC8 is always sparc, and 8316 * VTOC16 is always intel (tho these can be the defaults). The vtoc 8317 * type will have to be determined at run-time, and the fdisk 8318 * partitioning will have to have been read & set up before we 8319 * create the minor nodes. 
(any other inits (such as kstats) that 8320 * also ought to be done before creating the minor nodes?) (Doesn't 8321 * setting up the minor nodes kind of imply that we're ready to 8322 * handle an open from userland?) 8323 */ 8324 if (sd_create_minor_nodes(un, devi) != DDI_SUCCESS) { 8325 goto create_minor_nodes_failed; 8326 } 8327 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8328 "sd_unit_attach: un:0x%p minor nodes created\n", un); 8329 8330 /* 8331 * Add a zero-length attribute to tell the world we support 8332 * kernel ioctls (for layered drivers) 8333 */ 8334 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8335 DDI_KERNEL_IOCTL, NULL, 0); 8336 8337 /* 8338 * Add a boolean property to tell the world we support 8339 * the B_FAILFAST flag (for layered drivers) 8340 */ 8341 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8342 "ddi-failfast-supported", NULL, 0); 8343 8344 /* 8345 * Initialize power management 8346 */ 8347 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 8348 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 8349 sd_setup_pm(un, devi); 8350 if (un->un_f_pm_is_enabled == FALSE) { 8351 /* 8352 * For performance, point to a jump table that does 8353 * not include pm. 8354 * The direct and priority chains don't change with PM. 8355 * 8356 * Note: this is currently done based on individual device 8357 * capabilities. When an interface for determining system 8358 * power enabled state becomes available, or when additional 8359 * layers are added to the command chain, these values will 8360 * have to be re-evaluated for correctness. 8361 */ 8362 if (ISREMOVABLE(un)) { 8363 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 8364 } else { 8365 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 8366 } 8367 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8368 } 8369 8370 /* 8371 * This property is set to 0 by HA software to avoid retries 8372 * on a reserved disk. 
(The preferred property name is 8373 * "retry-on-reservation-conflict") (1189689) 8374 * 8375 * Note: The use of a global here can have unintended consequences. A 8376 * per instance variable is preferrable to match the capabilities of 8377 * different underlying hba's (4402600) 8378 */ 8379 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 8380 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 8381 sd_retry_on_reservation_conflict); 8382 if (sd_retry_on_reservation_conflict != 0) { 8383 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 8384 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 8385 sd_retry_on_reservation_conflict); 8386 } 8387 8388 /* Set up options for QFULL handling. */ 8389 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8390 "qfull-retries", -1)) != -1) { 8391 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 8392 rval, 1); 8393 } 8394 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8395 "qfull-retry-interval", -1)) != -1) { 8396 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 8397 rval, 1); 8398 } 8399 8400 /* 8401 * This just prints a message that announces the existence of the 8402 * device. The message is always printed in the system logfile, but 8403 * only appears on the console if the system is booted with the 8404 * -v (verbose) argument. 8405 */ 8406 ddi_report_dev(devi); 8407 8408 /* 8409 * The framework calls driver attach routines single-threaded 8410 * for a given instance. However we still acquire SD_MUTEX here 8411 * because this required for calling the sd_validate_geometry() 8412 * and sd_register_devid() functions. 
8413 */ 8414 mutex_enter(SD_MUTEX(un)); 8415 un->un_f_geometry_is_valid = FALSE; 8416 un->un_mediastate = DKIO_NONE; 8417 un->un_reserved = -1; 8418 if (!ISREMOVABLE(un)) { 8419 /* 8420 * Read and validate the device's geometry (ie, disk label) 8421 * A new unformatted drive will not have a valid geometry, but 8422 * the driver needs to successfully attach to this device so 8423 * the drive can be formatted via ioctls. 8424 */ 8425 if (((sd_validate_geometry(un, SD_PATH_DIRECT) == 8426 ENOTSUP)) && 8427 (un->un_blockcount < DK_MAX_BLOCKS)) { 8428 /* 8429 * We found a small disk with an EFI label on it; 8430 * we need to fix up the minor nodes accordingly. 8431 */ 8432 ddi_remove_minor_node(devi, "h"); 8433 ddi_remove_minor_node(devi, "h,raw"); 8434 (void) ddi_create_minor_node(devi, "wd", 8435 S_IFBLK, 8436 (instance << SDUNIT_SHIFT) | WD_NODE, 8437 un->un_node_type, NULL); 8438 (void) ddi_create_minor_node(devi, "wd,raw", 8439 S_IFCHR, 8440 (instance << SDUNIT_SHIFT) | WD_NODE, 8441 un->un_node_type, NULL); 8442 } 8443 } 8444 8445 /* 8446 * Read and initialize the devid for the unit. 8447 */ 8448 ASSERT(un->un_errstats != NULL); 8449 if (!ISREMOVABLE(un)) { 8450 sd_register_devid(un, devi, reservation_flag); 8451 } 8452 mutex_exit(SD_MUTEX(un)); 8453 8454 #if (defined(__fibre)) 8455 /* 8456 * Register callbacks for fibre only. You can't do this soley 8457 * on the basis of the devid_type because this is hba specific. 8458 * We need to query our hba capabilities to find out whether to 8459 * register or not. 
8460 */ 8461 if (un->un_f_is_fibre) { 8462 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 8463 sd_init_event_callbacks(un); 8464 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8465 "sd_unit_attach: un:0x%p event callbacks inserted", un); 8466 } 8467 } 8468 #endif 8469 8470 if (un->un_f_opt_disable_cache == TRUE) { 8471 if (sd_disable_caching(un) != 0) { 8472 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8473 "sd_unit_attach: un:0x%p Could not disable " 8474 "caching", un); 8475 goto devid_failed; 8476 } 8477 } 8478 8479 /* 8480 * Set the pstat and error stat values here, so data obtained during the 8481 * previous attach-time routines is available. 8482 * 8483 * Note: This is a critical sequence that needs to be maintained: 8484 * 1) Instantiate the kstats before any routines using the iopath 8485 * (i.e. sd_send_scsi_cmd). 8486 * 2) Initialize the error stats (sd_set_errstats) and partition 8487 * stats (sd_set_pstats)here, following sd_validate_geometry(), 8488 * sd_register_devid(), and sd_disable_caching(). 8489 */ 8490 if (!ISREMOVABLE(un) && (un->un_f_pkstats_enabled == TRUE)) { 8491 sd_set_pstats(un); 8492 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8493 "sd_unit_attach: un:0x%p pstats created and set\n", un); 8494 } 8495 8496 sd_set_errstats(un); 8497 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8498 "sd_unit_attach: un:0x%p errstats set\n", un); 8499 8500 /* 8501 * Find out what type of reservation this disk supports. 8502 */ 8503 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) { 8504 case 0: 8505 /* 8506 * SCSI-3 reservations are supported. 8507 */ 8508 un->un_reservation_type = SD_SCSI3_RESERVATION; 8509 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8510 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 8511 break; 8512 case ENOTSUP: 8513 /* 8514 * The PERSISTENT RESERVE IN command would not be recognized by 8515 * a SCSI-2 device, so assume the reservation type is SCSI-2. 
8516 */ 8517 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8518 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 8519 un->un_reservation_type = SD_SCSI2_RESERVATION; 8520 break; 8521 default: 8522 /* 8523 * default to SCSI-3 reservations 8524 */ 8525 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8526 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 8527 un->un_reservation_type = SD_SCSI3_RESERVATION; 8528 break; 8529 } 8530 8531 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8532 "sd_unit_attach: un:0x%p exit success\n", un); 8533 8534 return (DDI_SUCCESS); 8535 8536 /* 8537 * An error occurred during the attach; clean up & return failure. 8538 */ 8539 8540 devid_failed: 8541 8542 setup_pm_failed: 8543 ddi_remove_minor_node(devi, NULL); 8544 8545 create_minor_nodes_failed: 8546 /* 8547 * Cleanup from the scsi_ifsetcap() calls (437868) 8548 */ 8549 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8550 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8551 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8552 8553 if (un->un_f_is_fibre == FALSE) { 8554 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8555 } 8556 8557 spinup_failed: 8558 8559 mutex_enter(SD_MUTEX(un)); 8560 8561 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. 
restart */ 8562 if (un->un_direct_priority_timeid != NULL) { 8563 timeout_id_t temp_id = un->un_direct_priority_timeid; 8564 un->un_direct_priority_timeid = NULL; 8565 mutex_exit(SD_MUTEX(un)); 8566 (void) untimeout(temp_id); 8567 mutex_enter(SD_MUTEX(un)); 8568 } 8569 8570 /* Cancel any pending start/stop timeouts */ 8571 if (un->un_startstop_timeid != NULL) { 8572 timeout_id_t temp_id = un->un_startstop_timeid; 8573 un->un_startstop_timeid = NULL; 8574 mutex_exit(SD_MUTEX(un)); 8575 (void) untimeout(temp_id); 8576 mutex_enter(SD_MUTEX(un)); 8577 } 8578 8579 mutex_exit(SD_MUTEX(un)); 8580 8581 /* There should not be any in-progress I/O so ASSERT this check */ 8582 ASSERT(un->un_ncmds_in_transport == 0); 8583 ASSERT(un->un_ncmds_in_driver == 0); 8584 8585 /* Do not free the softstate if the callback routine is active */ 8586 sd_sync_with_callback(un); 8587 8588 /* 8589 * Partition stats apparently are not used with removables. These would 8590 * not have been created during attach, so no need to clean them up... 8591 */ 8592 if (un->un_stats != NULL) { 8593 kstat_delete(un->un_stats); 8594 un->un_stats = NULL; 8595 } 8596 if (un->un_errstats != NULL) { 8597 kstat_delete(un->un_errstats); 8598 un->un_errstats = NULL; 8599 } 8600 8601 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8602 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8603 8604 ddi_prop_remove_all(devi); 8605 sema_destroy(&un->un_semoclose); 8606 cv_destroy(&un->un_state_cv); 8607 8608 getrbuf_failed: 8609 8610 sd_free_rqs(un); 8611 8612 alloc_rqs_failed: 8613 8614 devp->sd_private = NULL; 8615 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8616 8617 get_softstate_failed: 8618 /* 8619 * Note: the man pages are unclear as to whether or not doing a 8620 * ddi_soft_state_free(sd_state, instance) is the right way to 8621 * clean up after the ddi_soft_state_zalloc() if the subsequent 8622 * ddi_get_soft_state() fails. 
The implication seems to be 8623 * that the get_soft_state cannot fail if the zalloc succeeds. 8624 */ 8625 ddi_soft_state_free(sd_state, instance); 8626 8627 probe_failed: 8628 scsi_unprobe(devp); 8629 #ifdef SDDEBUG 8630 if ((sd_component_mask & SD_LOG_ATTACH_DETACH) && 8631 (sd_level_mask & SD_LOGMASK_TRACE)) { 8632 cmn_err(CE_CONT, "sd_unit_attach: un:0x%p exit failure\n", 8633 (void *)un); 8634 } 8635 #endif 8636 return (DDI_FAILURE); 8637 } 8638 8639 8640 /* 8641 * Function: sd_unit_detach 8642 * 8643 * Description: Performs DDI_DETACH processing for sddetach(). 8644 * 8645 * Return Code: DDI_SUCCESS 8646 * DDI_FAILURE 8647 * 8648 * Context: Kernel thread context 8649 */ 8650 8651 static int 8652 sd_unit_detach(dev_info_t *devi) 8653 { 8654 struct scsi_device *devp; 8655 struct sd_lun *un; 8656 int i; 8657 dev_t dev; 8658 #if !(defined(__i386) || defined(__amd64)) && !defined(__fibre) 8659 int reset_retval; 8660 #endif 8661 int instance = ddi_get_instance(devi); 8662 8663 mutex_enter(&sd_detach_mutex); 8664 8665 /* 8666 * Fail the detach for any of the following: 8667 * - Unable to get the sd_lun struct for the instance 8668 * - A layered driver has an outstanding open on the instance 8669 * - Another thread is already detaching this instance 8670 * - Another thread is currently performing an open 8671 */ 8672 devp = ddi_get_driver_private(devi); 8673 if ((devp == NULL) || 8674 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8675 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8676 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8677 mutex_exit(&sd_detach_mutex); 8678 return (DDI_FAILURE); 8679 } 8680 8681 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8682 8683 /* 8684 * Mark this instance as currently in a detach, to inhibit any 8685 * opens from a layered driver. 
8686 */ 8687 un->un_detach_count++; 8688 mutex_exit(&sd_detach_mutex); 8689 8690 dev = sd_make_device(SD_DEVINFO(un)); 8691 8692 _NOTE(COMPETING_THREADS_NOW); 8693 8694 mutex_enter(SD_MUTEX(un)); 8695 8696 /* 8697 * Fail the detach if there are any outstanding layered 8698 * opens on this device. 8699 */ 8700 for (i = 0; i < NDKMAP; i++) { 8701 if (un->un_ocmap.lyropen[i] != 0) { 8702 goto err_notclosed; 8703 } 8704 } 8705 8706 /* 8707 * Verify there are NO outstanding commands issued to this device. 8708 * ie, un_ncmds_in_transport == 0. 8709 * It's possible to have outstanding commands through the physio 8710 * code path, even though everything's closed. 8711 */ 8712 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8713 (un->un_direct_priority_timeid != NULL) || 8714 (un->un_state == SD_STATE_RWAIT)) { 8715 mutex_exit(SD_MUTEX(un)); 8716 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8717 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8718 goto err_stillbusy; 8719 } 8720 8721 /* 8722 * If we have the device reserved, release the reservation. 8723 */ 8724 if ((un->un_resvd_status & SD_RESERVE) && 8725 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8726 mutex_exit(SD_MUTEX(un)); 8727 /* 8728 * Note: sd_reserve_release sends a command to the device 8729 * via the sd_ioctlcmd() path, and can sleep. 8730 */ 8731 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8732 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8733 "sd_dr_detach: Cannot release reservation \n"); 8734 } 8735 } else { 8736 mutex_exit(SD_MUTEX(un)); 8737 } 8738 8739 /* 8740 * Untimeout any reserve recover, throttle reset, restart unit 8741 * and delayed broadcast timeout threads. Protect the timeout pointer 8742 * from getting nulled by their callback functions. 
8743 */ 8744 mutex_enter(SD_MUTEX(un)); 8745 if (un->un_resvd_timeid != NULL) { 8746 timeout_id_t temp_id = un->un_resvd_timeid; 8747 un->un_resvd_timeid = NULL; 8748 mutex_exit(SD_MUTEX(un)); 8749 (void) untimeout(temp_id); 8750 mutex_enter(SD_MUTEX(un)); 8751 } 8752 8753 if (un->un_reset_throttle_timeid != NULL) { 8754 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8755 un->un_reset_throttle_timeid = NULL; 8756 mutex_exit(SD_MUTEX(un)); 8757 (void) untimeout(temp_id); 8758 mutex_enter(SD_MUTEX(un)); 8759 } 8760 8761 if (un->un_startstop_timeid != NULL) { 8762 timeout_id_t temp_id = un->un_startstop_timeid; 8763 un->un_startstop_timeid = NULL; 8764 mutex_exit(SD_MUTEX(un)); 8765 (void) untimeout(temp_id); 8766 mutex_enter(SD_MUTEX(un)); 8767 } 8768 8769 if (un->un_dcvb_timeid != NULL) { 8770 timeout_id_t temp_id = un->un_dcvb_timeid; 8771 un->un_dcvb_timeid = NULL; 8772 mutex_exit(SD_MUTEX(un)); 8773 (void) untimeout(temp_id); 8774 } else { 8775 mutex_exit(SD_MUTEX(un)); 8776 } 8777 8778 /* Remove any pending reservation reclaim requests for this device */ 8779 sd_rmv_resv_reclaim_req(dev); 8780 8781 mutex_enter(SD_MUTEX(un)); 8782 8783 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8784 if (un->un_direct_priority_timeid != NULL) { 8785 timeout_id_t temp_id = un->un_direct_priority_timeid; 8786 un->un_direct_priority_timeid = NULL; 8787 mutex_exit(SD_MUTEX(un)); 8788 (void) untimeout(temp_id); 8789 mutex_enter(SD_MUTEX(un)); 8790 } 8791 8792 /* Cancel any active multi-host disk watch thread requests */ 8793 if (un->un_mhd_token != NULL) { 8794 mutex_exit(SD_MUTEX(un)); 8795 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8796 if (scsi_watch_request_terminate(un->un_mhd_token, 8797 SCSI_WATCH_TERMINATE_NOWAIT)) { 8798 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8799 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8800 /* 8801 * Note: We are returning here after having removed 8802 * some driver timeouts above. 
This is consistent with 8803 * the legacy implementation but perhaps the watch 8804 * terminate call should be made with the wait flag set. 8805 */ 8806 goto err_stillbusy; 8807 } 8808 mutex_enter(SD_MUTEX(un)); 8809 un->un_mhd_token = NULL; 8810 } 8811 8812 if (un->un_swr_token != NULL) { 8813 mutex_exit(SD_MUTEX(un)); 8814 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8815 if (scsi_watch_request_terminate(un->un_swr_token, 8816 SCSI_WATCH_TERMINATE_NOWAIT)) { 8817 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8818 "sd_dr_detach: Cannot cancel swr watch request\n"); 8819 /* 8820 * Note: We are returning here after having removed 8821 * some driver timeouts above. This is consistent with 8822 * the legacy implementation but perhaps the watch 8823 * terminate call should be made with the wait flag set. 8824 */ 8825 goto err_stillbusy; 8826 } 8827 mutex_enter(SD_MUTEX(un)); 8828 un->un_swr_token = NULL; 8829 } 8830 8831 mutex_exit(SD_MUTEX(un)); 8832 8833 /* 8834 * Clear any scsi_reset_notifies. We clear the reset notifies 8835 * if we have not registered one. 8836 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 8837 */ 8838 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8839 sd_mhd_reset_notify_cb, (caddr_t)un); 8840 8841 8842 8843 #if defined(__i386) || defined(__amd64) 8844 /* 8845 * Gratuitous bus resets sometimes cause an otherwise 8846 * okay ATA/ATAPI bus to hang. This is due the lack of 8847 * a clear spec of how resets should be implemented by ATA 8848 * disk drives. 8849 */ 8850 #elif !defined(__fibre) /* "#else if" does NOT work! */ 8851 /* 8852 * Reset target/bus. 8853 * 8854 * Note: This is a legacy workaround for Elite III dual-port drives that 8855 * will not come online after an aborted detach and subsequent re-attach 8856 * It should be removed when the Elite III FW is fixed, or the drives 8857 * are no longer supported. 
8858 */ 8859 if (un->un_f_cfg_is_atapi == FALSE) { 8860 reset_retval = 0; 8861 8862 /* If the device is in low power mode don't reset it */ 8863 8864 mutex_enter(&un->un_pm_mutex); 8865 if (!SD_DEVICE_IS_IN_LOW_POWER(un)) { 8866 /* 8867 * First try a LUN reset if we can, then move on to a 8868 * target reset if needed; swat the bus as a last 8869 * resort. 8870 */ 8871 mutex_exit(&un->un_pm_mutex); 8872 if (un->un_f_allow_bus_device_reset == TRUE) { 8873 if (un->un_f_lun_reset_enabled == TRUE) { 8874 reset_retval = 8875 scsi_reset(SD_ADDRESS(un), 8876 RESET_LUN); 8877 } 8878 if (reset_retval == 0) { 8879 reset_retval = 8880 scsi_reset(SD_ADDRESS(un), 8881 RESET_TARGET); 8882 } 8883 } 8884 if (reset_retval == 0) { 8885 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 8886 } 8887 } else { 8888 mutex_exit(&un->un_pm_mutex); 8889 } 8890 } 8891 #endif 8892 8893 /* 8894 * protect the timeout pointers from getting nulled by 8895 * their callback functions during the cancellation process. 8896 * In such a scenario untimeout can be invoked with a null value. 8897 */ 8898 _NOTE(NO_COMPETING_THREADS_NOW); 8899 8900 mutex_enter(&un->un_pm_mutex); 8901 if (un->un_pm_idle_timeid != NULL) { 8902 timeout_id_t temp_id = un->un_pm_idle_timeid; 8903 un->un_pm_idle_timeid = NULL; 8904 mutex_exit(&un->un_pm_mutex); 8905 8906 /* 8907 * Timeout is active; cancel it. 8908 * Note that it'll never be active on a device 8909 * that does not support PM therefore we don't 8910 * have to check before calling pm_idle_component. 8911 */ 8912 (void) untimeout(temp_id); 8913 (void) pm_idle_component(SD_DEVINFO(un), 0); 8914 mutex_enter(&un->un_pm_mutex); 8915 } 8916 8917 /* 8918 * Check whether there is already a timeout scheduled for power 8919 * management. If yes then don't lower the power here, that's. 8920 * the timeout handler's job. 
8921 */ 8922 if (un->un_pm_timeid != NULL) { 8923 timeout_id_t temp_id = un->un_pm_timeid; 8924 un->un_pm_timeid = NULL; 8925 mutex_exit(&un->un_pm_mutex); 8926 /* 8927 * Timeout is active; cancel it. 8928 * Note that it'll never be active on a device 8929 * that does not support PM therefore we don't 8930 * have to check before calling pm_idle_component. 8931 */ 8932 (void) untimeout(temp_id); 8933 (void) pm_idle_component(SD_DEVINFO(un), 0); 8934 8935 } else { 8936 mutex_exit(&un->un_pm_mutex); 8937 if ((un->un_f_pm_is_enabled == TRUE) && 8938 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 8939 DDI_SUCCESS)) { 8940 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8941 "sd_dr_detach: Lower power request failed, ignoring.\n"); 8942 /* 8943 * Fix for bug: 4297749, item # 13 8944 * The above test now includes a check to see if PM is 8945 * supported by this device before call 8946 * pm_lower_power(). 8947 * Note, the following is not dead code. The call to 8948 * pm_lower_power above will generate a call back into 8949 * our sdpower routine which might result in a timeout 8950 * handler getting activated. Therefore the following 8951 * code is valid and necessary. 8952 */ 8953 mutex_enter(&un->un_pm_mutex); 8954 if (un->un_pm_timeid != NULL) { 8955 timeout_id_t temp_id = un->un_pm_timeid; 8956 un->un_pm_timeid = NULL; 8957 mutex_exit(&un->un_pm_mutex); 8958 (void) untimeout(temp_id); 8959 (void) pm_idle_component(SD_DEVINFO(un), 0); 8960 } else { 8961 mutex_exit(&un->un_pm_mutex); 8962 } 8963 } 8964 } 8965 8966 /* 8967 * Cleanup from the scsi_ifsetcap() calls (437868) 8968 * Relocated here from above to be after the call to 8969 * pm_lower_power, which was getting errors. 
8970 */ 8971 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8972 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8973 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8974 8975 if (un->un_f_is_fibre == FALSE) { 8976 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8977 } 8978 8979 /* 8980 * Remove any event callbacks, fibre only 8981 */ 8982 if (un->un_f_is_fibre == TRUE) { 8983 if ((un->un_insert_event != NULL) && 8984 (ddi_remove_event_handler(un->un_insert_cb_id) != 8985 DDI_SUCCESS)) { 8986 /* 8987 * Note: We are returning here after having done 8988 * substantial cleanup above. This is consistent 8989 * with the legacy implementation but this may not 8990 * be the right thing to do. 8991 */ 8992 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8993 "sd_dr_detach: Cannot cancel insert event\n"); 8994 goto err_remove_event; 8995 } 8996 un->un_insert_event = NULL; 8997 8998 if ((un->un_remove_event != NULL) && 8999 (ddi_remove_event_handler(un->un_remove_cb_id) != 9000 DDI_SUCCESS)) { 9001 /* 9002 * Note: We are returning here after having done 9003 * substantial cleanup above. This is consistent 9004 * with the legacy implementation but this may not 9005 * be the right thing to do. 9006 */ 9007 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 9008 "sd_dr_detach: Cannot cancel remove event\n"); 9009 goto err_remove_event; 9010 } 9011 un->un_remove_event = NULL; 9012 } 9013 9014 /* Do not free the softstate if the callback routine is active */ 9015 sd_sync_with_callback(un); 9016 9017 /* 9018 * Hold the detach mutex here, to make sure that no other threads ever 9019 * can access a (partially) freed soft state structure. 9020 */ 9021 mutex_enter(&sd_detach_mutex); 9022 9023 /* 9024 * Clean up the soft state struct. 9025 * Cleanup is done in reverse order of allocs/inits. 9026 * At this point there should be no competing threads anymore. 9027 */ 9028 9029 /* Unregister and free device id. 
*/ 9030 ddi_devid_unregister(devi); 9031 if (un->un_devid) { 9032 ddi_devid_free(un->un_devid); 9033 un->un_devid = NULL; 9034 } 9035 9036 /* 9037 * Destroy wmap cache if it exists. 9038 */ 9039 if (un->un_wm_cache != NULL) { 9040 kmem_cache_destroy(un->un_wm_cache); 9041 un->un_wm_cache = NULL; 9042 } 9043 9044 /* Remove minor nodes */ 9045 ddi_remove_minor_node(devi, NULL); 9046 9047 /* 9048 * kstat cleanup is done in detach for all device types (4363169). 9049 * We do not want to fail detach if the device kstats are not deleted 9050 * since there is a confusion about the devo_refcnt for the device. 9051 * We just delete the kstats and let detach complete successfully. 9052 */ 9053 if (un->un_stats != NULL) { 9054 kstat_delete(un->un_stats); 9055 un->un_stats = NULL; 9056 } 9057 if (un->un_errstats != NULL) { 9058 kstat_delete(un->un_errstats); 9059 un->un_errstats = NULL; 9060 } 9061 9062 /* Remove partition stats (not created for removables) */ 9063 if (!ISREMOVABLE(un)) { 9064 for (i = 0; i < NSDMAP; i++) { 9065 if (un->un_pstats[i] != NULL) { 9066 kstat_delete(un->un_pstats[i]); 9067 un->un_pstats[i] = NULL; 9068 } 9069 } 9070 } 9071 9072 /* Remove xbuf registration */ 9073 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 9074 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 9075 9076 /* Remove driver properties */ 9077 ddi_prop_remove_all(devi); 9078 9079 mutex_destroy(&un->un_pm_mutex); 9080 cv_destroy(&un->un_pm_busy_cv); 9081 9082 /* Open/close semaphore */ 9083 sema_destroy(&un->un_semoclose); 9084 9085 /* Removable media condvar. */ 9086 cv_destroy(&un->un_state_cv); 9087 9088 /* Suspend/resume condvar. 
 */
	cv_destroy(&un->un_suspend_cv);
	cv_destroy(&un->un_disk_busy_cv);

	sd_free_rqs(un);

	/* Free up soft state */
	devp->sd_private = NULL;
	bzero(un, sizeof (struct sd_lun));
	ddi_soft_state_free(sd_state, instance);

	mutex_exit(&sd_detach_mutex);

	/* This frees up the INQUIRY data associated with the device. */
	scsi_unprobe(devp);

	return (DDI_SUCCESS);

	/*
	 * Error exit targets.  Note that cleanup already performed before
	 * the corresponding "goto" is NOT undone here; see the comments at
	 * each goto site above.
	 */
err_notclosed:
	mutex_exit(SD_MUTEX(un));

err_stillbusy:
	_NOTE(NO_COMPETING_THREADS_NOW);

err_remove_event:
	mutex_enter(&sd_detach_mutex);
	un->un_detach_count--;
	mutex_exit(&sd_detach_mutex);

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n");
	return (DDI_FAILURE);
}


/*
 * Driver minor node structure and data table
 */
struct driver_minor_data {
	char	*name;		/* minor node name, e.g. "a" or "a,raw" */
	minor_t	minor;		/* partition number encoded in the minor */
	int	type;		/* S_IFBLK (block) or S_IFCHR (raw/char) */
};

/*
 * Minor nodes for non-EFI (VTOC-labeled) devices.  Slices "a".."h" are
 * always present; "i".."p" are added on 16-slice VTOC platforms, and
 * "q".."u" where the firmware requires fdisk partitions.  The table is
 * terminated by a NULL name entry.
 */
static struct driver_minor_data sd_minor_data[] = {
	{"a", 0, S_IFBLK},
	{"b", 1, S_IFBLK},
	{"c", 2, S_IFBLK},
	{"d", 3, S_IFBLK},
	{"e", 4, S_IFBLK},
	{"f", 5, S_IFBLK},
	{"g", 6, S_IFBLK},
	{"h", 7, S_IFBLK},
#if defined(_SUNOS_VTOC_16)
	{"i", 8, S_IFBLK},
	{"j", 9, S_IFBLK},
	{"k", 10, S_IFBLK},
	{"l", 11, S_IFBLK},
	{"m", 12, S_IFBLK},
	{"n", 13, S_IFBLK},
	{"o", 14, S_IFBLK},
	{"p", 15, S_IFBLK},
#endif			/* defined(_SUNOS_VTOC_16) */
#if defined(_FIRMWARE_NEEDS_FDISK)
	{"q", 16, S_IFBLK},
	{"r", 17, S_IFBLK},
	{"s", 18, S_IFBLK},
	{"t", 19, S_IFBLK},
	{"u", 20, S_IFBLK},
#endif			/* defined(_FIRMWARE_NEEDS_FDISK) */
	{"a,raw", 0, S_IFCHR},
	{"b,raw", 1, S_IFCHR},
	{"c,raw", 2, S_IFCHR},
	{"d,raw", 3, S_IFCHR},
	{"e,raw", 4, S_IFCHR},
	{"f,raw", 5, S_IFCHR},
	{"g,raw", 6, S_IFCHR},
	{"h,raw", 7, S_IFCHR},
#if defined(_SUNOS_VTOC_16)
	{"i,raw", 8, S_IFCHR},
	{"j,raw", 9, S_IFCHR},
	{"k,raw", 10, S_IFCHR},
	{"l,raw", 11, S_IFCHR},
	{"m,raw", 12, S_IFCHR},
	{"n,raw", 13, S_IFCHR},
	{"o,raw", 14, S_IFCHR},
	{"p,raw", 15, S_IFCHR},
#endif			/* defined(_SUNOS_VTOC_16) */
#if defined(_FIRMWARE_NEEDS_FDISK)
	{"q,raw", 16, S_IFCHR},
	{"r,raw", 17, S_IFCHR},
	{"s,raw", 18, S_IFCHR},
	{"t,raw", 19, S_IFCHR},
	{"u,raw", 20, S_IFCHR},
#endif			/* defined(_FIRMWARE_NEEDS_FDISK) */
	{0}		/* NULL name terminates the table */
};

/*
 * Minor nodes for EFI-labeled devices (selected in sd_create_minor_nodes
 * when un_blockcount > DK_MAX_BLOCKS): slices "a".."g" plus the "wd"
 * node; no VTOC slices 8-15.
 */
static struct driver_minor_data sd_minor_data_efi[] = {
	{"a", 0, S_IFBLK},
	{"b", 1, S_IFBLK},
	{"c", 2, S_IFBLK},
	{"d", 3, S_IFBLK},
	{"e", 4, S_IFBLK},
	{"f", 5, S_IFBLK},
	{"g", 6, S_IFBLK},
	{"wd", 7, S_IFBLK},
#if defined(_FIRMWARE_NEEDS_FDISK)
	{"q", 16, S_IFBLK},
	{"r", 17, S_IFBLK},
	{"s", 18, S_IFBLK},
	{"t", 19, S_IFBLK},
	{"u", 20, S_IFBLK},
#endif			/* defined(_FIRMWARE_NEEDS_FDISK) */
	{"a,raw", 0, S_IFCHR},
	{"b,raw", 1, S_IFCHR},
	{"c,raw", 2, S_IFCHR},
	{"d,raw", 3, S_IFCHR},
	{"e,raw", 4, S_IFCHR},
	{"f,raw", 5, S_IFCHR},
	{"g,raw", 6, S_IFCHR},
	{"wd,raw", 7, S_IFCHR},
#if defined(_FIRMWARE_NEEDS_FDISK)
	{"q,raw", 16, S_IFCHR},
	{"r,raw", 17, S_IFCHR},
	{"s,raw", 18, S_IFCHR},
	{"t,raw", 19, S_IFCHR},
	{"u,raw", 20, S_IFCHR},
#endif			/* defined(_FIRMWARE_NEEDS_FDISK) */
	{0}		/* NULL name terminates the table */
};


/*
 * Function: sd_create_minor_nodes
 *
 * Description: Create the minor device nodes for the instance.
 *
 * Arguments: un - driver soft state (unit) structure
 *		devi - pointer to device info structure
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sd_create_minor_nodes(struct sd_lun *un, dev_info_t *devi)
{
	struct driver_minor_data	*dmdp;
	struct scsi_device		*devp;
	int				instance;
	char				name[48];

	ASSERT(un != NULL);
	devp = ddi_get_driver_private(devi);
	instance = ddi_get_instance(devp->sd_dev);

	/*
	 * Create all the minor nodes for this target.
	 * Devices with more blocks than DK_MAX_BLOCKS get the EFI node
	 * table; everything else gets the VTOC table.
	 */
	if (un->un_blockcount > DK_MAX_BLOCKS)
		dmdp = sd_minor_data_efi;
	else
		dmdp = sd_minor_data;
	while (dmdp->name != NULL) {

		/* Table names are short ("a".."u,raw"); 48 bytes is ample */
		(void) sprintf(name, "%s", dmdp->name);

		/*
		 * The minor number encodes both the instance (high bits)
		 * and the partition (low SDUNIT_SHIFT bits).
		 */
		if (ddi_create_minor_node(devi, name, dmdp->type,
		    (instance << SDUNIT_SHIFT) | dmdp->minor,
		    un->un_node_type, NULL) == DDI_FAILURE) {
			/*
			 * Clean up any nodes that may have been created, in
			 * case this fails in the middle of the loop.
			 */
			ddi_remove_minor_node(devi, NULL);
			return (DDI_FAILURE);
		}
		dmdp++;
	}

	return (DDI_SUCCESS);
}


/*
 * Function: sd_create_errstats
 *
 * Description: This routine instantiates the device error stats.
 *
 *		Note: During attach the stats are instantiated first so they are
 *		available for attach-time routines that utilize the driver
 *		iopath to send commands to the device. The stats are initialized
 *		separately so data obtained during some attach-time routines is
 *		available.
(4362483)
 *
 * Arguments: un - driver soft state (unit) structure
 *		instance - driver instance
 *
 * Context: Kernel thread context
 */

static void
sd_create_errstats(struct sd_lun *un, int instance)
{
	struct sd_errstats	*stp;
	char	kstatmodule_err[KSTAT_STRLEN];
	char	kstatname[KSTAT_STRLEN];
	/* Number of named entries == number of kstat_named_t slots in struct */
	int	ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t));

	ASSERT(un != NULL);

	/* Already instantiated (e.g. on a re-attach); nothing to do. */
	if (un->un_errstats != NULL) {
		return;
	}

	/* kstat module is "<label>err", name is "<label><instance>,err" */
	(void) snprintf(kstatmodule_err, sizeof (kstatmodule_err),
	    "%serr", sd_label);
	(void) snprintf(kstatname, sizeof (kstatname),
	    "%s%d,err", sd_label, instance);

	un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname,
	    "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT);

	if (un->un_errstats == NULL) {
		SD_ERROR(SD_LOG_ATTACH_DETACH, un,
		    "sd_create_errstats: Failed kstat_create\n");
		return;
	}

	stp = (struct sd_errstats *)un->un_errstats->ks_data;
	kstat_named_init(&stp->sd_softerrs, "Soft Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_harderrs, "Hard Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_transerrs, "Transport Errors",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_vid, "Vendor",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_pid, "Product",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_revision, "Revision",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_serial, "Serial No",
	    KSTAT_DATA_CHAR);
	kstat_named_init(&stp->sd_capacity, "Size",
	    KSTAT_DATA_ULONGLONG);
	kstat_named_init(&stp->sd_rq_media_err, "Media Error",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_nodev_err, "No Device",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_recov_err, "Recoverable",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis",
	    KSTAT_DATA_UINT32);

	un->un_errstats->ks_private = un;
	un->un_errstats->ks_update = nulldev;

	kstat_install(un->un_errstats);
}


/*
 * Function: sd_set_errstats
 *
 * Description: This routine sets the value of the vendor id, product id,
 *		revision, serial number, and capacity device error stats.
 *
 *		Note: During attach the stats are instantiated first so they are
 *		available for attach-time routines that utilize the driver
 *		iopath to send commands to the device. The stats are initialized
 *		separately so data obtained during some attach-time routines is
 *		available. (4362483)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Kernel thread context
 */

static void
sd_set_errstats(struct sd_lun *un)
{
	struct sd_errstats	*stp;

	ASSERT(un != NULL);
	ASSERT(un->un_errstats != NULL);
	stp = (struct sd_errstats *)un->un_errstats->ks_data;
	ASSERT(stp != NULL);
	/*
	 * Copy the fixed-width INQUIRY fields (vid=8, pid=16, rev=4 bytes).
	 * NOTE(review): strncpy may leave value.c without a NUL terminator
	 * when the field is full width — presumably consumers treat these
	 * as fixed-size fields; confirm before changing.
	 */
	(void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
	(void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
	(void) strncpy(stp->sd_revision.value.c,
	    un->un_sd->sd_inq->inq_revision, 4);

	/*
	 * Set the "Serial No" kstat for Sun qualified drives (indicated by
	 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid)
	 * (4376302))
	 */
	if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
		bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
		    sizeof (SD_INQUIRY(un)->inq_serial));
	}

	if (un->un_f_blockcount_is_valid != TRUE) {
		/*
		 * Set capacity error stat to 0 for no media. This ensures
		 * a valid capacity is displayed in response to 'iostat -E'
		 * when no media is present in the device.
		 */
		stp->sd_capacity.value.ui64 = 0;
	} else {
		/*
		 * Multiply un_blockcount by un->un_sys_blocksize to get
		 * capacity.
		 *
		 * Note: for non-512 blocksize devices "un_blockcount" has been
		 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
		 * (un_tgt_blocksize / un->un_sys_blocksize).
		 */
		stp->sd_capacity.value.ui64 = (uint64_t)
		    ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
	}
}


/*
 * Function: sd_set_pstats
 *
 * Description: This routine instantiates and initializes the partition
 *		stats for each partition with more than zero blocks.
 *		(4363169)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Kernel thread context
 */

static void
sd_set_pstats(struct sd_lun *un)
{
	char	kstatname[KSTAT_STRLEN];
	int	instance;
	int	i;

	ASSERT(un != NULL);

	instance = ddi_get_instance(SD_DEVINFO(un));

	/* Note:x86: is this a VTOC8/VTOC16 difference? */
	for (i = 0; i < NSDMAP; i++) {
		/* Only create a kstat once, and only for non-empty slices */
		if ((un->un_pstats[i] == NULL) &&
		    (un->un_map[i].dkl_nblk != 0)) {
			(void) snprintf(kstatname, sizeof (kstatname),
			    "%s%d,%s", sd_label, instance,
			    sd_minor_data[i].name);
			un->un_pstats[i] = kstat_create(sd_label,
			    instance, kstatname, "partition", KSTAT_TYPE_IO,
			    1, KSTAT_FLAG_PERSISTENT);
			if (un->un_pstats[i] != NULL) {
				/* Serialize updates with the unit's mutex */
				un->un_pstats[i]->ks_lock = SD_MUTEX(un);
				kstat_install(un->un_pstats[i]);
			}
		}
	}
}


#if (defined(__fibre))
/*
 * Function: sd_init_event_callbacks
 *
 * Description: This routine initializes the insertion and removal event
 *		callbacks.
(fibre only)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Kernel thread context
 */

static void
sd_init_event_callbacks(struct sd_lun *un)
{
	ASSERT(un != NULL);

	/* Register for FC-AL insertion events (only once per unit) */
	if ((un->un_insert_event == NULL) &&
	    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
	    &un->un_insert_event) == DDI_SUCCESS)) {
		/*
		 * Add the callback for an insertion event
		 */
		(void) ddi_add_event_handler(SD_DEVINFO(un),
		    un->un_insert_event, sd_event_callback, (void *)un,
		    &(un->un_insert_cb_id));
	}

	/* Register for FC-AL removal events (only once per unit) */
	if ((un->un_remove_event == NULL) &&
	    (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
	    &un->un_remove_event) == DDI_SUCCESS)) {
		/*
		 * Add the callback for a removal event
		 */
		(void) ddi_add_event_handler(SD_DEVINFO(un),
		    un->un_remove_event, sd_event_callback, (void *)un,
		    &(un->un_remove_cb_id));
	}
}


/*
 * Function: sd_event_callback
 *
 * Description: This routine handles insert/remove events (photon). The
 *		state is changed to OFFLINE which can be used to suppress
 *		error msgs. (fibre only)
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Context: Callout thread context
 */
/* ARGSUSED */
static void
sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
    void *bus_impldata)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
	if (event == un->un_insert_event) {
		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
		mutex_enter(SD_MUTEX(un));
		/* Re-inserted: leave OFFLINE and restore the prior state */
		if (un->un_state == SD_STATE_OFFLINE) {
			if (un->un_last_state != SD_STATE_SUSPENDED) {
				un->un_state = un->un_last_state;
			} else {
				/*
				 * We have gone through SUSPEND/RESUME while
				 * we were offline. Restore the last state
				 */
				un->un_state = un->un_save_state;
			}
		}
		mutex_exit(SD_MUTEX(un));

		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
	} else if (event == un->un_remove_event) {
		SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
		mutex_enter(SD_MUTEX(un));
		/*
		 * We need to handle an event callback that occurs during
		 * the suspend operation, since we don't prevent it.
		 */
		if (un->un_state != SD_STATE_OFFLINE) {
			if (un->un_state != SD_STATE_SUSPENDED) {
				New_state(un, SD_STATE_OFFLINE);
			} else {
				un->un_last_state = SD_STATE_OFFLINE;
			}
		}
		mutex_exit(SD_MUTEX(un));
	} else {
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
		    "!Unknown event\n");
	}

}
#endif


/*
 * Function: sd_disable_caching()
 *
 * Description: This routine is the driver entry point for disabling
 *		read and write caching by modifying the WCE (write cache
 *		enable) and RCD (read cache disable) bits of mode
 *		page 8 (MODEPAGE_CACHING).
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: EIO
 *		code returned by sd_send_scsi_MODE_SENSE and
 *		sd_send_scsi_MODE_SELECT
 *
 * Context: Kernel Thread
 */

static int
sd_disable_caching(struct sd_lun *un)
{
	struct mode_caching	*mode_caching_page;
	uchar_t			*header;
	size_t			buflen;
	int			hdrlen;
	int			bd_len;
	int			rval = 0;

	ASSERT(un != NULL);

	/*
	 * Do a test unit ready, otherwise a mode sense may not work if this
	 * is the first command sent to the device after boot.
	 */
	(void) sd_send_scsi_TEST_UNIT_READY(un, 0);

	/* ATAPI uses the 8-byte group-2 mode header, SCSI the 4-byte one */
	if (un->un_f_cfg_is_atapi == TRUE) {
		hdrlen = MODE_HEADER_LENGTH_GRP2;
	} else {
		hdrlen = MODE_HEADER_LENGTH;
	}

	/*
	 * Allocate memory for the retrieved mode page and its headers. Set
	 * a pointer to the page itself.
	 */
	buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching);
	header = kmem_zalloc(buflen, KM_SLEEP);

	/* Get the information from the device. */
	if (un->un_f_cfg_is_atapi == TRUE) {
		rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen,
		    MODEPAGE_CACHING, SD_PATH_DIRECT);
	} else {
		rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen,
		    MODEPAGE_CACHING, SD_PATH_DIRECT);
	}
	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
		    "sd_disable_caching: Mode Sense Failed\n");
		kmem_free(header, buflen);
		return (rval);
	}

	/*
	 * Determine size of Block Descriptors in order to locate
	 * the mode page data. ATAPI devices return 0, SCSI devices
	 * should return MODE_BLK_DESC_LENGTH.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		struct mode_header_grp2	*mhp;
		mhp = (struct mode_header_grp2 *)header;
		bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
	} else {
		bd_len = ((struct mode_header *)header)->bdesc_length;
	}

	/*
	 * Reject a descriptor length larger than we allocated for; the
	 * page pointer computed below would otherwise run past the buffer.
	 */
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_disable_caching: Mode Sense returned invalid "
		    "block descriptor length\n");
		kmem_free(header, buflen);
		return (EIO);
	}

	mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);

	/* Check the relevant bits on successful mode sense. */
	if ((mode_caching_page->wce) || !(mode_caching_page->rcd)) {
		/*
		 * Read or write caching is enabled. Disable both of them.
		 */
		mode_caching_page->wce = 0;
		mode_caching_page->rcd = 1;

		/* Clear reserved bits before mode select. */
		mode_caching_page->mode_page.ps = 0;

		/*
		 * Clear out mode header for mode select.
		 * The rest of the retrieved page will be reused.
		 */
		bzero(header, hdrlen);

		/* Change the cache page to disable all caching. */
		if (un->un_f_cfg_is_atapi == TRUE) {
			rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header,
			    buflen, SD_SAVE_PAGE, SD_PATH_DIRECT);
		} else {
			rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header,
			    buflen, SD_SAVE_PAGE, SD_PATH_DIRECT);
		}
	}

	kmem_free(header, buflen);
	return (rval);
}


/*
 * Function: sd_make_device
 *
 * Description: Utility routine to return the Solaris device number from
 *		the data in the device's dev_info structure.
 *
 * Return Code: The Solaris device number
 *
 * Context: Any
 */

static dev_t
sd_make_device(dev_info_t *devi)
{
	/* Partition bits are zero: this is the dev_t of slice 0 */
	return (makedevice(ddi_name_to_major(ddi_get_name(devi)),
	    ddi_get_instance(devi) << SDUNIT_SHIFT));
}


/*
 * Function: sd_pm_entry
 *
 * Description: Called at the start of a new command to manage power
 *		and busy status of a device. This includes determining whether
 *		the current power state of the device is sufficient for
 *		performing the command or whether it must be changed.
 *		The PM framework is notified appropriately.
 *		Only with a return status of DDI_SUCCESS will the
 *		component be busy to the framework.
 *
 *		All callers of sd_pm_entry must check the return status
 *		and only call sd_pm_exit if it was DDI_SUCCESS. A status
 *		of DDI_FAILURE indicates the device failed to power up.
 *		In this case un_pm_count has been adjusted so the result
 *		on exit is still powered down, ie. count is less than 0.
 * Calling sd_pm_exit with this count value hits an ASSERT.
 *
 * Return Code: DDI_SUCCESS or DDI_FAILURE
 *
 * Context: Kernel thread context.
 */

static int
sd_pm_entry(struct sd_lun *un)
{
	int return_status = DDI_SUCCESS;

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");

	if (un->un_f_pm_is_enabled == FALSE) {
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_entry: exiting, PM not enabled\n");
		return (return_status);
	}

	/*
	 * Just increment a counter if PM is enabled. On the transition from
	 * 0 ==> 1, mark the device as busy. The iodone side will decrement
	 * the count with each IO and mark the device as idle when the count
	 * hits 0.
	 *
	 * If the count is less than 0 the device is powered down. If a powered
	 * down device is successfully powered up then the count must be
	 * incremented to reflect the power up. Note that it'll get incremented
	 * a second time to become busy.
	 *
	 * Because the following has the potential to change the device state
	 * and must release the un_pm_mutex to do so, only one thread can be
	 * allowed through at a time.
	 */

	/* un_pm_busy serializes the power-state transition below */
	mutex_enter(&un->un_pm_mutex);
	while (un->un_pm_busy == TRUE) {
		cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
	}
	un->un_pm_busy = TRUE;

	if (un->un_pm_count < 1) {

		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");

		/*
		 * Indicate we are now busy so the framework won't attempt to
		 * power down the device. This call will only fail if either
		 * we passed a bad component number or the device has no
		 * components. Neither of these should ever happen.
		 */
		mutex_exit(&un->un_pm_mutex);
		return_status = pm_busy_component(SD_DEVINFO(un), 0);
		ASSERT(return_status == DDI_SUCCESS);

		mutex_enter(&un->un_pm_mutex);

		if (un->un_pm_count < 0) {
			mutex_exit(&un->un_pm_mutex);

			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_entry: power up component\n");

			/*
			 * pm_raise_power will cause sdpower to be called
			 * which brings the device power level to the
			 * desired state, ON in this case. If successful,
			 * un_pm_count and un_power_level will be updated
			 * appropriately.
			 */
			return_status = pm_raise_power(SD_DEVINFO(un), 0,
			    SD_SPINDLE_ON);

			mutex_enter(&un->un_pm_mutex);

			if (return_status != DDI_SUCCESS) {
				/*
				 * Power up failed.
				 * Idle the device and adjust the count
				 * so the result on exit is that we're
				 * still powered down, ie. count is less than 0.
				 */
				SD_TRACE(SD_LOG_IO_PM, un,
				    "sd_pm_entry: power up failed,"
				    " idle the component\n");

				(void) pm_idle_component(SD_DEVINFO(un), 0);
				un->un_pm_count--;
			} else {
				/*
				 * Device is powered up, verify the
				 * count is non-negative.
				 * This is debug only.
				 */
				ASSERT(un->un_pm_count == 0);
			}
		}

		if (return_status == DDI_SUCCESS) {
			/*
			 * For performance, now that the device has been tagged
			 * as busy, and it's known to be powered up, update the
			 * chain types to use jump tables that do not include
			 * pm. This significantly lowers the overhead and
			 * therefore improves performance.
			 */

			mutex_exit(&un->un_pm_mutex);
			mutex_enter(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_entry: changing uscsi_chain_type from %d\n",
			    un->un_uscsi_chain_type);

			if (ISREMOVABLE(un)) {
				un->un_buf_chain_type =
				    SD_CHAIN_INFO_RMMEDIA_NO_PM;
			} else {
				un->un_buf_chain_type =
				    SD_CHAIN_INFO_DISK_NO_PM;
			}
			un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;

			SD_TRACE(SD_LOG_IO_PM, un,
			    " changed uscsi_chain_type to %d\n",
			    un->un_uscsi_chain_type);
			mutex_exit(SD_MUTEX(un));
			mutex_enter(&un->un_pm_mutex);

			if (un->un_pm_idle_timeid == NULL) {
				/* 300 ms. */
				un->un_pm_idle_timeid =
				    timeout(sd_pm_idletimeout_handler, un,
				    (drv_usectohz((clock_t)300000)));
				/*
				 * Include an extra call to busy which keeps the
				 * device busy with-respect-to the PM layer
				 * until the timer fires, at which time it'll
				 * get the extra idle call.
				 */
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			}
		}
	}
	un->un_pm_busy = FALSE;
	/* Next... */
	cv_signal(&un->un_pm_busy_cv);

	un->un_pm_count++;

	SD_TRACE(SD_LOG_IO_PM, un,
	    "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count);

	mutex_exit(&un->un_pm_mutex);

	return (return_status);
}


/*
 * Function: sd_pm_exit
 *
 * Description: Called at the completion of a command to manage busy
 *		status for the device. If the device becomes idle the
 *		PM framework is notified.
 *
 * Context: Kernel thread context
 */

static void
sd_pm_exit(struct sd_lun *un)
{
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n");

	/*
	 * After attach the following flag is only read, so don't
	 * take the penalty of acquiring a mutex for it.
	 */
	if (un->un_f_pm_is_enabled == TRUE) {

		mutex_enter(&un->un_pm_mutex);
		un->un_pm_count--;

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);

		/* Count must not go negative; sd_pm_entry incremented it */
		ASSERT(un->un_pm_count >= 0);
		if (un->un_pm_count == 0) {
			mutex_exit(&un->un_pm_mutex);

			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_exit: idle component\n");

			(void) pm_idle_component(SD_DEVINFO(un), 0);

		} else {
			mutex_exit(&un->un_pm_mutex);
		}
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
}


/*
 * Function: sdopen
 *
 * Description: Driver's open(9e) entry point function.
 *
 * Arguments: dev_i - pointer to device number
 *		flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
 *		otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
 *		cred_p - user credential pointer
 *
 * Return Code: EINVAL
 *		ENXIO
 *		EIO
 *		EROFS
 *		EBUSY
 *
 * Context: Kernel thread context
 */
/* ARGSUSED */
static int
sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
{
	struct sd_lun	*un;
	int		nodelay;
	int		part;
	uint64_t	partmask;
	int		instance;
	dev_t		dev;
	int		rval = EIO;

	/* Validate the open type */
	if (otyp >= OTYPCNT) {
		return (EINVAL);
	}

	dev = *dev_p;
	instance = SDUNIT(dev);
	mutex_enter(&sd_detach_mutex);

	/*
	 * Fail the open if there is no softstate for the instance, or
	 * if another thread somewhere is trying to detach the instance.
	 */
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    (un->un_detach_count != 0)) {
		mutex_exit(&sd_detach_mutex);
		/*
		 * The probe cache only needs to be cleared when open (9e) fails
		 * with ENXIO (4238046).
9974 */ 9975 /* 9976 * un-conditionally clearing probe cache is ok with 9977 * separate sd/ssd binaries 9978 * x86 platform can be an issue with both parallel 9979 * and fibre in 1 binary 9980 */ 9981 sd_scsi_clear_probe_cache(); 9982 return (ENXIO); 9983 } 9984 9985 /* 9986 * The un_layer_count is to prevent another thread in specfs from 9987 * trying to detach the instance, which can happen when we are 9988 * called from a higher-layer driver instead of thru specfs. 9989 * This will not be needed when DDI provides a layered driver 9990 * interface that allows specfs to know that an instance is in 9991 * use by a layered driver & should not be detached. 9992 * 9993 * Note: the semantics for layered driver opens are exactly one 9994 * close for every open. 9995 */ 9996 if (otyp == OTYP_LYR) { 9997 un->un_layer_count++; 9998 } 9999 10000 /* 10001 * Keep a count of the current # of opens in progress. This is because 10002 * some layered drivers try to call us as a regular open. This can 10003 * cause problems that we cannot prevent, however by keeping this count 10004 * we can at least keep our open and detach routines from racing against 10005 * each other under such conditions. 10006 */ 10007 un->un_opens_in_progress++; 10008 mutex_exit(&sd_detach_mutex); 10009 10010 nodelay = (flag & (FNDELAY | FNONBLOCK)); 10011 part = SDPART(dev); 10012 partmask = 1 << part; 10013 10014 /* 10015 * We use a semaphore here in order to serialize 10016 * open and close requests on the device. 10017 */ 10018 sema_p(&un->un_semoclose); 10019 10020 mutex_enter(SD_MUTEX(un)); 10021 10022 /* 10023 * All device accesses go thru sdstrategy() where we check 10024 * on suspend status but there could be a scsi_poll command, 10025 * which bypasses sdstrategy(), so we need to check pm 10026 * status. 
10027 */ 10028 10029 if (!nodelay) { 10030 while ((un->un_state == SD_STATE_SUSPENDED) || 10031 (un->un_state == SD_STATE_PM_CHANGING)) { 10032 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10033 } 10034 10035 mutex_exit(SD_MUTEX(un)); 10036 if (sd_pm_entry(un) != DDI_SUCCESS) { 10037 rval = EIO; 10038 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 10039 "sdopen: sd_pm_entry failed\n"); 10040 goto open_failed_with_pm; 10041 } 10042 mutex_enter(SD_MUTEX(un)); 10043 } 10044 10045 /* check for previous exclusive open */ 10046 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 10047 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 10048 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 10049 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 10050 10051 if (un->un_exclopen & (partmask)) { 10052 goto excl_open_fail; 10053 } 10054 10055 if (flag & FEXCL) { 10056 int i; 10057 if (un->un_ocmap.lyropen[part]) { 10058 goto excl_open_fail; 10059 } 10060 for (i = 0; i < (OTYPCNT - 1); i++) { 10061 if (un->un_ocmap.regopen[i] & (partmask)) { 10062 goto excl_open_fail; 10063 } 10064 } 10065 } 10066 10067 /* 10068 * Check the write permission if this is a removable media device, 10069 * NDELAY has not been set, and writable permission is requested. 10070 * 10071 * Note: If NDELAY was set and this is write-protected media the WRITE 10072 * attempt will fail with EIO as part of the I/O processing. This is a 10073 * more permissive implementation that allows the open to succeed and 10074 * WRITE attempts to fail when appropriate. 10075 */ 10076 if (ISREMOVABLE(un)) { 10077 if ((flag & FWRITE) && (!nodelay)) { 10078 mutex_exit(SD_MUTEX(un)); 10079 /* 10080 * Defer the check for write permission on writable 10081 * DVD drive till sdstrategy and will not fail open even 10082 * if FWRITE is set as the device can be writable 10083 * depending upon the media and the media can change 10084 * after the call to open(). 
10085 */ 10086 if (un->un_f_dvdram_writable_device == FALSE) { 10087 if (ISCD(un) || sr_check_wp(dev)) { 10088 rval = EROFS; 10089 mutex_enter(SD_MUTEX(un)); 10090 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10091 "write to cd or write protected media\n"); 10092 goto open_fail; 10093 } 10094 } 10095 mutex_enter(SD_MUTEX(un)); 10096 } 10097 } 10098 10099 /* 10100 * If opening in NDELAY/NONBLOCK mode, just return. 10101 * Check if disk is ready and has a valid geometry later. 10102 */ 10103 if (!nodelay) { 10104 mutex_exit(SD_MUTEX(un)); 10105 rval = sd_ready_and_valid(un); 10106 mutex_enter(SD_MUTEX(un)); 10107 /* 10108 * Fail if device is not ready or if the number of disk 10109 * blocks is zero or negative for non CD devices. 10110 */ 10111 if ((rval != SD_READY_VALID) || 10112 (!ISCD(un) && un->un_map[part].dkl_nblk <= 0)) { 10113 if (ISREMOVABLE(un)) { 10114 rval = ENXIO; 10115 } else { 10116 rval = EIO; 10117 } 10118 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10119 "device not ready or invalid disk block value\n"); 10120 goto open_fail; 10121 } 10122 #if defined(__i386) || defined(__amd64) 10123 } else { 10124 uchar_t *cp; 10125 /* 10126 * x86 requires special nodelay handling, so that p0 is 10127 * always defined and accessible. 10128 * Invalidate geometry only if device is not already open. 
10129 */ 10130 cp = &un->un_ocmap.chkd[0]; 10131 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10132 if (*cp != (uchar_t)0) { 10133 break; 10134 } 10135 cp++; 10136 } 10137 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10138 un->un_f_geometry_is_valid = FALSE; 10139 } 10140 10141 #endif 10142 } 10143 10144 if (otyp == OTYP_LYR) { 10145 un->un_ocmap.lyropen[part]++; 10146 } else { 10147 un->un_ocmap.regopen[otyp] |= partmask; 10148 } 10149 10150 /* Set up open and exclusive open flags */ 10151 if (flag & FEXCL) { 10152 un->un_exclopen |= (partmask); 10153 } 10154 10155 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10156 "open of part %d type %d\n", part, otyp); 10157 10158 mutex_exit(SD_MUTEX(un)); 10159 if (!nodelay) { 10160 sd_pm_exit(un); 10161 } 10162 10163 sema_v(&un->un_semoclose); 10164 10165 mutex_enter(&sd_detach_mutex); 10166 un->un_opens_in_progress--; 10167 mutex_exit(&sd_detach_mutex); 10168 10169 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 10170 return (DDI_SUCCESS); 10171 10172 excl_open_fail: 10173 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 10174 rval = EBUSY; 10175 10176 open_fail: 10177 mutex_exit(SD_MUTEX(un)); 10178 10179 /* 10180 * On a failed open we must exit the pm management. 10181 */ 10182 if (!nodelay) { 10183 sd_pm_exit(un); 10184 } 10185 open_failed_with_pm: 10186 sema_v(&un->un_semoclose); 10187 10188 mutex_enter(&sd_detach_mutex); 10189 un->un_opens_in_progress--; 10190 if (otyp == OTYP_LYR) { 10191 un->un_layer_count--; 10192 } 10193 mutex_exit(&sd_detach_mutex); 10194 10195 return (rval); 10196 } 10197 10198 10199 /* 10200 * Function: sdclose 10201 * 10202 * Description: Driver's close(9e) entry point function. 
10203 * 10204 * Arguments: dev - device number 10205 * flag - file status flag, informational only 10206 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 10207 * cred_p - user credential pointer 10208 * 10209 * Return Code: ENXIO 10210 * 10211 * Context: Kernel thread context 10212 */ 10213 /* ARGSUSED */ 10214 static int 10215 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 10216 { 10217 struct sd_lun *un; 10218 uchar_t *cp; 10219 int part; 10220 int nodelay; 10221 int rval = 0; 10222 10223 /* Validate the open type */ 10224 if (otyp >= OTYPCNT) { 10225 return (ENXIO); 10226 } 10227 10228 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10229 return (ENXIO); 10230 } 10231 10232 part = SDPART(dev); 10233 nodelay = flag & (FNDELAY | FNONBLOCK); 10234 10235 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 10236 "sdclose: close of part %d type %d\n", part, otyp); 10237 10238 /* 10239 * We use a semaphore here in order to serialize 10240 * open and close requests on the device. 10241 */ 10242 sema_p(&un->un_semoclose); 10243 10244 mutex_enter(SD_MUTEX(un)); 10245 10246 /* Don't proceed if power is being changed. */ 10247 while (un->un_state == SD_STATE_PM_CHANGING) { 10248 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10249 } 10250 10251 if (un->un_exclopen & (1 << part)) { 10252 un->un_exclopen &= ~(1 << part); 10253 } 10254 10255 /* Update the open partition map */ 10256 if (otyp == OTYP_LYR) { 10257 un->un_ocmap.lyropen[part] -= 1; 10258 } else { 10259 un->un_ocmap.regopen[otyp] &= ~(1 << part); 10260 } 10261 10262 cp = &un->un_ocmap.chkd[0]; 10263 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10264 if (*cp != NULL) { 10265 break; 10266 } 10267 cp++; 10268 } 10269 10270 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10271 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 10272 10273 /* 10274 * We avoid persistance upon the last close, and set 10275 * the throttle back to the maximum. 
10276 */ 10277 un->un_throttle = un->un_saved_throttle; 10278 10279 if (un->un_state == SD_STATE_OFFLINE) { 10280 if (un->un_f_is_fibre == FALSE) { 10281 scsi_log(SD_DEVINFO(un), sd_label, 10282 CE_WARN, "offline\n"); 10283 } 10284 un->un_f_geometry_is_valid = FALSE; 10285 10286 } else { 10287 /* 10288 * Flush any outstanding writes in NVRAM cache. 10289 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 10290 * cmd, it may not work for non-Pluto devices. 10291 * SYNCHRONIZE CACHE is not required for removables, 10292 * except DVD-RAM drives. 10293 * 10294 * Also note: because SYNCHRONIZE CACHE is currently 10295 * the only command issued here that requires the 10296 * drive be powered up, only do the power up before 10297 * sending the Sync Cache command. If additional 10298 * commands are added which require a powered up 10299 * drive, the following sequence may have to change. 10300 * 10301 * And finally, note that parallel SCSI on SPARC 10302 * only issues a Sync Cache to DVD-RAM, a newly 10303 * supported device. 10304 */ 10305 #if defined(__i386) || defined(__amd64) 10306 if (!ISREMOVABLE(un) || 10307 un->un_f_dvdram_writable_device == TRUE) { 10308 #else 10309 if (un->un_f_dvdram_writable_device == TRUE) { 10310 #endif 10311 mutex_exit(SD_MUTEX(un)); 10312 if (sd_pm_entry(un) == DDI_SUCCESS) { 10313 if (sd_send_scsi_SYNCHRONIZE_CACHE(un) 10314 != 0) { 10315 rval = EIO; 10316 } 10317 sd_pm_exit(un); 10318 } else { 10319 rval = EIO; 10320 } 10321 mutex_enter(SD_MUTEX(un)); 10322 } 10323 10324 /* 10325 * For removable media devices, send an ALLOW MEDIA 10326 * REMOVAL command, but don't get upset if it fails. 10327 * Also invalidate the geometry. 
We need to raise 10328 * the power of the drive before we can call 10329 * sd_send_scsi_DOORLOCK() 10330 */ 10331 if (ISREMOVABLE(un)) { 10332 mutex_exit(SD_MUTEX(un)); 10333 if (sd_pm_entry(un) == DDI_SUCCESS) { 10334 rval = sd_send_scsi_DOORLOCK(un, 10335 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 10336 10337 sd_pm_exit(un); 10338 if (ISCD(un) && (rval != 0) && 10339 (nodelay != 0)) { 10340 rval = ENXIO; 10341 } 10342 } else { 10343 rval = EIO; 10344 } 10345 mutex_enter(SD_MUTEX(un)); 10346 10347 sr_ejected(un); 10348 /* 10349 * Destroy the cache (if it exists) which was 10350 * allocated for the write maps since this is 10351 * the last close for this media. 10352 */ 10353 if (un->un_wm_cache) { 10354 /* 10355 * Check if there are pending commands. 10356 * and if there are give a warning and 10357 * do not destroy the cache. 10358 */ 10359 if (un->un_ncmds_in_driver > 0) { 10360 scsi_log(SD_DEVINFO(un), 10361 sd_label, CE_WARN, 10362 "Unable to clean up memory " 10363 "because of pending I/O\n"); 10364 } else { 10365 kmem_cache_destroy( 10366 un->un_wm_cache); 10367 un->un_wm_cache = NULL; 10368 } 10369 } 10370 } 10371 } 10372 } 10373 10374 mutex_exit(SD_MUTEX(un)); 10375 sema_v(&un->un_semoclose); 10376 10377 if (otyp == OTYP_LYR) { 10378 mutex_enter(&sd_detach_mutex); 10379 /* 10380 * The detach routine may run when the layer count 10381 * drops to zero. 10382 */ 10383 un->un_layer_count--; 10384 mutex_exit(&sd_detach_mutex); 10385 } 10386 10387 return (rval); 10388 } 10389 10390 10391 /* 10392 * Function: sd_ready_and_valid 10393 * 10394 * Description: Test if device is ready and has a valid geometry. 10395 * 10396 * Arguments: dev - device number 10397 * un - driver soft state (unit) structure 10398 * 10399 * Return Code: SD_READY_VALID ready and valid label 10400 * SD_READY_NOT_VALID ready, geom ops never applicable 10401 * SD_NOT_READY_VALID not ready, no label 10402 * 10403 * Context: Never called at interrupt context. 
 */

static int
sd_ready_and_valid(struct sd_lun *un)
{
	struct sd_errstats *stp;
	uint64_t capacity;
	uint_t lbasize;
	int rval = SD_READY_VALID;
	char name_str[48];

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));
	if (ISREMOVABLE(un)) {
		/*
		 * Removable media: TUR tells us whether media is present;
		 * drop the mutex around the command, as for all the
		 * commands issued below.
		 */
		mutex_exit(SD_MUTEX(un));
		if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}

		mutex_enter(SD_MUTEX(un));
		if ((un->un_f_geometry_is_valid == FALSE) ||
		    (un->un_f_blockcount_is_valid == FALSE) ||
		    (un->un_f_tgt_blocksize_is_valid == FALSE)) {

			/* capacity has to be read every open. */
			mutex_exit(SD_MUTEX(un));
			if (sd_send_scsi_READ_CAPACITY(un, &capacity,
			    &lbasize, SD_PATH_DIRECT) != 0) {
				mutex_enter(SD_MUTEX(un));
				un->un_f_geometry_is_valid = FALSE;
				rval = SD_NOT_READY_VALID;
				goto done;
			} else {
				mutex_enter(SD_MUTEX(un));
				sd_update_block_info(un, lbasize, capacity);
			}
		}

		/*
		 * If this is a non 512 block device, allocate space for
		 * the wmap cache. This is being done here since every time
		 * a media is changed this routine will be called and the
		 * block size is a function of media rather than device.
		 */
		if (NOT_DEVBSIZE(un)) {
			if (!(un->un_wm_cache)) {
				(void) snprintf(name_str, sizeof (name_str),
				    "%s%d_cache",
				    ddi_driver_name(SD_DEVINFO(un)),
				    ddi_get_instance(SD_DEVINFO(un)));
				un->un_wm_cache = kmem_cache_create(
				    name_str, sizeof (struct sd_w_map),
				    8, sd_wm_cache_constructor,
				    sd_wm_cache_destructor, NULL,
				    (void *)un, NULL, 0);
				if (!(un->un_wm_cache)) {
					/*
					 * NOTE(review): ENOMEM is returned
					 * here rather than one of the
					 * SD_*_VALID codes; callers compare
					 * against SD_READY_VALID only, so
					 * this reads as a failure -- confirm
					 * the code values do not collide.
					 */
					rval = ENOMEM;
					goto done;
				}
			}
		}

		/*
		 * Check if the media in the device is writable or not.
		 */
		if ((un->un_f_geometry_is_valid == FALSE) && ISCD(un)) {
			sd_check_for_writable_cd(un);
		}

	} else {
		/*
		 * Do a test unit ready to clear any unit attention from non-cd
		 * devices.
		 */
		mutex_exit(SD_MUTEX(un));
		(void) sd_send_scsi_TEST_UNIT_READY(un, 0);
		mutex_enter(SD_MUTEX(un));
	}


	if (un->un_state == SD_STATE_NORMAL) {
		/*
		 * If the target is not yet ready here (defined by a TUR
		 * failure), invalidate the geometry and print an 'offline'
		 * message. This is a legacy message, as the state of the
		 * target is not actually changed to SD_STATE_OFFLINE.
		 *
		 * If the TUR fails for EACCES (Reservation Conflict), it
		 * means there actually is nothing wrong with the target that
		 * would require invalidating the geometry, so continue in
		 * that case as if the TUR was successful.
		 */
		int err;

		mutex_exit(SD_MUTEX(un));
		err = sd_send_scsi_TEST_UNIT_READY(un, 0);
		mutex_enter(SD_MUTEX(un));

		if ((err != 0) && (err != EACCES)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "offline\n");
			un->un_f_geometry_is_valid = FALSE;
			rval = SD_NOT_READY_VALID;
			goto done;
		}
	}

	if (un->un_f_format_in_progress == FALSE) {
		/*
		 * Note: sd_validate_geometry may return TRUE, but that does
		 * not necessarily mean un_f_geometry_is_valid == TRUE!
		 */
		rval = sd_validate_geometry(un, SD_PATH_DIRECT);
		if (rval == ENOTSUP) {
			if (un->un_f_geometry_is_valid == TRUE)
				rval = 0;
			else {
				rval = SD_READY_NOT_VALID;
				goto done;
			}
		}
		if (rval != 0) {
			/*
			 * We don't check the validity of geometry for
			 * CDROMs. Also we assume we have a good label
			 * even if sd_validate_geometry returned ENOMEM.
			 */
			if (!ISCD(un) && rval != ENOMEM) {
				rval = SD_NOT_READY_VALID;
				goto done;
			}
		}
	}

#ifdef DOESNTWORK /* on eliteII, see 1118607 */
	/*
	 * check to see if this disk is write protected, if it is and we have
	 * not set read-only, then fail
	 */
	if ((flag & FWRITE) && (sr_check_wp(dev))) {
		New_state(un, SD_STATE_CLOSED);
		goto done;
	}
#endif

	/*
	 * If this is a removable media device, try and send
	 * a PREVENT MEDIA REMOVAL command, but don't get upset
	 * if it fails. For a CD, however, it is an error
	 */
	if (ISREMOVABLE(un)) {
		mutex_exit(SD_MUTEX(un));
		if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
		    SD_PATH_DIRECT) != 0) && ISCD(un)) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}
		mutex_enter(SD_MUTEX(un));
	}

	/* The state has changed, inform the media watch routines */
	un->un_mediastate = DKIO_INSERTED;
	cv_broadcast(&un->un_state_cv);
	rval = SD_READY_VALID;

done:

	/*
	 * Initialize the capacity kstat value, if no media previously
	 * (capacity kstat is 0) and a media has been inserted
	 * (un_blockcount > 0).
	 * This is a more generic way than checking for ISREMOVABLE.
	 */
	if (un->un_errstats != NULL) {
		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		if ((stp->sd_capacity.value.ui64 == 0) &&
		    (un->un_f_blockcount_is_valid == TRUE)) {
			stp->sd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_blockcount *
			    un->un_sys_blocksize);
		}
	}

	mutex_exit(SD_MUTEX(un));
	return (rval);
}


/*
 * Function: sdmin
 *
 * Description: Routine to limit the size of a data transfer. Used in
 *	conjunction with physio(9F).
 *
 * Arguments: bp - pointer to the indicated buf(9S) struct.
 *
 * Context: Kernel thread context.
 */

static void
sdmin(struct buf *bp)
{
	struct sd_lun *un;
	int instance;

	instance = SDUNIT(bp->b_edev);

	un = ddi_get_soft_state(sd_state, instance);
	ASSERT(un != NULL);

	/* Clamp the transfer count to the unit's maximum transfer size. */
	if (bp->b_bcount > un->un_max_xfer_size) {
		bp->b_bcount = un->un_max_xfer_size;
	}
}


/*
 * Function: sdread
 *
 * Description: Driver's read(9e) entry point function.
 *
 * Arguments: dev - device number
 *	uio - structure pointer describing where data is to be stored
 *	in user's space
 *	cred_p - user credential pointer
 *
 * Return Code: ENXIO
 *	EIO
 *	EINVAL
 *	value returned by physio
 *
 * Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
{
	struct sd_lun *un = NULL;
	int secmask;
	int err;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * if it's power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		/* Count the validation I/O so detach cannot race with it. */
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));
		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
			mutex_enter(SD_MUTEX(un));
			un->un_ncmds_in_driver--;
			ASSERT(un->un_ncmds_in_driver >= 0);
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Read requests are restricted to multiples of the system block size.
	 */
	secmask = un->un_sys_blocksize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdread: file offset not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdread: transfer length not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else {
		err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
	}
	return (err);
}


/*
 * Function: sdwrite
 *
 * Description: Driver's write(9e) entry point function.
 *
 * Arguments: dev - device number
 *	uio - structure pointer describing where data is stored in
 *	user's space
 *	cred_p - user credential pointer
 *
 * Return Code: ENXIO
 *	EIO
 *	EINVAL
 *	value returned by physio
 *
 * Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
{
	struct sd_lun *un = NULL;
	int secmask;
	int err;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * if it's power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		/* Count the validation I/O so detach cannot race with it. */
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));
		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
			mutex_enter(SD_MUTEX(un));
			un->un_ncmds_in_driver--;
			ASSERT(un->un_ncmds_in_driver >= 0);
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Write requests are restricted to multiples of the system block size.
	 */
	secmask = un->un_sys_blocksize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdwrite: file offset not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdwrite: transfer length not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else {
		err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
	}
	return (err);
}


/*
 * Function: sdaread
 *
 * Description: Driver's aread(9e) entry point function.
 *
 * Arguments: dev - device number
 *	aio - structure pointer describing where data is to be stored
 *	cred_p - user credential pointer
 *
 * Return Code: ENXIO
 *	EIO
 *	EINVAL
 *	value returned by aphysio
 *
 * Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
	struct sd_lun *un = NULL;
	struct uio *uio = aio->aio_uio;
	int secmask;
	int err;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * if it's power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		/* Count the validation I/O so detach cannot race with it. */
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));
		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
			mutex_enter(SD_MUTEX(un));
			un->un_ncmds_in_driver--;
			ASSERT(un->un_ncmds_in_driver >= 0);
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Read requests are restricted to multiples of the system block size.
	 */
	secmask = un->un_sys_blocksize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdaread: file offset not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdaread: transfer length not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else {
		err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
	}
	return (err);
}


/*
 * Function: sdawrite
 *
 * Description: Driver's awrite(9e) entry point function.
 *
 * Arguments: dev - device number
 *	aio - structure pointer describing where data is stored
 *	cred_p - user credential pointer
 *
 * Return Code: ENXIO
 *	EIO
 *	EINVAL
 *	value returned by aphysio
 *
 * Context: Kernel thread context.
 */
/* ARGSUSED */
static int
sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
	struct sd_lun *un = NULL;
	struct uio *uio = aio->aio_uio;
	int secmask;
	int err;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/*
		 * Because the call to sd_ready_and_valid will issue I/O we
		 * must wait here if either the device is suspended or
		 * if it's power level is changing.
		 */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		/* Count the validation I/O so detach cannot race with it. */
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));
		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
			mutex_enter(SD_MUTEX(un));
			un->un_ncmds_in_driver--;
			ASSERT(un->un_ncmds_in_driver >= 0);
			mutex_exit(SD_MUTEX(un));
			return (EIO);
		}
		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Write requests are restricted to multiples of the system block size.
	 */
	secmask = un->un_sys_blocksize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdawrite: file offset not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdawrite: transfer length not modulo %d\n",
		    un->un_sys_blocksize);
		err = EINVAL;
	} else {
		err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio);
	}
	return (err);
}




/*
 * Driver IO processing follows the following sequence:
 *
 * sdioctl(9E) sdstrategy(9E) biodone(9F)
 * | | ^
 * v v |
 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+
 * | | | |
 * v | | |
 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone()
 * | | ^ ^
 * v v | |
 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | |
 * | | | |
 * +---+ | +------------+ +-------+
 * | | | |
 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
 * | v | |
 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() |
 * | | ^ |
 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
 * | v | |
 * |
sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 10967 * | | ^ | 10968 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10969 * | v | | 10970 * | sd_checksum_iostart() sd_checksum_iodone() | 10971 * | | ^ | 10972 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 10973 * | v | | 10974 * | sd_pm_iostart() sd_pm_iodone() | 10975 * | | ^ | 10976 * | | | | 10977 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 10978 * | ^ 10979 * v | 10980 * sd_core_iostart() | 10981 * | | 10982 * | +------>(*destroypkt)() 10983 * +-> sd_start_cmds() <-+ | | 10984 * | | | v 10985 * | | | scsi_destroy_pkt(9F) 10986 * | | | 10987 * +->(*initpkt)() +- sdintr() 10988 * | | | | 10989 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 10990 * | +-> scsi_setup_cdb(9F) | 10991 * | | 10992 * +--> scsi_transport(9F) | 10993 * | | 10994 * +----> SCSA ---->+ 10995 * 10996 * 10997 * This code is based upon the following presumtions: 10998 * 10999 * - iostart and iodone functions operate on buf(9S) structures. These 11000 * functions perform the necessary operations on the buf(9S) and pass 11001 * them along to the next function in the chain by using the macros 11002 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 11003 * (for iodone side functions). 11004 * 11005 * - The iostart side functions may sleep. The iodone side functions 11006 * are called under interrupt context and may NOT sleep. Therefore 11007 * iodone side functions also may not call iostart side functions. 11008 * (NOTE: iostart side functions should NOT sleep for memory, as 11009 * this could result in deadlock.) 11010 * 11011 * - An iostart side function may call its corresponding iodone side 11012 * function directly (if necessary). 11013 * 11014 * - In the event of an error, an iostart side function can return a buf(9S) 11015 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 11016 * b_error in the usual way of course). 
11017 * 11018 * - The taskq mechanism may be used by the iodone side functions to dispatch 11019 * requests to the iostart side functions. The iostart side functions in 11020 * this case would be called under the context of a taskq thread, so it's 11021 * OK for them to block/sleep/spin in this case. 11022 * 11023 * - iostart side functions may allocate "shadow" buf(9S) structs and 11024 * pass them along to the next function in the chain. The corresponding 11025 * iodone side functions must coalesce the "shadow" bufs and return 11026 * the "original" buf to the next higher layer. 11027 * 11028 * - The b_private field of the buf(9S) struct holds a pointer to 11029 * an sd_xbuf struct, which contains information needed to 11030 * construct the scsi_pkt for the command. 11031 * 11032 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 11033 * layer must acquire & release the SD_MUTEX(un) as needed. 11034 */ 11035 11036 11037 /* 11038 * Create taskq for all targets in the system. This is created at 11039 * _init(9E) and destroyed at _fini(9E). 11040 * 11041 * Note: here we set the minalloc to a reasonably high number to ensure that 11042 * we will have an adequate supply of task entries available at interrupt time. 11043 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 11044 * sd_create_taskq(). Since we do not want to sleep for allocations at 11045 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 11046 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 11047 * requests any one instant in time. 
 */
/* Taskq sizing: minalloc == maxalloc so dispatch fails rather than sleeps. */
#define SD_TASKQ_NUMTHREADS 8
#define SD_TASKQ_MINALLOC 256
#define SD_TASKQ_MAXALLOC 256

/* Driver-wide taskq shared by all instances; created/destroyed with the module. */
static taskq_t *sd_tq = NULL;
static int sd_taskq_minalloc = SD_TASKQ_MINALLOC;
static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;

/*
 * The following task queue is being created for the write part of
 * read-modify-write of non-512 block size devices.
 * Limit the number of threads to 1 for now. This number has been chosen
 * considering the fact that it applies only to dvd ram drives/MO drives
 * currently. Performance for which is not main criteria at this stage.
 * Note: It needs to be explored if we can use a single taskq in future
 */
#define SD_WMR_TASKQ_NUMTHREADS 1
static taskq_t *sd_wmr_tq = NULL;

/*
 * Function: sd_taskq_create
 *
 * Description: Create taskq thread(s) and preallocate task entries
 *
 * Return Code: None; the created taskqs are stored in the file-scope
 *	sd_tq and sd_wmr_tq pointers.
 *
 * Context: Can sleep. Requires blockable context.
 *
 * Notes: - The taskq() facility currently is NOT part of the DDI.
 *	(definitely NOT recommended for 3rd-party drivers!) :-)
 *	- taskq_create() will block for memory, also it will panic
 *	if it cannot create the requested number of threads.
 *	- Currently taskq_create() creates threads that cannot be
 *	swapped.
 * - We use TASKQ_PREPOPULATE to ensure we have an adequate
 *	supply of taskq entries at interrupt time (ie, so that we
 *	do not have to sleep for memory)
 */

static void
sd_taskq_create(void)
{
	char taskq_name[TASKQ_NAMELEN];

	ASSERT(sd_tq == NULL);
	ASSERT(sd_wmr_tq == NULL);

	/* General-purpose driver taskq. */
	(void) snprintf(taskq_name, sizeof (taskq_name),
	    "%s_drv_taskq", sd_label);
	sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
	    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
	    TASKQ_PREPOPULATE));

	/* Read-modify-write taskq for non-512-byte block devices. */
	(void) snprintf(taskq_name, sizeof (taskq_name),
	    "%s_rmw_taskq", sd_label);
	sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
	    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
	    TASKQ_PREPOPULATE));
}


/*
 * Function: sd_taskq_delete
 *
 * Description: Complementary cleanup routine for sd_taskq_create().
 *
 * Context: Kernel thread context.
 */

static void
sd_taskq_delete(void)
{
	ASSERT(sd_tq != NULL);
	ASSERT(sd_wmr_tq != NULL);
	taskq_destroy(sd_tq);
	taskq_destroy(sd_wmr_tq);
	sd_tq = NULL;
	sd_wmr_tq = NULL;
}


/*
 * Function: sdstrategy
 *
 * Description: Driver's strategy (9E) entry point function.
 *
 * Arguments: bp - pointer to buf(9S)
 *
 * Return Code: Always returns zero
 *
 * Context: Kernel thread context.
 */

static int
sdstrategy(struct buf *bp)
{
	struct sd_lun *un;

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	if (un == NULL) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}
	/* As was done in the past, fail new cmds. if state is dumping. */
	if (un->un_state == SD_STATE_DUMPING) {
		bioerror(bp, ENXIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * Commands may sneak in while we released the mutex in
	 * DDI_SUSPEND, we should block new commands. However, old
	 * commands that are still in the driver at this point should
	 * still be allowed to drain.
	 */
	mutex_enter(SD_MUTEX(un));
	/*
	 * Must wait here if either the device is suspended or
	 * if it's power level is changing.
	 */
	while ((un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}

	un->un_ncmds_in_driver++;

	/*
	 * atapi: Since we are running the CD for now in PIO mode we need to
	 * call bp_mapin here to avoid bp_mapin called interrupt context under
	 * the HBA's init_pkt routine.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		mutex_exit(SD_MUTEX(un));
		bp_mapin(bp);
		mutex_enter(SD_MUTEX(un));
	}
	SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	mutex_exit(SD_MUTEX(un));

	/*
	 * This will (eventually) allocate the sd_xbuf area and
	 * call sd_xbuf_strategy(). We just want to return the
	 * result of ddi_xbuf_qstrategy so that we have an opt-
	 * imized tail call which saves us a stack frame.
	 */
	return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
}


/*
 * Function: sd_xbuf_strategy
 *
 * Description: Function for initiating IO operations via the
 *	ddi_xbuf_qstrategy() mechanism.
 *
 * Context: Kernel thread context.
 */

static void
sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * Initialize the fields in the xbuf and save a pointer to the
	 * xbuf in bp->b_private.
	 */
	sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL);

	/* Send the buf down the iostart chain */
	SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp);
}


/*
 * Function: sd_xbuf_init
 *
 * Description: Prepare the given sd_xbuf struct for use.
 *
 * Arguments: un - ptr to softstate
 *	bp - ptr to associated buf(9S)
 *	xp - ptr to associated sd_xbuf
 *	chain_type - IO chain type to use:
 *		SD_CHAIN_NULL
 *		SD_CHAIN_BUFIO
 *		SD_CHAIN_USCSI
 *		SD_CHAIN_DIRECT
 *		SD_CHAIN_DIRECT_PRIORITY
 *	pktinfop - ptr to private data struct for scsi_pkt(9S)
 *		initialization; may be NULL if none.
 *
 * Context: Kernel thread context
 */

static void
sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
	uchar_t chain_type, void *pktinfop)
{
	int index;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);

	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
	    bp, chain_type);

	xp->xb_un = un;
	xp->xb_pktp = NULL;
	xp->xb_pktinfo = pktinfop;
	/* Save caller's b_private; bp->b_private is repointed to xp below. */
	xp->xb_private = bp->b_private;
	xp->xb_blkno = (daddr_t)bp->b_blkno;

	/*
	 * Set up the iostart and iodone chain indexes in the xbuf, based
	 * upon the specified chain type to use.
	 */
	switch (chain_type) {
	case SD_CHAIN_NULL:
		/*
		 * Fall thru to just use the values for the buf type, even
		 * tho for the NULL chain these values will never be used.
		 */
		/* FALLTHRU */
	case SD_CHAIN_BUFIO:
		index = un->un_buf_chain_type;
		break;
	case SD_CHAIN_USCSI:
		index = un->un_uscsi_chain_type;
		break;
	case SD_CHAIN_DIRECT:
		index = un->un_direct_chain_type;
		break;
	case SD_CHAIN_DIRECT_PRIORITY:
		index = un->un_priority_chain_type;
		break;
	default:
		/* We're really broken if we ever get here... */
		panic("sd_xbuf_init: illegal chain type!");
		/*NOTREACHED*/
	}

	xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
	xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;

	/*
	 * It might be a bit easier to simply bzero the entire xbuf above,
	 * but it turns out that since we init a fair number of members anyway,
	 * we save a fair number cycles by doing explicit assignment of zero.
	 */
	xp->xb_pkt_flags = 0;
	xp->xb_dma_resid = 0;
	xp->xb_retry_count = 0;
	xp->xb_victim_retry_count = 0;
	xp->xb_ua_retry_count = 0;
	xp->xb_sense_bp = NULL;
	xp->xb_sense_status = 0;
	xp->xb_sense_state = 0;
	xp->xb_sense_resid = 0;

	bp->b_private = xp;
	bp->b_flags &= ~(B_DONE | B_ERROR);
	bp->b_resid = 0;
	bp->av_forw = NULL;
	bp->av_back = NULL;
	bioerror(bp, 0);

	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n");
}


/*
 * Function: sd_uscsi_strategy
 *
 * Description: Wrapper for calling into the USCSI chain via physio(9F)
 *
 * Arguments: bp - buf struct ptr
 *
 * Return Code: Always returns 0
 *
 * Context: Kernel thread context
 */

static int
sd_uscsi_strategy(struct buf *bp)
{
	struct sd_lun		*un;
	struct sd_uscsi_info	*uip;
	struct sd_xbuf		*xp;
	uchar_t			chain_type;

	ASSERT(bp != NULL);

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	if (un == NULL) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);

	mutex_enter(SD_MUTEX(un));
	/*
	 * atapi: Since we are running the CD for now in PIO mode we need to
	 * call bp_mapin here to avoid bp_mapin called interrupt context under
	 * the HBA's init_pkt routine.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		/* Drop the mutex across bp_mapin(); it may block. */
		mutex_exit(SD_MUTEX(un));
		bp_mapin(bp);
		mutex_enter(SD_MUTEX(un));
	}
	/* Count this command; the iodone side (sd_uscsi_iodone) decrements. */
	un->un_ncmds_in_driver++;
	SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);
	mutex_exit(SD_MUTEX(un));

	/*
	 * A pointer to a struct sd_uscsi_info is expected in bp->b_private
	 */
	ASSERT(bp->b_private != NULL);
	uip = (struct sd_uscsi_info *)bp->b_private;

	/* Map the caller's path flag to the corresponding IO chain. */
	switch (uip->ui_flags) {
	case SD_PATH_DIRECT:
		chain_type = SD_CHAIN_DIRECT;
		break;
	case SD_PATH_DIRECT_PRIORITY:
		chain_type = SD_CHAIN_DIRECT_PRIORITY;
		break;
	default:
		chain_type = SD_CHAIN_USCSI;
		break;
	}

	/* Allocated here; freed by sd_uscsi_iodone() when the cmd completes. */
	xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);

	/* Use the index obtained within xbuf_init */
	SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);

	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);

	return (0);
}


/*
 * These routines perform raw i/o operations.
 */
/*ARGSUSED*/
static void
sduscsimin(struct buf *bp)
{
	/*
	 * do not break up because the CDB count would then
	 * be incorrect and data underruns would result (incomplete
	 * read/writes which would be retried and then failed, see
	 * sdintr().
11426 */ 11427 } 11428 11429 11430 11431 /* 11432 * Function: sd_send_scsi_cmd 11433 * 11434 * Description: Runs a USCSI command for user (when called thru sdioctl), 11435 * or for the driver 11436 * 11437 * Arguments: dev - the dev_t for the device 11438 * incmd - ptr to a valid uscsi_cmd struct 11439 * cdbspace - UIO_USERSPACE or UIO_SYSSPACE 11440 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11441 * rqbufspace - UIO_USERSPACE or UIO_SYSSPACE 11442 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11443 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11444 * to use the USCSI "direct" chain and bypass the normal 11445 * command waitq. 11446 * 11447 * Return Code: 0 - successful completion of the given command 11448 * EIO - scsi_reset() failed, or see biowait()/physio() codes. 11449 * ENXIO - soft state not found for specified dev 11450 * EINVAL 11451 * EFAULT - copyin/copyout error 11452 * return code of biowait(9F) or physio(9F): 11453 * EIO - IO error, caller may check incmd->uscsi_status 11454 * ENXIO 11455 * EACCES - reservation conflict 11456 * 11457 * Context: Waits for command to complete. Can sleep. 
11458 */ 11459 11460 static int 11461 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, 11462 enum uio_seg cdbspace, enum uio_seg dataspace, enum uio_seg rqbufspace, 11463 int path_flag) 11464 { 11465 struct sd_uscsi_info *uip; 11466 struct uscsi_cmd *uscmd; 11467 struct sd_lun *un; 11468 struct buf *bp; 11469 int rval; 11470 int flags; 11471 11472 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 11473 if (un == NULL) { 11474 return (ENXIO); 11475 } 11476 11477 ASSERT(!mutex_owned(SD_MUTEX(un))); 11478 11479 #ifdef SDDEBUG 11480 switch (dataspace) { 11481 case UIO_USERSPACE: 11482 SD_TRACE(SD_LOG_IO, un, 11483 "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un); 11484 break; 11485 case UIO_SYSSPACE: 11486 SD_TRACE(SD_LOG_IO, un, 11487 "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un); 11488 break; 11489 default: 11490 SD_TRACE(SD_LOG_IO, un, 11491 "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un); 11492 break; 11493 } 11494 #endif 11495 11496 /* 11497 * Perform resets directly; no need to generate a command to do it. 11498 */ 11499 if (incmd->uscsi_flags & (USCSI_RESET | USCSI_RESET_ALL)) { 11500 flags = ((incmd->uscsi_flags & USCSI_RESET_ALL) != 0) ? 11501 RESET_ALL : RESET_TARGET; 11502 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: Issuing reset\n"); 11503 if (scsi_reset(SD_ADDRESS(un), flags) == 0) { 11504 /* Reset attempt was unsuccessful */ 11505 SD_TRACE(SD_LOG_IO, un, 11506 "sd_send_scsi_cmd: reset: failure\n"); 11507 return (EIO); 11508 } 11509 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: reset: success\n"); 11510 return (0); 11511 } 11512 11513 /* Perfunctory sanity check... */ 11514 if (incmd->uscsi_cdblen <= 0) { 11515 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11516 "invalid uscsi_cdblen, returning EINVAL\n"); 11517 return (EINVAL); 11518 } 11519 11520 /* 11521 * In order to not worry about where the uscsi structure came from 11522 * (or where the cdb it points to came from) we're going to make 11523 * kmem_alloc'd copies of them here. 
This will also allow reference 11524 * to the data they contain long after this process has gone to 11525 * sleep and its kernel stack has been unmapped, etc. 11526 * 11527 * First get some memory for the uscsi_cmd struct and copy the 11528 * contents of the given uscsi_cmd struct into it. 11529 */ 11530 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 11531 bcopy(incmd, uscmd, sizeof (struct uscsi_cmd)); 11532 11533 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_cmd: uscsi_cmd", 11534 (uchar_t *)uscmd, sizeof (struct uscsi_cmd), SD_LOG_HEX); 11535 11536 /* 11537 * Now get some space for the CDB, and copy the given CDB into 11538 * it. Use ddi_copyin() in case the data is in user space. 11539 */ 11540 uscmd->uscsi_cdb = kmem_zalloc((size_t)incmd->uscsi_cdblen, KM_SLEEP); 11541 flags = (cdbspace == UIO_SYSSPACE) ? FKIOCTL : 0; 11542 if (ddi_copyin(incmd->uscsi_cdb, uscmd->uscsi_cdb, 11543 (uint_t)incmd->uscsi_cdblen, flags) != 0) { 11544 kmem_free(uscmd->uscsi_cdb, (size_t)incmd->uscsi_cdblen); 11545 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 11546 return (EFAULT); 11547 } 11548 11549 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_cmd: CDB", 11550 (uchar_t *)uscmd->uscsi_cdb, incmd->uscsi_cdblen, SD_LOG_HEX); 11551 11552 bp = getrbuf(KM_SLEEP); 11553 11554 /* 11555 * Allocate an sd_uscsi_info struct and fill it with the info 11556 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 11557 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 11558 * since we allocate the buf here in this function, we do not 11559 * need to preserve the prior contents of b_private. 11560 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 11561 */ 11562 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 11563 uip->ui_flags = path_flag; 11564 uip->ui_cmdp = uscmd; 11565 bp->b_private = uip; 11566 11567 /* 11568 * Initialize Request Sense buffering, if requested. 
11569 */ 11570 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 11571 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 11572 /* 11573 * Here uscmd->uscsi_rqbuf currently points to the caller's 11574 * buffer, but we replace this with a kernel buffer that 11575 * we allocate to use with the sense data. The sense data 11576 * (if present) gets copied into this new buffer before the 11577 * command is completed. Then we copy the sense data from 11578 * our allocated buf into the caller's buffer below. Note 11579 * that incmd->uscsi_rqbuf and incmd->uscsi_rqlen are used 11580 * below to perform the copy back to the caller's buf. 11581 */ 11582 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 11583 if (rqbufspace == UIO_USERSPACE) { 11584 uscmd->uscsi_rqlen = SENSE_LENGTH; 11585 uscmd->uscsi_rqresid = SENSE_LENGTH; 11586 } else { 11587 uchar_t rlen = min(SENSE_LENGTH, uscmd->uscsi_rqlen); 11588 uscmd->uscsi_rqlen = rlen; 11589 uscmd->uscsi_rqresid = rlen; 11590 } 11591 } else { 11592 uscmd->uscsi_rqbuf = NULL; 11593 uscmd->uscsi_rqlen = 0; 11594 uscmd->uscsi_rqresid = 0; 11595 } 11596 11597 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: rqbuf:0x%p rqlen:%d\n", 11598 uscmd->uscsi_rqbuf, uscmd->uscsi_rqlen); 11599 11600 if (un->un_f_is_fibre == FALSE) { 11601 /* 11602 * Force asynchronous mode, if necessary. Doing this here 11603 * has the unfortunate effect of running other queued 11604 * commands async also, but since the main purpose of this 11605 * capability is downloading new drive firmware, we can 11606 * probably live with it. 
11607 */ 11608 if ((uscmd->uscsi_flags & USCSI_ASYNC) != 0) { 11609 if (scsi_ifgetcap(SD_ADDRESS(un), "synchronous", 1) 11610 == 1) { 11611 if (scsi_ifsetcap(SD_ADDRESS(un), 11612 "synchronous", 0, 1) == 1) { 11613 SD_TRACE(SD_LOG_IO, un, 11614 "sd_send_scsi_cmd: forced async ok\n"); 11615 } else { 11616 SD_TRACE(SD_LOG_IO, un, 11617 "sd_send_scsi_cmd:\ 11618 forced async failed\n"); 11619 rval = EINVAL; 11620 goto done; 11621 } 11622 } 11623 } 11624 11625 /* 11626 * Re-enable synchronous mode, if requested 11627 */ 11628 if (uscmd->uscsi_flags & USCSI_SYNC) { 11629 if (scsi_ifgetcap(SD_ADDRESS(un), "synchronous", 1) 11630 == 0) { 11631 int i = scsi_ifsetcap(SD_ADDRESS(un), 11632 "synchronous", 1, 1); 11633 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11634 "re-enabled sync %s\n", 11635 (i == 1) ? "ok" : "failed"); 11636 } 11637 } 11638 } 11639 11640 /* 11641 * Commands sent with priority are intended for error recovery 11642 * situations, and do not have retries performed. 11643 */ 11644 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11645 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11646 } 11647 11648 /* 11649 * If we're going to do actual I/O, let physio do all the right things 11650 */ 11651 if (uscmd->uscsi_buflen != 0) { 11652 struct iovec aiov; 11653 struct uio auio; 11654 struct uio *uio = &auio; 11655 11656 bzero(&auio, sizeof (struct uio)); 11657 bzero(&aiov, sizeof (struct iovec)); 11658 aiov.iov_base = uscmd->uscsi_bufaddr; 11659 aiov.iov_len = uscmd->uscsi_buflen; 11660 uio->uio_iov = &aiov; 11661 11662 uio->uio_iovcnt = 1; 11663 uio->uio_resid = uscmd->uscsi_buflen; 11664 uio->uio_segflg = dataspace; 11665 11666 /* 11667 * physio() will block here until the command completes.... 11668 */ 11669 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: calling physio.\n"); 11670 11671 rval = physio(sd_uscsi_strategy, bp, dev, 11672 ((uscmd->uscsi_flags & USCSI_READ) ? 
B_READ : B_WRITE), 11673 sduscsimin, uio); 11674 11675 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11676 "returned from physio with 0x%x\n", rval); 11677 11678 } else { 11679 /* 11680 * We have to mimic what physio would do here! Argh! 11681 */ 11682 bp->b_flags = B_BUSY | 11683 ((uscmd->uscsi_flags & USCSI_READ) ? B_READ : B_WRITE); 11684 bp->b_edev = dev; 11685 bp->b_dev = cmpdev(dev); /* maybe unnecessary? */ 11686 bp->b_bcount = 0; 11687 bp->b_blkno = 0; 11688 11689 SD_TRACE(SD_LOG_IO, un, 11690 "sd_send_scsi_cmd: calling sd_uscsi_strategy...\n"); 11691 11692 (void) sd_uscsi_strategy(bp); 11693 11694 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: calling biowait\n"); 11695 11696 rval = biowait(bp); 11697 11698 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11699 "returned from biowait with 0x%x\n", rval); 11700 } 11701 11702 done: 11703 11704 #ifdef SDDEBUG 11705 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11706 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11707 uscmd->uscsi_status, uscmd->uscsi_resid); 11708 if (uscmd->uscsi_bufaddr != NULL) { 11709 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11710 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11711 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11712 if (dataspace == UIO_SYSSPACE) { 11713 SD_DUMP_MEMORY(un, SD_LOG_IO, 11714 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11715 uscmd->uscsi_buflen, SD_LOG_HEX); 11716 } 11717 } 11718 #endif 11719 11720 /* 11721 * Get the status and residual to return to the caller. 11722 */ 11723 incmd->uscsi_status = uscmd->uscsi_status; 11724 incmd->uscsi_resid = uscmd->uscsi_resid; 11725 11726 /* 11727 * If the caller wants sense data, copy back whatever sense data 11728 * we may have gotten, and update the relevant rqsense info. 
11729 */ 11730 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 11731 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 11732 11733 int rqlen = uscmd->uscsi_rqlen - uscmd->uscsi_rqresid; 11734 rqlen = min(((int)incmd->uscsi_rqlen), rqlen); 11735 11736 /* Update the Request Sense status and resid */ 11737 incmd->uscsi_rqresid = incmd->uscsi_rqlen - rqlen; 11738 incmd->uscsi_rqstatus = uscmd->uscsi_rqstatus; 11739 11740 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11741 "uscsi_rqstatus: 0x%02x uscsi_rqresid:0x%x\n", 11742 incmd->uscsi_rqstatus, incmd->uscsi_rqresid); 11743 11744 /* Copy out the sense data for user processes */ 11745 if ((incmd->uscsi_rqbuf != NULL) && (rqlen != 0)) { 11746 int flags = 11747 (rqbufspace == UIO_USERSPACE) ? 0 : FKIOCTL; 11748 if (ddi_copyout(uscmd->uscsi_rqbuf, incmd->uscsi_rqbuf, 11749 rqlen, flags) != 0) { 11750 rval = EFAULT; 11751 } 11752 /* 11753 * Note: Can't touch incmd->uscsi_rqbuf so use 11754 * uscmd->uscsi_rqbuf instead. They're the same. 11755 */ 11756 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11757 "incmd->uscsi_rqbuf: 0x%p rqlen:%d\n", 11758 incmd->uscsi_rqbuf, rqlen); 11759 SD_DUMP_MEMORY(un, SD_LOG_IO, "rq", 11760 (uchar_t *)uscmd->uscsi_rqbuf, rqlen, SD_LOG_HEX); 11761 } 11762 } 11763 11764 /* 11765 * Free allocated resources and return; mapout the buf in case it was 11766 * mapped in by a lower layer. 11767 */ 11768 bp_mapout(bp); 11769 freerbuf(bp); 11770 kmem_free(uip, sizeof (struct sd_uscsi_info)); 11771 if (uscmd->uscsi_rqbuf != NULL) { 11772 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 11773 } 11774 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 11775 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 11776 11777 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: exit\n"); 11778 11779 return (rval); 11780 } 11781 11782 11783 /* 11784 * Function: sd_buf_iodone 11785 * 11786 * Description: Frees the sd_xbuf & returns the buf to its originator. 
 *
 * Context: May be called from interrupt context.
 */
/* ARGSUSED */
static void
sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n");

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Grab time when the cmd completed.
	 * This is used for determining if the system has been
	 * idle long enough to make it idle to the PM framework.
	 * This is for lowering the overhead, and therefore improving
	 * performance per I/O operation.
	 */
	un->un_pm_idle_time = ddi_get_time();

	/* Balances the increment done in sdstrategy(). */
	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	mutex_exit(SD_MUTEX(un));

	ddi_xbuf_done(bp, un->un_xbuf_attr);	/* xbuf is gone after this */
	biodone(bp);				/* bp is gone after this */

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
}


/*
 * Function: sd_uscsi_iodone
 *
 * Description: Frees the sd_xbuf & returns the buf to its originator.
 *
 * Context: May be called from interrupt context.
 */
/* ARGSUSED */
static void
sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");

	mutex_enter(SD_MUTEX(un));

	/*
	 * Grab time when the cmd completed.
	 * This is used for determining if the system has been
	 * idle long enough to make it idle to the PM framework.
	 * This is for lowering the overhead, and therefore improving
	 * performance per I/O operation.
	 */
	un->un_pm_idle_time = ddi_get_time();

	/* Balances the increment done in sd_uscsi_strategy(). */
	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	mutex_exit(SD_MUTEX(un));

	/* Free the xbuf that sd_uscsi_strategy() kmem_alloc'd. */
	kmem_free(xp, sizeof (struct sd_xbuf));
	biodone(bp);

	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
}


/*
 * Function: sd_mapblockaddr_iostart
 *
 * Description: Verify request lies withing the partition limits for
 *		the indicated minor device. Issue "overrun" buf if
 *		request would exceed partition range. Converts
 *		partition-relative block address to absolute.
 *
 * Context: Can sleep
 *
 * Issues: This follows what the old code did, in terms of accessing
 *	some of the partition info in the unit struct without holding
 *	the mutext. This is a general issue, if the partition info
 *	can be altered while IO is in progress... as soon as we send
 *	a buf, its partitioning can be invalid before it gets to the
 *	device. Probably the right fix is to move partitioning out
 *	of the driver entirely.
 */

static void
sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	daddr_t	nblocks;	/* #blocks in the given partition */
	daddr_t	blocknum;	/* Block number specified by the buf */
	size_t	requested_nblocks;
	size_t	available_nblocks;
	int	partition;
	diskaddr_t	partition_offset;
	struct sd_xbuf *xp;


	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/*
	 * If the geometry is not indicated as valid, attempt to access
	 * the unit & verify the geometry/label. This can be the case for
	 * removable-media devices, of if the device was opened in
	 * NDELAY/NONBLOCK mode.
	 */
	if ((un->un_f_geometry_is_valid != TRUE) &&
	    (sd_ready_and_valid(un) != SD_READY_VALID)) {
		/*
		 * For removable devices it is possible to start an I/O
		 * without a media by opening the device in nodelay mode.
		 * Also for writable CDs there can be many scenarios where
		 * there is no geometry yet but volume manager is trying to
		 * issue a read() just because it can see TOC on the CD. So
		 * do not print a message for removables.
		 */
		if (!ISREMOVABLE(un)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "i/o to invalid geometry\n");
		}
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	partition = SDPART(bp->b_edev);

	/* #blocks in partition */
	nblocks = un->un_map[partition].dkl_nblk;    /* #blocks in partition */

	/* Use of a local variable potentially improves performance slightly */
	partition_offset = un->un_offset[partition];

	/*
	 * blocknum is the starting block number of the request. At this
	 * point it is still relative to the start of the minor device.
	 */
	blocknum = xp->xb_blkno;

	/*
	 * Legacy: If the starting block number is one past the last block
	 * in the partition, do not set B_ERROR in the buf.
	 */
	if (blocknum == nblocks)  {
		goto error_exit;
	}

	/*
	 * Confirm that the first block of the request lies within the
	 * partition limits. Also the requested number of bytes must be
	 * a multiple of the system block size.
	 */
	if ((blocknum < 0) || (blocknum >= nblocks) ||
	    ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) {
		bp->b_flags |= B_ERROR;
		goto error_exit;
	}

	/*
	 * If the requsted # blocks exceeds the available # blocks, that
	 * is an overrun of the partition.
	 */
	requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount);
	available_nblocks = (size_t)(nblocks - blocknum);
	ASSERT(nblocks >= blocknum);

	if (requested_nblocks > available_nblocks) {
		/*
		 * Allocate an "overrun" buf to allow the request to proceed
		 * for the amount of space available in the partition. The
		 * amount not transferred will be added into the b_resid
		 * when the operation is complete. The overrun buf
		 * replaces the original buf here, and the original buf
		 * is saved inside the overrun buf, for later use.
		 */
		size_t resid = SD_SYSBLOCKS2BYTES(un,
		    (offset_t)(requested_nblocks - available_nblocks));
		size_t count = bp->b_bcount - resid;
		/*
		 * Note: count is an unsigned entity thus it'll NEVER
		 * be less than 0 so ASSERT the original values are
		 * correct.
		 */
		ASSERT(bp->b_bcount >= resid);

		bp = sd_bioclone_alloc(bp, count, blocknum,
		    (int (*)(struct buf *)) sd_mapblockaddr_iodone);
		xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */
		ASSERT(xp != NULL);
	}

	/* At this point there should be no residual for this buf. */
	ASSERT(bp->b_resid == 0);

	/* Convert the block number to an absolute address. */
	xp->xb_blkno += partition_offset;

	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp);

	return;

error_exit:
	bp->b_resid = bp->b_bcount;
	SD_BEGIN_IODONE(index, un, bp);
	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp);
}


/*
 * Function: sd_mapblockaddr_iodone
 *
 * Description: Completion-side processing for partition management.
 *
 * Context: May be called under interrupt context
 */

static void
sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	/* int partition; */	/* Not used, see below. */
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp);

	/*
	 * An overrun buf is identified by the b_iodone routine installed
	 * on it by sd_bioclone_alloc() in sd_mapblockaddr_iostart().
	 */
	if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) {
		/*
		 * We have an "overrun" buf to deal with...
		 */
		struct sd_xbuf	*xp;
		struct buf	*obp;	/* ptr to the original buf */

		xp = SD_GET_XBUF(bp);
		ASSERT(xp != NULL);

		/* Retrieve the pointer to the original buf */
		obp = (struct buf *)xp->xb_private;
		ASSERT(obp != NULL);

		/* Propagate residual and error state back to the original. */
		obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid);
		bioerror(obp, bp->b_error);

		sd_bioclone_free(bp);

		/*
		 * Get back the original buf.
		 * Note that since the restoration of xb_blkno below
		 * was removed, the sd_xbuf is not needed.
		 */
		bp = obp;
		/*
		 * xp = SD_GET_XBUF(bp);
		 * ASSERT(xp != NULL);
		 */
	}

	/*
	 * Convert sd->xb_blkno back to a minor-device relative value.
	 * Note: this has been commented out, as it is not needed in the
	 * current implementation of the driver (ie, since this function
	 * is at the top of the layering chains, so the info will be
	 * discarded) and it is in the "hot" IO path.
	 *
	 * partition = getminor(bp->b_edev) & SDPART_MASK;
	 * xp->xb_blkno -= un->un_offset[partition];
	 */

	SD_NEXT_IODONE(index, un, bp);

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp);
}


/*
 * Function: sd_mapblocksize_iostart
 *
 * Description: Convert between system block size (un->un_sys_blocksize)
 *		and target block size (un->un_tgt_blocksize).
 *
 * Context: Can sleep to allocate resources.
 *
 * Assumptions: A higher layer has already performed any partition validation,
 *	and converted the xp->xb_blkno to an absolute value relative
 *	to the start of the device.
 *
 *	It is also assumed that the higher layer has implemented
 *	an "overrun" mechanism for the case where the request would
 *	read/write beyond the end of a partition.
In this case we
 * assume (and ASSERT) that bp->b_resid == 0.
 *
 * Note: The implementation for this routine assumes the target
 * block size remains constant between allocation and transport.
 */

static void
sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_mapblocksize_info	*bsp;
	struct sd_xbuf			*xp;
	offset_t first_byte;
	daddr_t	start_block, end_block;
	daddr_t	request_bytes;	/* xfer size rounded up to tgt blocks */
	ushort_t is_aligned = FALSE;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);

	/*
	 * For a non-writable CD, a write request is an error
	 */
	if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
	    (un->un_f_mmc_writable_media == FALSE)) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	/*
	 * We do not need a shadow buf if the device is using
	 * un->un_sys_blocksize as its block size or if bcount == 0.
	 * In this case there is no layer-private data block allocated.
	 */
	if ((un->un_tgt_blocksize == un->un_sys_blocksize) ||
	    (bp->b_bcount == 0)) {
		goto done;
	}

#if defined(__i386) || defined(__amd64)
	/* We do not support non-block-aligned transfers for ROD devices */
	ASSERT(!ISROD(un));
#endif

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
	    un->un_tgt_blocksize, un->un_sys_blocksize);
	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "request start block:0x%x\n", xp->xb_blkno);
	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "request len:0x%x\n", bp->b_bcount);

	/*
	 * Allocate the layer-private data area for the mapblocksize layer.
	 * Layers are allowed to use the xp_private member of the sd_xbuf
	 * struct to store the pointer to their layer-private data block, but
	 * each layer also has the responsibility of restoring the prior
	 * contents of xb_private before returning the buf/xbuf to the
	 * higher layer that sent it.
	 *
	 * Here we save the prior contents of xp->xb_private into the
	 * bsp->mbs_oprivate field of our layer-private data area. This value
	 * is restored by sd_mapblocksize_iodone() just prior to freeing up
	 * the layer-private area and returning the buf/xbuf to the layer
	 * that sent it.
	 *
	 * Note that here we use kmem_zalloc for the allocation as there are
	 * parts of the mapblocksize code that expect certain fields to be
	 * zero unless explicitly set to a required value.
	 */
	bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
	bsp->mbs_oprivate = xp->xb_private;
	xp->xb_private = bsp;

	/*
	 * This treats the data on the disk (target) as an array of bytes.
	 * first_byte is the byte offset, from the beginning of the device,
	 * to the location of the request. This is converted from a
	 * un->un_sys_blocksize block address to a byte offset, and then back
	 * to a block address based upon a un->un_tgt_blocksize block size.
	 *
	 * xp->xb_blkno should be absolute upon entry into this function,
	 * but it is based upon partitions that use the "system"
	 * block size. It must be adjusted to reflect the block size of
	 * the target.
	 *
	 * Note that end_block is actually the block that follows the last
	 * block of the request, but that's what is needed for the computation.
	 */
	first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
	start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
	end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) /
	    un->un_tgt_blocksize;

	/* request_bytes is rounded up to a multiple of the target block size */
	request_bytes = (end_block - start_block) * un->un_tgt_blocksize;

	/*
	 * See if the starting address of the request and the request
	 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
	 * then we do not need to allocate a shadow buf to handle the request.
	 */
	if (((first_byte % un->un_tgt_blocksize) == 0) &&
	    ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
		is_aligned = TRUE;
	}

	if ((bp->b_flags & B_READ) == 0) {
		/*
		 * Lock the range for a write operation. An aligned request is
		 * considered a simple write; otherwise the request must be a
		 * read-modify-write.
		 */
		bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
		    (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
	}

	/*
	 * Alloc a shadow buf if the request is not aligned. Also, this is
	 * where the READ command is generated for a read-modify-write. (The
	 * write phase is deferred until after the read completes.)
	 */
	if (is_aligned == FALSE) {

		struct sd_mapblocksize_info	*shadow_bsp;
		struct sd_xbuf	*shadow_xp;
		struct buf	*shadow_bp;

		/*
		 * Allocate the shadow buf and its associated xbuf. Note that
		 * after this call the xb_blkno value in both the original
		 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
		 * same: absolute relative to the start of the device, and
		 * adjusted for the target block size. The b_blkno in the
		 * shadow buf will also be set to this value. We should never
		 * change b_blkno in the original bp however.
		 *
		 * Note also that the shadow buf will always need to be a
		 * READ command, regardless of whether the incoming command
		 * is a READ or a WRITE.
		 */
		shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
		    xp->xb_blkno,
		    (int (*)(struct buf *)) sd_mapblocksize_iodone);

		shadow_xp = SD_GET_XBUF(shadow_bp);

		/*
		 * Allocate the layer-private data for the shadow buf.
		 * (No need to preserve xb_private in the shadow xbuf.)
		 */
		shadow_xp->xb_private = shadow_bsp =
		    kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);

		/*
		 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone
		 * to figure out where the start of the user data is (based upon
		 * the system block size) in the data returned by the READ
		 * command (which will be based upon the target blocksize). Note
		 * that this is only really used if the request is unaligned.
		 */
		bsp->mbs_copy_offset = (ssize_t)(first_byte -
		    ((offset_t)xp->xb_blkno * un->un_tgt_blocksize));
		ASSERT((bsp->mbs_copy_offset >= 0) &&
		    (bsp->mbs_copy_offset < un->un_tgt_blocksize));

		shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset;

		shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index;

		/* Transfer the wmap (if any) to the shadow buf */
		shadow_bsp->mbs_wmp = bsp->mbs_wmp;
		bsp->mbs_wmp = NULL;

		/*
		 * The shadow buf goes on from here in place of the
		 * original buf.
		 */
		shadow_bsp->mbs_orig_bp = bp;
		bp = shadow_bp;
	}

	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno);
	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: tgt request len:0x%x\n",
	    request_bytes);
	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp);

done:
	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp);
}


/*
 * Function: sd_mapblocksize_iodone
 *
 * Description: Completion side processing for block-size mapping.
 *
 * Context: May be called under interrupt context
 */

static void
sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_mapblocksize_info	*bsp;
	struct sd_xbuf	*xp;
	struct sd_xbuf	*orig_xp;	/* sd_xbuf for the original buf */
	struct buf	*orig_bp;	/* ptr to the original buf */
	offset_t	shadow_end;
	offset_t	request_end;
	offset_t	shadow_start;
	ssize_t		copy_offset;
	size_t		copy_length;
	size_t		shortfall;
	uint_t		is_write;	/* TRUE if this bp is a WRITE */
	uint_t		has_wmap;	/* TRUE if this bp has a wmap */

	ASSERT(un != NULL);
	ASSERT(bp != NULL);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp);

	/*
	 * There is no shadow buf or layer-private data if the target is
	 * using un->un_sys_blocksize as its block size or if bcount == 0.
	 */
	if ((un->un_tgt_blocksize == un->un_sys_blocksize) ||
	    (bp->b_bcount == 0)) {
		goto exit;
	}

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/* Retrieve the pointer to the layer-private data area from the xbuf. */
	bsp = xp->xb_private;

	is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE;
	has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE;

	if (is_write) {
		/*
		 * For a WRITE request we must free up the block range that
		 * we have locked up. This holds regardless of whether this is
		 * an aligned write request or a read-modify-write request.
		 */
		sd_range_unlock(un, bsp->mbs_wmp);
		bsp->mbs_wmp = NULL;
	}

	if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) {
		/*
		 * An aligned read or write command will have no shadow buf;
		 * there is not much else to do with it.
		 */
		goto done;
	}

	orig_bp = bsp->mbs_orig_bp;
	ASSERT(orig_bp != NULL);
	orig_xp = SD_GET_XBUF(orig_bp);
	ASSERT(orig_xp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!is_write && has_wmap) {
		/*
		 * A READ with a wmap means this is the READ phase of a
		 * read-modify-write. If an error occurred on the READ then
		 * we do not proceed with the WRITE phase or copy any data.
		 * Just release the write maps and return with an error.
		 */
		if ((bp->b_resid != 0) || (bp->b_error != 0)) {
			orig_bp->b_resid = orig_bp->b_bcount;
			bioerror(orig_bp, bp->b_error);
			sd_range_unlock(un, bsp->mbs_wmp);
			goto freebuf_done;
		}
	}

	/*
	 * Here is where we set up to copy the data from the shadow buf
	 * into the space associated with the original buf.
	 *
	 * To deal with the conversion between block sizes, these
	 * computations treat the data as an array of bytes, with the
	 * first byte (byte 0) corresponding to the first byte in the
	 * first block on the disk.
	 */

	/*
	 * shadow_start and shadow_len indicate the location and size of
	 * the data returned with the shadow IO request.
	 */
	shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
	shadow_end = shadow_start + bp->b_bcount - bp->b_resid;

	/*
	 * copy_offset gives the offset (in bytes) from the start of the first
	 * block of the READ request to the beginning of the data. We retrieve
	 * this value from mbs_copy_offset in the shadow xbuf's layer-private
	 * data, where it was saved by sd_mapblocksize_iostart(). copy_length
	 * gives the amount of data to be copied (in bytes).
	 */
	copy_offset = bsp->mbs_copy_offset;
	ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize));
	copy_length = orig_bp->b_bcount;
	request_end = shadow_start + copy_offset + orig_bp->b_bcount;

	/*
	 * Set up the resid and error fields of orig_bp as appropriate.
	 */
	if (shadow_end >= request_end) {
		/* We got all the requested data; set resid to zero */
		orig_bp->b_resid = 0;
	} else {
		/*
		 * We failed to get enough data to fully satisfy the original
		 * request. Just copy back whatever data we got and set
		 * up the residual and error code as required.
		 *
		 * 'shortfall' is the amount by which the data received with the
		 * shadow buf has "fallen short" of the requested amount.
		 */
		shortfall = (size_t)(request_end - shadow_end);

		if (shortfall > orig_bp->b_bcount) {
			/*
			 * We did not get enough data to even partially
			 * fulfill the original request. The residual is
			 * equal to the amount requested.
			 */
			orig_bp->b_resid = orig_bp->b_bcount;
		} else {
			/*
			 * We did not get all the data that we requested
			 * from the device, but we will try to return what
			 * portion we did get.
			 */
			orig_bp->b_resid = shortfall;
		}
		ASSERT(copy_length >= orig_bp->b_resid);
		copy_length -= orig_bp->b_resid;
	}

	/* Propagate the error code from the shadow buf to the original buf */
	bioerror(orig_bp, bp->b_error);

	if (is_write) {
		goto freebuf_done;	/* No data copying for a WRITE */
	}

	if (has_wmap) {
		/*
		 * This is a READ command from the READ phase of a
		 * read-modify-write request. We have to copy the data given
		 * by the user OVER the data returned by the READ command,
		 * then convert the command from a READ to a WRITE and send
		 * it back to the target.
		 */
		bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset,
		    copy_length);

		bp->b_flags &= ~((int)B_READ);	/* Convert to a WRITE */

		/*
		 * Dispatch the WRITE command to the taskq thread, which
		 * will in turn send the command to the target. When the
		 * WRITE command completes, we (sd_mapblocksize_iodone())
		 * will get called again as part of the iodone chain
		 * processing for it. Note that we will still be dealing
		 * with the shadow buf at that point.
		 */
		if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp,
		    KM_NOSLEEP) != 0) {
			/*
			 * Dispatch was successful so we are done. Return
			 * without going any higher up the iodone chain. Do
			 * not free up any layer-private data until after the
			 * WRITE completes.
			 */
			return;
		}

		/*
		 * Dispatch of the WRITE command failed; set up the error
		 * condition and send this IO back up the iodone chain.
		 */
		bioerror(orig_bp, EIO);
		orig_bp->b_resid = orig_bp->b_bcount;

	} else {
		/*
		 * This is a regular READ request (ie, not a RMW). Copy the
		 * data from the shadow buf into the original buf. The
		 * copy_offset compensates for any "misalignment" between the
		 * shadow buf (with its un->un_tgt_blocksize blocks) and the
		 * original buf (with its un->un_sys_blocksize blocks).
		 */
		bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr,
		    copy_length);
	}

freebuf_done:

	/*
	 * At this point we still have both the shadow buf AND the original
	 * buf to deal with, as well as the layer-private data area in each.
	 * Local variables are as follows:
	 *
	 * bp -- points to shadow buf
	 * xp -- points to xbuf of shadow buf
	 * bsp -- points to layer-private data area of shadow buf
	 * orig_bp -- points to original buf
	 *
	 * First free the shadow buf and its associated xbuf, then free the
	 * layer-private data area from the shadow buf. There is no need to
	 * restore xb_private in the shadow xbuf.
	 */
	sd_shadow_buf_free(bp);
	kmem_free(bsp, sizeof (struct sd_mapblocksize_info));

	/*
	 * Now update the local variables to point to the original buf, xbuf,
	 * and layer-private area.
	 */
	bp = orig_bp;
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(xp == orig_xp);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

done:
	/*
	 * Restore xb_private to whatever it was set to by the next higher
	 * layer in the chain, then free the layer-private data area.
	 */
	xp->xb_private = bsp->mbs_oprivate;
	kmem_free(bsp, sizeof (struct sd_mapblocksize_info));

exit:
	SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp),
	    "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp);

	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function: sd_checksum_iostart
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: Kernel thread context
 */

static void
sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IOSTART(index, un, bp);
}


/*
 * Function: sd_checksum_iodone
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: May be called under interrupt context
 */

static void
sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function: sd_checksum_uscsi_iostart
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: Kernel thread context
 */

static void
sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IOSTART(index, un, bp);
}


/*
 * Function: sd_checksum_uscsi_iodone
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: May be called under interrupt context
 */

static void
sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function: sd_pm_iostart
 *
 * Description: iostart-side routine for Power management.
 *
 * Context: Kernel thread context
 */

static void
sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");

	if (sd_pm_entry(un) != DDI_SUCCESS) {
		/*
		 * Set up to return the failed buf back up the 'iodone'
		 * side of the calling chain.
		 */
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;

		SD_BEGIN_IODONE(index, un, bp);

		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
		return;
	}

	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
}


/*
 * Function: sd_pm_iodone
 *
 * Description: iodone-side routine for power management.
 *
 * Context: may be called from interrupt context
 */

static void
sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");

	/*
	 * After attach the following flag is only read, so don't
	 * take the penalty of acquiring a mutex for it.
	 */
	if (un->un_f_pm_is_enabled == TRUE) {
		sd_pm_exit(un);
	}

	SD_NEXT_IODONE(index, un, bp);

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
}


/*
 * Function: sd_core_iostart
 *
 * Description: Primary driver function for enqueuing buf(9S) structs from
 *		the system and initiating IO to the target device
 *
 * Context: Kernel thread context. Can sleep.
 *
 * Assumptions: - The given xp->xb_blkno is absolute
 *		(ie, relative to the start of the device).
 *		- The IO is to be done using the native blocksize of
 *		the device, as specified in un->un_tgt_blocksize.
 */
/* ARGSUSED */
static void
sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * If we are currently in the failfast state, fail any new IO
	 * that has B_FAILFAST set, then return.
	 */
	if ((bp->b_flags & B_FAILFAST) &&
	    (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
		mutex_exit(SD_MUTEX(un));
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	if (SD_IS_DIRECT_PRIORITY(xp)) {
		/*
		 * Priority command -- transport it immediately.
		 *
		 * Note: We may want to assert that USCSI_DIAGNOSE is set,
		 * because all direct priority commands should be associated
		 * with error recovery actions which we don't want to retry.
		 */
		sd_start_cmds(un, bp);
	} else {
		/*
		 * Normal command -- add it to the wait queue, then start
		 * transporting commands from the wait queue.
		 */
		sd_add_buf_to_waitq(un, bp);
		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
		sd_start_cmds(un, NULL);
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp);
}


/*
 * Function: sd_init_cdb_limits
 *
 * Description: This is to handle scsi_pkt initialization differences
 *		between the driver platforms.
 *
 *		Legacy behaviors:
 *
 *		If the block number or the sector count exceeds the
 *		capabilities of a Group 0 command, shift over to a
 *		Group 1 command.
We don't blindly use Group 1
 *		commands because a) some drives (CDC Wren IVs) get a
 *		bit confused, and b) there is probably a fair amount
 *		of speed difference for a target to receive and decode
 *		a 10 byte command instead of a 6 byte command.
 *
 *		The xfer time difference of 6 vs 10 byte CDBs is
 *		still significant so this code is still worthwhile.
 *		10 byte CDBs are very inefficient with the fas HBA driver
 *		and older disks. Each CDB byte took 1 usec with some
 *		popular disks.
 *
 * Context: Must be called at attach time
 */

static void
sd_init_cdb_limits(struct sd_lun *un)
{
	/*
	 * Use CDB_GROUP1 commands for most devices except for
	 * parallel SCSI fixed drives in which case we get better
	 * performance using CDB_GROUP0 commands (where applicable).
	 */
	un->un_mincdb = SD_CDB_GROUP1;
#if !defined(__fibre)
	if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) &&
	    !ISREMOVABLE(un)) {
		un->un_mincdb = SD_CDB_GROUP0;
	}
#endif

	/*
	 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4
	 * commands for fixed disks unless we are building for a 32 bit
	 * kernel.
	 */
#ifdef _LP64
	un->un_maxcdb = (ISREMOVABLE(un)) ? SD_CDB_GROUP5 : SD_CDB_GROUP4;
#else
	un->un_maxcdb = (ISREMOVABLE(un)) ? SD_CDB_GROUP5 : SD_CDB_GROUP1;
#endif

	/*
	 * x86 systems require the PKT_DMA_PARTIAL flag
	 */
#if defined(__x86)
	un->un_pkt_flags = PKT_DMA_PARTIAL;
#else
	un->un_pkt_flags = 0;
#endif

	/*
	 * Status length: room for a full auto-request-sense structure when
	 * ARQ is enabled, otherwise just a single status byte.
	 */
	un->un_status_len = (int)((un->un_f_arq_enabled == TRUE)
	    ? sizeof (struct scsi_arq_status) : 1);
	un->un_cmd_timeout = (ushort_t)sd_io_time;
	/* USCSI commands to CD devices get double the normal timeout. */
	un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout;
}


/*
 * Function: sd_initpkt_for_buf
 *
 * Description: Allocate and initialize for transport a scsi_pkt struct,
 *		based upon the info specified in the given buf struct.
 *
 *		Assumes the xb_blkno in the request is absolute (ie,
 *		relative to the start of the device (NOT partition!).
 *		Also assumes that the request is using the native block
 *		size of the device (as returned by the READ CAPACITY
 *		command).
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *		SD_PKT_ALLOC_FAILURE
 *		SD_PKT_ALLOC_FAILURE_NO_DMA
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context: Kernel thread and may be called from software interrupt context
 *		as part of a sdrunout callback. This function may not block or
 *		call routines that block
 */

static int
sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt *pktp = NULL;
	struct sd_lun	*un;
	size_t		blockcount;
	daddr_t		startblock;
	int		rval;
	int		cmd_flags;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
	if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
		/*
		 * Already have a scsi_pkt -- just need DMA resources.
		 * We must recompute the CDB in case the mapping returns
		 * a nonzero pkt_resid.
		 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
		 * that is being retried, the unmap/remap of the DMA resouces
		 * will result in the entire transfer starting over again
		 * from the very first block.
		 */
		ASSERT(xp->xb_pktp != NULL);
		pktp = xp->xb_pktp;
	} else {
		pktp = NULL;
	}
#endif /* __i386 || __amd64 */

	startblock = xp->xb_blkno;	/* Absolute block num. */
	blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */

	cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);

#else

	cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags;

#endif

	/*
	 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
	 * call scsi_init_pkt, and build the CDB.
	 */
	rval = sd_setup_rw_pkt(un, &pktp, bp,
	    cmd_flags, sdrunout, (caddr_t)un,
	    startblock, blockcount);

	if (rval == 0) {
		/*
		 * Success.
		 *
		 * If partial DMA is being used and required for this transfer.
		 * set it up here.
		 */
		if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
		    (pktp->pkt_resid != 0)) {

			/*
			 * Save the CDB length and pkt_resid for the
			 * next xfer
			 */
			xp->xb_dma_resid = pktp->pkt_resid;

			/* rezero resid */
			pktp->pkt_resid = 0;

		} else {
			xp->xb_dma_resid = 0;
		}

		pktp->pkt_flags = un->un_tagflags;
		pktp->pkt_time  = un->un_cmd_timeout;
		pktp->pkt_comp  = sdintr;

		pktp->pkt_private = bp;
		*pktpp = pktp;

		SD_TRACE(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
#endif

		return (SD_PKT_ALLOC_SUCCESS);

	}

	/*
	 * SD_PKT_ALLOC_FAILURE is the only expected failure code
	 * from sd_setup_rw_pkt.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	if (rval == SD_PKT_ALLOC_FAILURE) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations. The driver will not
		 * suspend, pm_suspend, or detach while the state is RWAIT.
		 */
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);

		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	} else {
		/*
		 * PKT_ALLOC_FAILURE_CDB_TOO_SMALL
		 *
		 * This should never happen. Maybe someone messed with the
		 * kernel's minphys?
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Request rejected: too large for CDB: "
		    "lba:0x%08lx len:0x%08lx\n", startblock, blockcount);
		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);

	}
}


/*
 * Function: sd_destroypkt_for_buf
 *
 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
 *
 * Context: Kernel thread or interrupt context
 */

static void
sd_destroypkt_for_buf(struct buf *bp)
{
	ASSERT(bp != NULL);
	ASSERT(SD_GET_UN(bp) != NULL);

	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
	    "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);

	ASSERT(SD_GET_PKTP(bp) != NULL);
	scsi_destroy_pkt(SD_GET_PKTP(bp));

	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
	    "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
}

/*
 * Function: sd_setup_rw_pkt
 *
 * Description: Determines appropriate CDB group for the requested LBA
 *		and transfer length, calls scsi_init_pkt, and builds
 *		the CDB.
Do not use for partial DMA transfers except
 *		for the initial transfer since the CDB size must
 *		remain constant.
 *
 * Context: Kernel thread and may be called from software interrupt
 *		context as part of a sdrunout callback. This function may not
 *		block or call routines that block
 */


int
sd_setup_rw_pkt(struct sd_lun *un,
    struct scsi_pkt **pktpp, struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount)
{
	struct scsi_pkt *return_pktp;
	union scsi_cdb *cdbp;
	struct sd_cdbinfo *cp = NULL;
	int i;

	/*
	 * See which size CDB to use, based upon the request.
	 */
	for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {

		/*
		 * Check lba and block count against sd_cdbtab limits.
		 * In the partial DMA case, we have to use the same size
		 * CDB for all the transfers. Check lba + blockcount
		 * against the max LBA so we know that segment of the
		 * transfer can use the CDB we select.
		 */
		if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
		    (blockcount <= sd_cdbtab[i].sc_maxlen)) {

			/*
			 * The command will fit into the CDB type
			 * specified by sd_cdbtab[i].
			 */
			cp = sd_cdbtab + i;

			/*
			 * Call scsi_init_pkt so we can fill in the
			 * CDB.
			 */
			return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
			    bp, cp->sc_grpcode, un->un_status_len, 0,
			    flags, callback, callback_arg);

			if (return_pktp != NULL) {

				/*
				 * Return new value of pkt
				 */
				*pktpp = return_pktp;

				/*
				 * To be safe, zero the CDB ensuring there is
				 * no leftover data from a previous command.
				 */
				bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);

				/*
				 * Handle partial DMA mapping
				 */
				if (return_pktp->pkt_resid != 0) {

					/*
					 * Not going to xfer as many blocks as
					 * originally expected; shrink the CDB
					 * count to match what was actually
					 * mapped for this segment.
					 */
					blockcount -=
					    SD_BYTES2TGTBLOCKS(un,
					    return_pktp->pkt_resid);
				}

				cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;

				/*
				 * Set command byte based on the CDB
				 * type we matched.
				 */
				cdbp->scc_cmd = cp->sc_grpmask |
				    ((bp->b_flags & B_READ) ?
				    SCMD_READ : SCMD_WRITE);

				SD_FILL_SCSI1_LUN(un, return_pktp);

				/*
				 * Fill in LBA and length
				 */
				ASSERT((cp->sc_grpcode == CDB_GROUP1) ||
				    (cp->sc_grpcode == CDB_GROUP4) ||
				    (cp->sc_grpcode == CDB_GROUP0) ||
				    (cp->sc_grpcode == CDB_GROUP5));

				if (cp->sc_grpcode == CDB_GROUP1) {
					FORMG1ADDR(cdbp, lba);
					FORMG1COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP4) {
					FORMG4LONGADDR(cdbp, lba);
					FORMG4COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP0) {
					FORMG0ADDR(cdbp, lba);
					FORMG0COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP5) {
					FORMG5ADDR(cdbp, lba);
					FORMG5COUNT(cdbp, blockcount);
					return (0);
				}

				/*
				 * It should be impossible to not match one
				 * of the CDB types above, so we should never
				 * reach this point. Set the CDB command byte
				 * to test-unit-ready to avoid writing
				 * to somewhere we don't intend.
				 */
				cdbp->scc_cmd = SCMD_TEST_UNIT_READY;
				return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
			} else {
				/*
				 * Couldn't get scsi_pkt
				 */
				return (SD_PKT_ALLOC_FAILURE);
			}
		}
	}

	/*
	 * None of the available CDB types were suitable. This really
	 * should never happen: on a 64 bit system we support
	 * READ16/WRITE16 which will hold an entire 64 bit disk address
	 * and on a 32 bit system we will refuse to bind to a device
	 * larger than 2TB so addresses will never be larger than 32 bits.
	 */
	return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
}

/*
 * Function: sd_setup_next_rw_pkt
 *
 * Description: Setup packet for partial DMA transfers, except for the
 *		initial transfer. sd_setup_rw_pkt should be used for
 *		the initial transfer.
 *
 * Context: Kernel thread and may be called from interrupt context.
 */

int
sd_setup_next_rw_pkt(struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp,
    diskaddr_t lba, uint32_t blockcount)
{
	uchar_t com;
	union scsi_cdb *cdbp;
	uchar_t cdb_group_id;

	ASSERT(pktp != NULL);
	ASSERT(pktp->pkt_cdbp != NULL);

	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
	com = cdbp->scc_cmd;
	cdb_group_id = CDB_GROUPID(com);

	ASSERT((cdb_group_id == CDB_GROUPID_0) ||
	    (cdb_group_id == CDB_GROUPID_1) ||
	    (cdb_group_id == CDB_GROUPID_4) ||
	    (cdb_group_id == CDB_GROUPID_5));

	/*
	 * Move pkt to the next portion of the xfer.
	 * func is NULL_FUNC so we do not have to release
	 * the disk mutex here.
	 */
	if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0,
	    NULL_FUNC, NULL) == pktp) {
		/* Success. Handle partial DMA */
		if (pktp->pkt_resid != 0) {
			blockcount -=
			    SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
		}

		cdbp->scc_cmd = com;
		SD_FILL_SCSI1_LUN(un, pktp);
		if (cdb_group_id == CDB_GROUPID_1) {
			FORMG1ADDR(cdbp, lba);
			FORMG1COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_4) {
			FORMG4LONGADDR(cdbp, lba);
			FORMG4COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_0) {
			FORMG0ADDR(cdbp, lba);
			FORMG0COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_5) {
			FORMG5ADDR(cdbp, lba);
			FORMG5COUNT(cdbp, blockcount);
			return (0);
		}

		/* Unreachable */
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
	}

	/*
	 * Error setting up next portion of cmd transfer.
	 * Something is definitely very wrong and this
	 * should not happen.
	 */
	return (SD_PKT_ALLOC_FAILURE);
}

/*
 * Function: sd_initpkt_for_uscsi
 *
 * Description: Allocate and initialize for transport a scsi_pkt struct,
 *		based upon the info specified in the given uscsi_cmd struct.
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *		SD_PKT_ALLOC_FAILURE
 *		SD_PKT_ALLOC_FAILURE_NO_DMA
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context: Kernel thread and may be called from software interrupt context
 *		as part of a sdrunout callback.
This function may not block or
 *		call routines that block
 */

static int
sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	struct sd_lun	*un;
	uint32_t	flags = 0;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);

	/*
	 * Allocate the scsi_pkt for the command.  A NULL bp is passed when
	 * b_bcount is zero (no data phase).  sdrunout is registered as the
	 * resource-callback so the I/O is retried when resources free up.
	 */
	pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
	    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
	    sizeof (struct scsi_arq_status), 0, un->un_pkt_flags,
	    sdrunout, (caddr_t)un);

	if (pktp == NULL) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations. The driver will not
		 * suspend, pm_suspend, or detach while the state is RWAIT.
		 */
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);

		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	}

	/*
	 * We do not do DMA breakup for USCSI commands, so return failure
	 * here if all the needed DMA resources were not allocated.
	 */
	if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
	    (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
		scsi_destroy_pkt(pktp);
		SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
		    "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
		return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
	}

	/* Init the cdb from the given uscsi struct */
	(void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
	    uscmd->uscsi_cdb[0], 0, 0, 0);

	SD_FILL_SCSI1_LUN(un, pktp);

	/*
	 * Set up the optional USCSI flags. See the uscsi (7I) man page
	 * for listing of the supported flags.
	 */
	if (uscmd->uscsi_flags & USCSI_SILENT) {
		flags |= FLAG_SILENT;
	}

	if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
		flags |= FLAG_DIAGNOSE;
	}

	if (uscmd->uscsi_flags & USCSI_ISOLATE) {
		flags |= FLAG_ISOLATE;
	}

	/* Renegotiation only applies to parallel SCSI, not fibre channel */
	if (un->un_f_is_fibre == FALSE) {
		if (uscmd->uscsi_flags & USCSI_RENEGOT) {
			flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
		}
	}

	/*
	 * Set the pkt flags here so we save time later.
	 * Note: These flags are NOT in the uscsi man page!!!
	 */
	if (uscmd->uscsi_flags & USCSI_HEAD) {
		flags |= FLAG_HEAD;
	}

	if (uscmd->uscsi_flags & USCSI_NOINTR) {
		flags |= FLAG_NOINTR;
	}

	/*
	 * For tagged queueing, things get a bit complicated.
	 * Check first for head of queue and last for ordered queue.
	 * If neither head nor order, use the default driver tag flags.
	 */
	if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
		if (uscmd->uscsi_flags & USCSI_HTAG) {
			flags |= FLAG_HTAG;
		} else if (uscmd->uscsi_flags & USCSI_OTAG) {
			flags |= FLAG_OTAG;
		} else {
			flags |= un->un_tagflags & FLAG_TAGMASK;
		}
	}

	/* NODISCON is mutually exclusive with tagged queueing */
	if (uscmd->uscsi_flags & USCSI_NODISCON) {
		flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
	}

	pktp->pkt_flags = flags;

	/* Copy the caller's CDB into the pkt... */
	bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);

	/* A zero uscsi_timeout means "use the driver's default" */
	if (uscmd->uscsi_timeout == 0) {
		pktp->pkt_time = un->un_uscsi_timeout;
	} else {
		pktp->pkt_time = uscmd->uscsi_timeout;
	}

	/* need it later to identify USCSI request in sdintr */
	xp->xb_pkt_flags |= SD_XB_USCSICMD;

	xp->xb_sense_resid = uscmd->uscsi_rqresid;

	pktp->pkt_private = bp;
	pktp->pkt_comp = sdintr;
	*pktpp = pktp;

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);

	return (SD_PKT_ALLOC_SUCCESS);
}


/*
 *    Function: sd_destroypkt_for_uscsi
 *
 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
 *		IOs.  Also saves relevant info (status, residual, sense data)
 *		into the associated uscsi_cmd struct before the pkt is freed.
 *
 *     Context: May be called under interrupt context
 */

static void
sd_destroypkt_for_uscsi(struct buf *bp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	struct sd_lun	*un;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	/* Save the status and the residual into the uscsi_cmd struct */
	uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
	uscmd->uscsi_resid  = bp->b_resid;

	/*
	 * If enabled, copy any saved sense data into the area specified
	 * by the uscsi command.
	 */
	if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
	    (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
		/*
		 * Note: uscmd->uscsi_rqbuf should always point to a buffer
		 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
		 */
		uscmd->uscsi_rqstatus = xp->xb_sense_status;
		uscmd->uscsi_rqresid  = xp->xb_sense_resid;
		bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, SENSE_LENGTH);
	}

	/* We are done with the scsi_pkt; free it now */
	ASSERT(SD_GET_PKTP(bp) != NULL);
	scsi_destroy_pkt(SD_GET_PKTP(bp));

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
}


/*
 *    Function: sd_bioclone_alloc
 *
 * Description: Allocate a buf(9S) and init it as per the given buf
 *		and the various arguments.  The associated sd_xbuf
 *		struct is (nearly) duplicated.  The struct buf *bp
 *		argument is saved in new_xp->xb_private.
 *
 *   Arguments: bp - ptr to the buf(9S) to be "shadowed"
 *		datalen - size of data area for the shadow bp
 *		blkno - starting LBA
 *		func - function pointer for b_iodone in the shadow buf. (May
 *		be NULL if none.)
 *
 * Return Code: Pointer to the allocated buf(9S) struct
 *
 *     Context: Can sleep.
 */

static struct buf *
sd_bioclone_alloc(struct buf *bp, size_t datalen,
	daddr_t blkno, int (*func)(struct buf *))
{
	struct sd_lun	*un;
	struct sd_xbuf	*xp;
	struct sd_xbuf	*new_xp;
	struct buf	*new_bp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/* KM_SLEEP: allocation cannot fail, so no NULL check is needed */
	new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
	    NULL, KM_SLEEP);

	new_bp->b_lblkno = blkno;

	/*
	 * Allocate an xbuf for the shadow bp and copy the contents of the
	 * original xbuf into it.
	 */
	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	bcopy(xp, new_xp, sizeof (struct sd_xbuf));

	/*
	 * The given bp is automatically saved in the xb_private member
	 * of the new xbuf.  Callers are allowed to depend on this.
	 */
	new_xp->xb_private = bp;

	new_bp->b_private  = new_xp;

	return (new_bp);
}

/*
 *    Function: sd_shadow_buf_alloc
 *
 * Description: Allocate a buf(9S) and init it as per the given buf
 *		and the various arguments.  The associated sd_xbuf
 *		struct is (nearly) duplicated.  The struct buf *bp
 *		argument is saved in new_xp->xb_private.
 *
 *   Arguments: bp - ptr to the buf(9S) to be "shadowed"
 *		datalen - size of data area for the shadow bp
 *		bflags - B_READ or B_WRITE (pseudo flag)
 *		blkno - starting LBA
 *		func - function pointer for b_iodone in the shadow buf. (May
 *		be NULL if none.)
 *
 * Return Code: Pointer to the allocated buf(9S) struct
 *
 *     Context: Can sleep.
13573 */ 13574 13575 static struct buf * 13576 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 13577 daddr_t blkno, int (*func)(struct buf *)) 13578 { 13579 struct sd_lun *un; 13580 struct sd_xbuf *xp; 13581 struct sd_xbuf *new_xp; 13582 struct buf *new_bp; 13583 13584 ASSERT(bp != NULL); 13585 xp = SD_GET_XBUF(bp); 13586 ASSERT(xp != NULL); 13587 un = SD_GET_UN(bp); 13588 ASSERT(un != NULL); 13589 ASSERT(!mutex_owned(SD_MUTEX(un))); 13590 13591 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 13592 bp_mapin(bp); 13593 } 13594 13595 bflags &= (B_READ | B_WRITE); 13596 #if defined(__i386) || defined(__amd64) 13597 new_bp = getrbuf(KM_SLEEP); 13598 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 13599 new_bp->b_bcount = datalen; 13600 new_bp->b_flags = bp->b_flags | bflags; 13601 #else 13602 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 13603 datalen, bflags, SLEEP_FUNC, NULL); 13604 #endif 13605 new_bp->av_forw = NULL; 13606 new_bp->av_back = NULL; 13607 new_bp->b_dev = bp->b_dev; 13608 new_bp->b_blkno = blkno; 13609 new_bp->b_iodone = func; 13610 new_bp->b_edev = bp->b_edev; 13611 new_bp->b_resid = 0; 13612 13613 /* We need to preserve the B_FAILFAST flag */ 13614 if (bp->b_flags & B_FAILFAST) { 13615 new_bp->b_flags |= B_FAILFAST; 13616 } 13617 13618 /* 13619 * Allocate an xbuf for the shadow bp and copy the contents of the 13620 * original xbuf into it. 13621 */ 13622 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 13623 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 13624 13625 /* Need later to copy data between the shadow buf & original buf! */ 13626 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 13627 13628 /* 13629 * The given bp is automatically saved in the xb_private member 13630 * of the new xbuf. Callers are allowed to depend on this. 
13631 */ 13632 new_xp->xb_private = bp; 13633 13634 new_bp->b_private = new_xp; 13635 13636 return (new_bp); 13637 } 13638 13639 /* 13640 * Function: sd_bioclone_free 13641 * 13642 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 13643 * in the larger than partition operation. 13644 * 13645 * Context: May be called under interrupt context 13646 */ 13647 13648 static void 13649 sd_bioclone_free(struct buf *bp) 13650 { 13651 struct sd_xbuf *xp; 13652 13653 ASSERT(bp != NULL); 13654 xp = SD_GET_XBUF(bp); 13655 ASSERT(xp != NULL); 13656 13657 /* 13658 * Call bp_mapout() before freeing the buf, in case a lower 13659 * layer or HBA had done a bp_mapin(). we must do this here 13660 * as we are the "originator" of the shadow buf. 13661 */ 13662 bp_mapout(bp); 13663 13664 /* 13665 * Null out b_iodone before freeing the bp, to ensure that the driver 13666 * never gets confused by a stale value in this field. (Just a little 13667 * extra defensiveness here.) 13668 */ 13669 bp->b_iodone = NULL; 13670 13671 freerbuf(bp); 13672 13673 kmem_free(xp, sizeof (struct sd_xbuf)); 13674 } 13675 13676 /* 13677 * Function: sd_shadow_buf_free 13678 * 13679 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 13680 * 13681 * Context: May be called under interrupt context 13682 */ 13683 13684 static void 13685 sd_shadow_buf_free(struct buf *bp) 13686 { 13687 struct sd_xbuf *xp; 13688 13689 ASSERT(bp != NULL); 13690 xp = SD_GET_XBUF(bp); 13691 ASSERT(xp != NULL); 13692 13693 #if defined(__sparc) 13694 /* 13695 * Call bp_mapout() before freeing the buf, in case a lower 13696 * layer or HBA had done a bp_mapin(). we must do this here 13697 * as we are the "originator" of the shadow buf. 13698 */ 13699 bp_mapout(bp); 13700 #endif 13701 13702 /* 13703 * Null out b_iodone before freeing the bp, to ensure that the driver 13704 * never gets confused by a stale value in this field. (Just a little 13705 * extra defensiveness here.) 
13706 */ 13707 bp->b_iodone = NULL; 13708 13709 #if defined(__i386) || defined(__amd64) 13710 kmem_free(bp->b_un.b_addr, bp->b_bcount); 13711 freerbuf(bp); 13712 #else 13713 scsi_free_consistent_buf(bp); 13714 #endif 13715 13716 kmem_free(xp, sizeof (struct sd_xbuf)); 13717 } 13718 13719 13720 /* 13721 * Function: sd_print_transport_rejected_message 13722 * 13723 * Description: This implements the ludicrously complex rules for printing 13724 * a "transport rejected" message. This is to address the 13725 * specific problem of having a flood of this error message 13726 * produced when a failover occurs. 13727 * 13728 * Context: Any. 13729 */ 13730 13731 static void 13732 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 13733 int code) 13734 { 13735 ASSERT(un != NULL); 13736 ASSERT(mutex_owned(SD_MUTEX(un))); 13737 ASSERT(xp != NULL); 13738 13739 /* 13740 * Print the "transport rejected" message under the following 13741 * conditions: 13742 * 13743 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 13744 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 13745 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 13746 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 13747 * scsi_transport(9F) (which indicates that the target might have 13748 * gone off-line). This uses the un->un_tran_fatal_count 13749 * count, which is incremented whenever a TRAN_FATAL_ERROR is 13750 * received, and reset to zero whenver a TRAN_ACCEPT is returned 13751 * from scsi_transport(). 13752 * 13753 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 13754 * the preceeding cases in order for the message to be printed. 
	 */
	if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) {
		if ((sd_level_mask & SD_LOGMASK_DIAG) ||
		    (code != TRAN_FATAL_ERROR) ||
		    (un->un_tran_fatal_count == 1)) {
			switch (code) {
			case TRAN_BADPKT:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected bad packet\n");
				break;
			case TRAN_FATAL_ERROR:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected fatal error\n");
				break;
			default:
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "transport rejected (%d)\n", code);
				break;
			}
		}
	}
}


/*
 *    Function: sd_add_buf_to_waitq
 *
 * Description: Add the given buf(9S) struct to the wait queue for the
 *		instance.  If sorting is enabled, then the buf is added
 *		to the queue via an elevator sort algorithm (a la
 *		disksort(9F)).  The SD_GET_BLKNO(bp) is used as the sort key.
 *		If sorting is not enabled, then the buf is just added
 *		to the end of the wait queue.
 *
 * Return Code: void
 *
 *     Context: Does not sleep/block, therefore technically can be called
 *		from any context.  However if sorting is enabled then the
 *		execution time is indeterminate, and may take long if
 *		the wait queue grows large.
 */

static void
sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
{
	struct buf *ap;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* If the queue is empty, add the buf as the only entry & return. */
	if (un->un_waitq_headp == NULL) {
		ASSERT(un->un_waitq_tailp == NULL);
		un->un_waitq_headp = un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	ASSERT(un->un_waitq_tailp != NULL);

	/*
	 * If sorting is disabled, just add the buf to the tail end of
	 * the wait queue and return.
	 */
	if (un->un_f_disksort_disabled) {
		un->un_waitq_tailp->av_forw = bp;
		un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	/*
	 * Sort thru the list of requests currently on the wait queue
	 * and add the new buf request at the appropriate position.
	 *
	 * The un->un_waitq_headp is an activity chain pointer on which
	 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
	 * first queue holds those requests which are positioned after
	 * the current SD_GET_BLKNO() (in the first request); the second holds
	 * requests which came in after their SD_GET_BLKNO() number was passed.
	 * Thus we implement a one way scan, retracting after reaching
	 * the end of the drive to the first request on the second
	 * queue, at which time it becomes the first queue.
	 * A one-way scan is natural because of the way UNIX read-ahead
	 * blocks are allocated.
	 *
	 * If we lie after the first request, then we must locate the
	 * second request list and add ourselves to it.
	 */
	ap = un->un_waitq_headp;
	if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
		while (ap->av_forw != NULL) {
			/*
			 * Look for an "inversion" in the (normally
			 * ascending) block numbers. This indicates
			 * the start of the second request list.
			 */
			if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
				/*
				 * Search the second request list for the
				 * first request at a larger block number.
				 * We go before that; however if there is
				 * no such request, we go at the end.
				 */
				do {
					if (SD_GET_BLKNO(bp) <
					    SD_GET_BLKNO(ap->av_forw)) {
						goto insert;
					}
					ap = ap->av_forw;
				} while (ap->av_forw != NULL);
				goto insert;	/* after last */
			}
			ap = ap->av_forw;
		}

		/*
		 * No inversions... we will go after the last, and
		 * be the first request in the second request list.
		 */
		goto insert;
	}

	/*
	 * Request is at/after the current request...
	 * sort in the first request list.
	 */
	while (ap->av_forw != NULL) {
		/*
		 * We want to go after the current request (1) if
		 * there is an inversion after it (i.e. it is the end
		 * of the first request list), or (2) if the next
		 * request is a larger block no. than our request.
		 */
		if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
		    (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
			goto insert;
		}
		ap = ap->av_forw;
	}

	/*
	 * Neither a second list nor a larger request, therefore
	 * we go at the end of the first list (which is the same
	 * as the end of the whole schebang).
	 */
insert:
	/* Link bp in immediately after ap */
	bp->av_forw = ap->av_forw;
	ap->av_forw = bp;

	/*
	 * If we inserted onto the tail end of the waitq, make sure the
	 * tail pointer is updated.
	 */
	if (ap == un->un_waitq_tailp) {
		un->un_waitq_tailp = bp;
	}
}


/*
 *    Function: sd_start_cmds
 *
 * Description: Remove and transport cmds from the driver queues.
 *
 *   Arguments: un - pointer to the unit (soft state) struct for the target.
 *
 *		immed_bp - ptr to a buf to be transported immediately. Only
 *		the immed_bp is transported; bufs on the waitq are not
 *		processed and the un_retry_bp is not checked.  If immed_bp is
 *		NULL, then normal queue processing is performed.
 *
 *     Context: May be called from kernel thread context, interrupt context,
 *		or runout callback context. This function may not block or
 *		call routines that block.
13931 */ 13932 13933 static void 13934 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 13935 { 13936 struct sd_xbuf *xp; 13937 struct buf *bp; 13938 void (*statp)(kstat_io_t *); 13939 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13940 void (*saved_statp)(kstat_io_t *); 13941 #endif 13942 int rval; 13943 13944 ASSERT(un != NULL); 13945 ASSERT(mutex_owned(SD_MUTEX(un))); 13946 ASSERT(un->un_ncmds_in_transport >= 0); 13947 ASSERT(un->un_throttle >= 0); 13948 13949 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 13950 13951 do { 13952 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13953 saved_statp = NULL; 13954 #endif 13955 13956 /* 13957 * If we are syncing or dumping, fail the command to 13958 * avoid recursively calling back into scsi_transport(). 13959 * See panic.c for more information about the states 13960 * the system can be in during panic. 13961 */ 13962 if ((un->un_state == SD_STATE_DUMPING) || 13963 (un->un_in_callback > 1)) { 13964 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13965 "sd_start_cmds: panicking\n"); 13966 goto exit; 13967 } 13968 13969 if ((bp = immed_bp) != NULL) { 13970 /* 13971 * We have a bp that must be transported immediately. 13972 * It's OK to transport the immed_bp here without doing 13973 * the throttle limit check because the immed_bp is 13974 * always used in a retry/recovery case. This means 13975 * that we know we are not at the throttle limit by 13976 * virtue of the fact that to get here we must have 13977 * already gotten a command back via sdintr(). This also 13978 * relies on (1) the command on un_retry_bp preventing 13979 * further commands from the waitq from being issued; 13980 * and (2) the code in sd_retry_command checking the 13981 * throttle limit before issuing a delayed or immediate 13982 * retry. This holds even if the throttle limit is 13983 * currently ratcheted down from its maximum value. 
13984 */ 13985 statp = kstat_runq_enter; 13986 if (bp == un->un_retry_bp) { 13987 ASSERT((un->un_retry_statp == NULL) || 13988 (un->un_retry_statp == kstat_waitq_enter) || 13989 (un->un_retry_statp == 13990 kstat_runq_back_to_waitq)); 13991 /* 13992 * If the waitq kstat was incremented when 13993 * sd_set_retry_bp() queued this bp for a retry, 13994 * then we must set up statp so that the waitq 13995 * count will get decremented correctly below. 13996 * Also we must clear un->un_retry_statp to 13997 * ensure that we do not act on a stale value 13998 * in this field. 13999 */ 14000 if ((un->un_retry_statp == kstat_waitq_enter) || 14001 (un->un_retry_statp == 14002 kstat_runq_back_to_waitq)) { 14003 statp = kstat_waitq_to_runq; 14004 } 14005 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14006 saved_statp = un->un_retry_statp; 14007 #endif 14008 un->un_retry_statp = NULL; 14009 14010 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14011 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 14012 "un_throttle:%d un_ncmds_in_transport:%d\n", 14013 un, un->un_retry_bp, un->un_throttle, 14014 un->un_ncmds_in_transport); 14015 } else { 14016 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 14017 "processing priority bp:0x%p\n", bp); 14018 } 14019 14020 } else if ((bp = un->un_waitq_headp) != NULL) { 14021 /* 14022 * A command on the waitq is ready to go, but do not 14023 * send it if: 14024 * 14025 * (1) the throttle limit has been reached, or 14026 * (2) a retry is pending, or 14027 * (3) a START_STOP_UNIT callback pending, or 14028 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 14029 * command is pending. 14030 * 14031 * For all of these conditions, IO processing will 14032 * restart after the condition is cleared. 
14033 */ 14034 if (un->un_ncmds_in_transport >= un->un_throttle) { 14035 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14036 "sd_start_cmds: exiting, " 14037 "throttle limit reached!\n"); 14038 goto exit; 14039 } 14040 if (un->un_retry_bp != NULL) { 14041 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14042 "sd_start_cmds: exiting, retry pending!\n"); 14043 goto exit; 14044 } 14045 if (un->un_startstop_timeid != NULL) { 14046 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14047 "sd_start_cmds: exiting, " 14048 "START_STOP pending!\n"); 14049 goto exit; 14050 } 14051 if (un->un_direct_priority_timeid != NULL) { 14052 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14053 "sd_start_cmds: exiting, " 14054 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n"); 14055 goto exit; 14056 } 14057 14058 /* Dequeue the command */ 14059 un->un_waitq_headp = bp->av_forw; 14060 if (un->un_waitq_headp == NULL) { 14061 un->un_waitq_tailp = NULL; 14062 } 14063 bp->av_forw = NULL; 14064 statp = kstat_waitq_to_runq; 14065 SD_TRACE(SD_LOG_IO_CORE, un, 14066 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 14067 14068 } else { 14069 /* No work to do so bail out now */ 14070 SD_TRACE(SD_LOG_IO_CORE, un, 14071 "sd_start_cmds: no more work, exiting!\n"); 14072 goto exit; 14073 } 14074 14075 /* 14076 * Reset the state to normal. This is the mechanism by which 14077 * the state transitions from either SD_STATE_RWAIT or 14078 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 14079 * If state is SD_STATE_PM_CHANGING then this command is 14080 * part of the device power control and the state must 14081 * not be put back to normal. Doing so would would 14082 * allow new commands to proceed when they shouldn't, 14083 * the device may be going off. 
14084 */ 14085 if ((un->un_state != SD_STATE_SUSPENDED) && 14086 (un->un_state != SD_STATE_PM_CHANGING)) { 14087 New_state(un, SD_STATE_NORMAL); 14088 } 14089 14090 xp = SD_GET_XBUF(bp); 14091 ASSERT(xp != NULL); 14092 14093 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14094 /* 14095 * Allocate the scsi_pkt if we need one, or attach DMA 14096 * resources if we have a scsi_pkt that needs them. The 14097 * latter should only occur for commands that are being 14098 * retried. 14099 */ 14100 if ((xp->xb_pktp == NULL) || 14101 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 14102 #else 14103 if (xp->xb_pktp == NULL) { 14104 #endif 14105 /* 14106 * There is no scsi_pkt allocated for this buf. Call 14107 * the initpkt function to allocate & init one. 14108 * 14109 * The scsi_init_pkt runout callback functionality is 14110 * implemented as follows: 14111 * 14112 * 1) The initpkt function always calls 14113 * scsi_init_pkt(9F) with sdrunout specified as the 14114 * callback routine. 14115 * 2) A successful packet allocation is initialized and 14116 * the I/O is transported. 14117 * 3) The I/O associated with an allocation resource 14118 * failure is left on its queue to be retried via 14119 * runout or the next I/O. 14120 * 4) The I/O associated with a DMA error is removed 14121 * from the queue and failed with EIO. Processing of 14122 * the transport queues is also halted to be 14123 * restarted via runout or the next I/O. 14124 * 5) The I/O associated with a CDB size or packet 14125 * size error is removed from the queue and failed 14126 * with EIO. Processing of the transport queues is 14127 * continued. 14128 * 14129 * Note: there is no interface for canceling a runout 14130 * callback. 
To prevent the driver from detaching or 14131 * suspending while a runout is pending the driver 14132 * state is set to SD_STATE_RWAIT 14133 * 14134 * Note: using the scsi_init_pkt callback facility can 14135 * result in an I/O request persisting at the head of 14136 * the list which cannot be satisfied even after 14137 * multiple retries. In the future the driver may 14138 * implement some kind of maximum runout count before 14139 * failing an I/O. 14140 * 14141 * Note: the use of funcp below may seem superfluous, 14142 * but it helps warlock figure out the correct 14143 * initpkt function calls (see [s]sd.wlcmd). 14144 */ 14145 struct scsi_pkt *pktp; 14146 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14147 14148 ASSERT(bp != un->un_rqs_bp); 14149 14150 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14151 switch ((*funcp)(bp, &pktp)) { 14152 case SD_PKT_ALLOC_SUCCESS: 14153 xp->xb_pktp = pktp; 14154 SD_TRACE(SD_LOG_IO_CORE, un, 14155 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14156 pktp); 14157 goto got_pkt; 14158 14159 case SD_PKT_ALLOC_FAILURE: 14160 /* 14161 * Temporary (hopefully) resource depletion. 14162 * Since retries and RQS commands always have a 14163 * scsi_pkt allocated, these cases should never 14164 * get here. So the only cases this needs to 14165 * handle is a bp from the waitq (which we put 14166 * back onto the waitq for sdrunout), or a bp 14167 * sent as an immed_bp (which we just fail). 14168 */ 14169 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14170 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14171 14172 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14173 14174 if (bp == immed_bp) { 14175 /* 14176 * If SD_XB_DMA_FREED is clear, then 14177 * this is a failure to allocate a 14178 * scsi_pkt, and we must fail the 14179 * command. 
14180 */ 14181 if ((xp->xb_pkt_flags & 14182 SD_XB_DMA_FREED) == 0) { 14183 break; 14184 } 14185 14186 /* 14187 * If this immediate command is NOT our 14188 * un_retry_bp, then we must fail it. 14189 */ 14190 if (bp != un->un_retry_bp) { 14191 break; 14192 } 14193 14194 /* 14195 * We get here if this cmd is our 14196 * un_retry_bp that was DMAFREED, but 14197 * scsi_init_pkt() failed to reallocate 14198 * DMA resources when we attempted to 14199 * retry it. This can happen when an 14200 * mpxio failover is in progress, but 14201 * we don't want to just fail the 14202 * command in this case. 14203 * 14204 * Use timeout(9F) to restart it after 14205 * a 100ms delay. We don't want to 14206 * let sdrunout() restart it, because 14207 * sdrunout() is just supposed to start 14208 * commands that are sitting on the 14209 * wait queue. The un_retry_bp stays 14210 * set until the command completes, but 14211 * sdrunout can be called many times 14212 * before that happens. Since sdrunout 14213 * cannot tell if the un_retry_bp is 14214 * already in the transport, it could 14215 * end up calling scsi_transport() for 14216 * the un_retry_bp multiple times. 14217 * 14218 * Also: don't schedule the callback 14219 * if some other callback is already 14220 * pending. 14221 */ 14222 if (un->un_retry_statp == NULL) { 14223 /* 14224 * restore the kstat pointer to 14225 * keep kstat counts coherent 14226 * when we do retry the command. 
14227 */ 14228 un->un_retry_statp = 14229 saved_statp; 14230 } 14231 14232 if ((un->un_startstop_timeid == NULL) && 14233 (un->un_retry_timeid == NULL) && 14234 (un->un_direct_priority_timeid == 14235 NULL)) { 14236 14237 un->un_retry_timeid = 14238 timeout( 14239 sd_start_retry_command, 14240 un, SD_RESTART_TIMEOUT); 14241 } 14242 goto exit; 14243 } 14244 14245 #else 14246 if (bp == immed_bp) { 14247 break; /* Just fail the command */ 14248 } 14249 #endif 14250 14251 /* Add the buf back to the head of the waitq */ 14252 bp->av_forw = un->un_waitq_headp; 14253 un->un_waitq_headp = bp; 14254 if (un->un_waitq_tailp == NULL) { 14255 un->un_waitq_tailp = bp; 14256 } 14257 goto exit; 14258 14259 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14260 /* 14261 * HBA DMA resource failure. Fail the command 14262 * and continue processing of the queues. 14263 */ 14264 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14265 "sd_start_cmds: " 14266 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14267 break; 14268 14269 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 14270 /* 14271 * Note:x86: Partial DMA mapping not supported 14272 * for USCSI commands, and all the needed DMA 14273 * resources were not allocated. 14274 */ 14275 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14276 "sd_start_cmds: " 14277 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 14278 break; 14279 14280 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 14281 /* 14282 * Note:x86: Request cannot fit into CDB based 14283 * on lba and len. 14284 */ 14285 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14286 "sd_start_cmds: " 14287 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 14288 break; 14289 14290 default: 14291 /* Should NEVER get here! */ 14292 panic("scsi_initpkt error"); 14293 /*NOTREACHED*/ 14294 } 14295 14296 /* 14297 * Fatal error in allocating a scsi_pkt for this buf. 14298 * Update kstats & return the buf with an error code. 14299 * We must use sd_return_failed_command_no_restart() to 14300 * avoid a recursive call back into sd_start_cmds(). 
14301 * However this also means that we must keep processing 14302 * the waitq here in order to avoid stalling. 14303 */ 14304 if (statp == kstat_waitq_to_runq) { 14305 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 14306 } 14307 sd_return_failed_command_no_restart(un, bp, EIO); 14308 if (bp == immed_bp) { 14309 /* immed_bp is gone by now, so clear this */ 14310 immed_bp = NULL; 14311 } 14312 continue; 14313 } 14314 got_pkt: 14315 if (bp == immed_bp) { 14316 /* goto the head of the class.... */ 14317 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14318 } 14319 14320 un->un_ncmds_in_transport++; 14321 SD_UPDATE_KSTATS(un, statp, bp); 14322 14323 /* 14324 * Call scsi_transport() to send the command to the target. 14325 * According to SCSA architecture, we must drop the mutex here 14326 * before calling scsi_transport() in order to avoid deadlock. 14327 * Note that the scsi_pkt's completion routine can be executed 14328 * (from interrupt context) even before the call to 14329 * scsi_transport() returns. 14330 */ 14331 SD_TRACE(SD_LOG_IO_CORE, un, 14332 "sd_start_cmds: calling scsi_transport()\n"); 14333 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 14334 14335 mutex_exit(SD_MUTEX(un)); 14336 rval = scsi_transport(xp->xb_pktp); 14337 mutex_enter(SD_MUTEX(un)); 14338 14339 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14340 "sd_start_cmds: scsi_transport() returned %d\n", rval); 14341 14342 switch (rval) { 14343 case TRAN_ACCEPT: 14344 /* Clear this with every pkt accepted by the HBA */ 14345 un->un_tran_fatal_count = 0; 14346 break; /* Success; try the next cmd (if any) */ 14347 14348 case TRAN_BUSY: 14349 un->un_ncmds_in_transport--; 14350 ASSERT(un->un_ncmds_in_transport >= 0); 14351 14352 /* 14353 * Don't retry request sense, the sense data 14354 * is lost when another request is sent. 14355 * Free up the rqs buf and retry 14356 * the original failed cmd. Update kstat. 
14357 */ 14358 if (bp == un->un_rqs_bp) { 14359 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14360 bp = sd_mark_rqs_idle(un, xp); 14361 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 14362 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, 14363 kstat_waitq_enter); 14364 goto exit; 14365 } 14366 14367 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14368 /* 14369 * Free the DMA resources for the scsi_pkt. This will 14370 * allow mpxio to select another path the next time 14371 * we call scsi_transport() with this scsi_pkt. 14372 * See sdintr() for the rationalization behind this. 14373 */ 14374 if ((un->un_f_is_fibre == TRUE) && 14375 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14376 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 14377 scsi_dmafree(xp->xb_pktp); 14378 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14379 } 14380 #endif 14381 14382 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 14383 /* 14384 * Commands that are SD_PATH_DIRECT_PRIORITY 14385 * are for error recovery situations. These do 14386 * not use the normal command waitq, so if they 14387 * get a TRAN_BUSY we cannot put them back onto 14388 * the waitq for later retry. One possible 14389 * problem is that there could already be some 14390 * other command on un_retry_bp that is waiting 14391 * for this one to complete, so we would be 14392 * deadlocked if we put this command back onto 14393 * the waitq for later retry (since un_retry_bp 14394 * must complete before the driver gets back to 14395 * commands on the waitq). 14396 * 14397 * To avoid deadlock we must schedule a callback 14398 * that will restart this command after a set 14399 * interval. This should keep retrying for as 14400 * long as the underlying transport keeps 14401 * returning TRAN_BUSY (just like for other 14402 * commands). Use the same timeout interval as 14403 * for the ordinary TRAN_BUSY retry. 
14404 */ 14405 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14406 "sd_start_cmds: scsi_transport() returned " 14407 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 14408 14409 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14410 un->un_direct_priority_timeid = 14411 timeout(sd_start_direct_priority_command, 14412 bp, SD_BSY_TIMEOUT / 500); 14413 14414 goto exit; 14415 } 14416 14417 /* 14418 * For TRAN_BUSY, we want to reduce the throttle value, 14419 * unless we are retrying a command. 14420 */ 14421 if (bp != un->un_retry_bp) { 14422 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 14423 } 14424 14425 /* 14426 * Set up the bp to be tried again 10 ms later. 14427 * Note:x86: Is there a timeout value in the sd_lun 14428 * for this condition? 14429 */ 14430 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 14431 kstat_runq_back_to_waitq); 14432 goto exit; 14433 14434 case TRAN_FATAL_ERROR: 14435 un->un_tran_fatal_count++; 14436 /* FALLTHRU */ 14437 14438 case TRAN_BADPKT: 14439 default: 14440 un->un_ncmds_in_transport--; 14441 ASSERT(un->un_ncmds_in_transport >= 0); 14442 14443 /* 14444 * If this is our REQUEST SENSE command with a 14445 * transport error, we must get back the pointers 14446 * to the original buf, and mark the REQUEST 14447 * SENSE command as "available". 14448 */ 14449 if (bp == un->un_rqs_bp) { 14450 bp = sd_mark_rqs_idle(un, xp); 14451 xp = SD_GET_XBUF(bp); 14452 } else { 14453 /* 14454 * Legacy behavior: do not update transport 14455 * error count for request sense commands. 14456 */ 14457 SD_UPDATE_ERRSTATS(un, sd_transerrs); 14458 } 14459 14460 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14461 sd_print_transport_rejected_message(un, xp, rval); 14462 14463 /* 14464 * We must use sd_return_failed_command_no_restart() to 14465 * avoid a recursive call back into sd_start_cmds(). 14466 * However this also means that we must keep processing 14467 * the waitq here in order to avoid stalling. 
			 */
			sd_return_failed_command_no_restart(un, bp, EIO);

			/*
			 * Notify any threads waiting in sd_ddi_suspend() that
			 * a command completion has occurred.
			 */
			if (un->un_state == SD_STATE_SUSPENDED) {
				cv_broadcast(&un->un_disk_busy_cv);
			}

			if (bp == immed_bp) {
				/* immed_bp is gone by now, so clear this */
				immed_bp = NULL;
			}
			break;
		}

	} while (immed_bp == NULL);

exit:
	ASSERT(mutex_owned(SD_MUTEX(un)));
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n");
}


/*
 * Function: sd_return_command
 *
 * Description: Returns a command to its originator (with or without an
 *		error).  Also starts commands waiting to be transported
 *		to the target.
 *
 *   Arguments: un - softstate for the target instance
 *		bp - buf(9S) for the command being completed; must NOT be
 *		   the instance's REQUEST SENSE buf (see ASSERT below)
 *
 *     Context: May be called from interrupt, kernel, or timeout context.
 *		Caller must hold SD_MUTEX(un); the mutex is dropped and
 *		re-acquired around the iodone processing below.
 */

static void
sd_return_command(struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;
#if defined(__i386) || defined(__amd64)
	struct scsi_pkt *pktp;
#endif

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != un->un_rqs_bp);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

#if defined(__i386) || defined(__amd64)
	pktp = SD_GET_PKTP(bp);
#endif

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n");

#if defined(__i386) || defined(__amd64)
	/*
	 * Note:x86: check for the "sdrestart failed" case.
	 *
	 * A nonzero xb_dma_resid with a fully-drained pkt_resid means only
	 * part of the transfer has been mapped/sent so far (partial DMA);
	 * set up and send the next window instead of completing the buf.
	 */
	if (((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
	    (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
	    (xp->xb_pktp->pkt_resid == 0)) {

		if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
			/*
			 * Successfully set up next portion of cmd
			 * transfer, try sending it
			 */
			sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
			    NULL, NULL, 0, (clock_t)0, NULL);
			sd_start_cmds(un, NULL);
			return;	/* Note:x86: need a return here? */
		}
	}
#endif

	/*
	 * If this is the failfast bp, clear it from un_failfast_bp. This
	 * can happen if upon being re-tried the failfast bp either
	 * succeeded or encountered another error (possibly even a different
	 * error than the one that precipitated the failfast state, but in
	 * that case it would have had to exhaust retries as well). Regardless,
	 * this should not occur whenever the instance is in the active
	 * failfast state.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	/*
	 * Clear the failfast state upon successful completion of ANY cmd.
	 */
	if (bp->b_error == 0) {
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
	}

	/*
	 * This is used if the command was retried one or more times. Show that
	 * we are done with it, and allow processing of the waitq to resume.
	 */
	if (bp == un->un_retry_bp) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_command: un:0x%p: "
		    "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
		/*
		 * Notify any threads waiting in sd_ddi_suspend() that
		 * a command completion has occurred.
		 */
		cv_broadcast(&un->un_disk_busy_cv);
		break;
	default:
		sd_start_cmds(un, NULL);
		break;
	}

	/* Return this command up the iodone chain to its originator. */
	mutex_exit(SD_MUTEX(un));

	/*
	 * Destroy the scsi_pkt via the iodone chain's destroypkt routine
	 * and clear the xbuf's pkt pointer so it cannot reference a freed
	 * pkt.  Must be done without SD_MUTEX held.
	 */
	(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
	xp->xb_pktp = NULL;

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
}


/*
 * Function: sd_return_failed_command
 *
 * Description: Command completion when an error occurred.
 *
 *   Arguments: un - softstate for the target instance
 *		bp - buf(9S) for the failed command
 *		errcode - errno value to set on the buf
 *
 *     Context: May be called from interrupt context
 */

static void
sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
{
	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	sd_return_command(un, bp);
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: exit\n");
}


/*
 * Function: sd_return_failed_command_no_restart
 *
 * Description: Same as sd_return_failed_command, but ensures that no
 *		call back into sd_start_cmds will be issued.
 *
 *   Arguments: un - softstate for the target instance
 *		bp - buf(9S) for the failed command
 *		errcode - errno value to set on the buf (must be nonzero)
 *
 *     Context: May be called from interrupt context
 */

static void
sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
	int errcode)
{
	struct sd_xbuf *xp;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(errcode != 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: entry\n");

	/*
	 * b_resid could already be nonzero due to a partial data
	 * transfer, so do not change it here.
	 */
	SD_BIOERROR(bp, errcode);

	/*
	 * If this is the failfast bp, clear it. This can happen if the
	 * failfast bp encountered a fatal error when we attempted to
	 * re-try it (such as a scsi_transport(9F) failure). However
	 * we should NOT be in an active failfast state if the failfast
	 * bp is not NULL.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	if (bp == un->un_retry_bp) {
		/*
		 * This command was retried one or more times. Show that we are
		 * done with it, and allow processing of the waitq to resume.
		 */
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_failed_command_no_restart: "
		    " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	mutex_exit(SD_MUTEX(un));

	/*
	 * Unlike sd_return_command(), the scsi_pkt may not exist here
	 * (e.g. this path is used when pkt allocation itself failed in
	 * sd_start_cmds()), so only destroy it if one was allocated.
	 */
	if (xp->xb_pktp != NULL) {
		(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
		xp->xb_pktp = NULL;
	}

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command_no_restart: exit\n");
}


/*
 * Function: sd_retry_command
 *
 * Description: queue up a command for retry, or (optionally) fail it
 *		if retry counts are exhausted.
 *
 *   Arguments: un - Pointer to the sd_lun struct for the target.
 *
 *		bp - Pointer to the buf for the command to be retried.
 *
 *		retry_check_flag - Flag to see which (if any) of the retry
 *		   counts should be decremented/checked. If the indicated
 *		   retry count is exhausted, then the command will not be
 *		   retried; it will be failed instead. This should use a
 *		   value equal to one of the following:
 *
 *			SD_RETRIES_NOCHECK
 *			SD_RESD_RETRIES_STANDARD
 *			SD_RETRIES_VICTIM
 *
 *		   Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
 *		   if the check should be made to see of FLAG_ISOLATE is set
 *		   in the pkt. If FLAG_ISOLATE is set, then the command is
 *		   not retried, it is simply failed.
 *
 *		user_funcp - Ptr to function to call before dispatching the
 *		   command. May be NULL if no action needs to be performed.
 *		   (Primarily intended for printing messages.)
 *
 *		user_arg - Optional argument to be passed along to
 *		   the user_funcp call.
 *
 *		failure_code - errno return code to set in the bp if the
 *		   command is going to be failed.
 *
 *		retry_delay - Retry delay interval in (clock_t) units. May
 *		   be zero which indicates that the retry should be retried
 *		   immediately (ie, without an intervening delay).
 *
 *		statp - Ptr to kstat function to be updated if the command
 *		   is queued for a delayed retry. May be NULL if no kstat
 *		   update is desired.
 *
 *     Context: May be called from interrupt context.
 */

static void
sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
	void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
	code), void *user_arg, int failure_code, clock_t retry_delay,
	void (*statp)(kstat_io_t *))
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);

	/*
	 * If we are syncing or dumping, fail the command to avoid
	 * recursively calling back into scsi_transport().
	 */
	if (ddi_in_panic()) {
		goto fail_command_no_log;
	}

	/*
	 * We should never be retrying a command with FLAG_DIAGNOSE set, so
	 * log an error and fail the command.
	 */
	if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
		    "ERROR, retrying FLAG_DIAGNOSE command.\n");
		sd_dump_memory(un, SD_LOG_IO, "CDB",
		    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
		sd_dump_memory(un, SD_LOG_IO, "Sense Data",
		    (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
		goto fail_command;
	}

	/*
	 * If we are suspended, then put the command onto head of the
	 * wait queue since we don't want to start more commands.
	 */
	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
	case SD_STATE_DUMPING:
		bp->av_forw = un->un_waitq_headp;
		un->un_waitq_headp = bp;
		if (un->un_waitq_tailp == NULL) {
			un->un_waitq_tailp = bp;
		}
		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
		    "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
		return;
	default:
		break;
	}

	/*
	 * If the caller wants us to check FLAG_ISOLATE, then see if that
	 * is set; if it is then we do not want to retry the command.
	 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
	 */
	if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
		if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
			goto fail_command;
		}
	}


	/*
	 * If SD_RETRIES_FAILFAST is set, it indicates that either a
	 * command timeout or a selection timeout has occurred. This means
	 * that we were unable to establish an kind of communication with
	 * the target, and subsequent retries and/or commands are likely
	 * to encounter similar results and take a long time to complete.
	 *
	 * If this is a failfast error condition, we need to update the
	 * failfast state, even if this bp does not have B_FAILFAST set.
	 */
	if (retry_check_flag & SD_RETRIES_FAILFAST) {
		if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
			ASSERT(un->un_failfast_bp == NULL);
			/*
			 * If we are already in the active failfast state, and
			 * another failfast error condition has been detected,
			 * then fail this command if it has B_FAILFAST set.
			 * If B_FAILFAST is clear, then maintain the legacy
			 * behavior of retrying heroically, even tho this will
			 * take a lot more time to fail the command.
			 */
			if (bp->b_flags & B_FAILFAST) {
				goto fail_command;
			}
		} else {
			/*
			 * We're not in the active failfast state, but we
			 * have a failfast error condition, so we must begin
			 * transition to the next state. We do this regardless
			 * of whether or not this bp has B_FAILFAST set.
			 */
			if (un->un_failfast_bp == NULL) {
				/*
				 * This is the first bp to meet a failfast
				 * condition so save it on un_failfast_bp &
				 * do normal retry processing. Do not enter
				 * active failfast state yet. This marks
				 * entry into the "failfast pending" state.
				 */
				un->un_failfast_bp = bp;

			} else if (un->un_failfast_bp == bp) {
				/*
				 * This is the second time *this* bp has
				 * encountered a failfast error condition,
				 * so enter active failfast state & flush
				 * queues as appropriate.
				 */
				un->un_failfast_state = SD_FAILFAST_ACTIVE;
				un->un_failfast_bp = NULL;
				sd_failfast_flushq(un);

				/*
				 * Fail this bp now if B_FAILFAST set;
				 * otherwise continue with retries. (It would
				 * be pretty ironic if this bp succeeded on a
				 * subsequent retry after we just flushed all
				 * the queues).
				 */
				if (bp->b_flags & B_FAILFAST) {
					goto fail_command;
				}

#if !defined(lint) && !defined(__lint)
			} else {
				/*
				 * If neither of the preceding conditionals
				 * was true, it means that there is some
				 * *other* bp that has met an initial failfast
				 * condition and is currently either being
				 * retried or is waiting to be retried. In
				 * that case we should perform normal retry
				 * processing on *this* bp, since there is a
				 * chance that the current failfast condition
				 * is transient and recoverable. If that does
				 * not turn out to be the case, then retries
				 * will be cleared when the wait queue is
				 * flushed anyway.
				 */
#endif
			}
		}
	} else {
		/*
		 * SD_RETRIES_FAILFAST is clear, which indicates that we
		 * likely were able to at least establish some level of
		 * communication with the target and subsequent commands
		 * and/or retries are likely to get through to the target,
		 * In this case we want to be aggressive about clearing
		 * the failfast state. Note that this does not affect
		 * the "failfast pending" condition.
		 */
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
	}


	/*
	 * Check the specified retry count to see if we can still do
	 * any retries with this pkt before we should fail it.
	 */
	switch (retry_check_flag & SD_RETRIES_MASK) {
	case SD_RETRIES_VICTIM:
		/*
		 * Check the victim retry count. If exhausted, then fall
		 * thru & check against the standard retry count.
		 */
		if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
			/* Increment count & proceed with the retry */
			xp->xb_victim_retry_count++;
			break;
		}
		/* Victim retries exhausted, fall back to std. retries... */
		/* FALLTHRU */

	case SD_RETRIES_STANDARD:
		if (xp->xb_retry_count >= un->un_retry_count) {
			/* Retries exhausted, fail the command */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_retry_command: retries exhausted!\n");
			/*
			 * update b_resid for failed SCMD_READ & SCMD_WRITE
			 * commands with nonzero pkt_resid.
			 */
			if ((pktp->pkt_reason == CMD_CMPLT) &&
			    (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
			    (pktp->pkt_resid != 0)) {
				/* 0x1F masks off the group code bits */
				uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
				if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
					SD_UPDATE_B_RESID(bp, pktp);
				}
			}
			goto fail_command;
		}
		xp->xb_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
		break;

	case SD_RETRIES_UA:
		if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
			/* Retries exhausted, fail the command */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Unit Attention retries exhausted. "
			    "Check the target.\n");
			goto fail_command;
		}
		xp->xb_ua_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n",
		    xp->xb_ua_retry_count);
		break;

	case SD_RETRIES_BUSY:
		if (xp->xb_retry_count >= un->un_busy_retry_count) {
			/* Retries exhausted, fail the command */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_retry_command: retries exhausted!\n");
			goto fail_command;
		}
		xp->xb_retry_count++;
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
		break;

	case SD_RETRIES_NOCHECK:
	default:
		/* No retry count to check. Just proceed with the retry */
		break;
	}

	/*
	 * FLAG_HEAD asks the HBA to place the retried pkt at the head of
	 * its queue, ahead of commands still sitting on the waitq.
	 */
	xp->xb_pktp->pkt_flags |= FLAG_HEAD;

	/*
	 * If we were given a zero timeout, we must attempt to retry the
	 * command immediately (ie, without a delay).
	 */
	if (retry_delay == 0) {
		/*
		 * Check some limiting conditions to see if we can actually
		 * do the immediate retry.  If we cannot, then we must
		 * fall back to queueing up a delayed retry.
		 */
		if (un->un_ncmds_in_transport >= un->un_throttle) {
			/*
			 * We are at the throttle limit for the target,
			 * fall back to delayed retry.
			 */
			retry_delay = SD_BSY_TIMEOUT;
			statp = kstat_waitq_enter;
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command: immed. retry hit throttle!\n");
		} else {
			/*
			 * We're clear to proceed with the immediate retry.
			 * First call the user-provided function (if any)
			 */
			if (user_funcp != NULL) {
				(*user_funcp)(un, bp, user_arg,
				    SD_IMMEDIATE_RETRY_ISSUED);
			}

			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command: issuing immediate retry\n");

			/*
			 * Call sd_start_cmds() to transport the command to
			 * the target.
			 */
			sd_start_cmds(un, bp);

			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_retry_command exit\n");
			return;
		}
	}

	/*
	 * Set up to retry the command after a delay.
	 * First call the user-provided function (if any)
	 */
	if (user_funcp != NULL) {
		(*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED);
	}

	sd_set_retry_bp(un, bp, retry_delay, statp);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
	return;

fail_command:

	if (user_funcp != NULL) {
		(*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED);
	}

fail_command_no_log:

	SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_retry_command: returning failed command\n");

	sd_return_failed_command(un, bp, failure_code);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
}


/*
 * Function: sd_set_retry_bp
 *
 * Description: Set up the given bp for retry.
 *
 *   Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		retry_delay - time interval before issuing retry (may be 0)
 *		statp - optional pointer to kstat function
 *
 *     Context: May be called under interrupt context
 */

static void
sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay,
	void (*statp)(kstat_io_t *))
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
	    "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp);

	/*
	 * Indicate that the command is being retried. This will not allow any
	 * other commands on the wait queue to be transported to the target
	 * until this command has been completed (success or failure). The
	 * "retry command" is not transported to the target until the given
	 * time delay expires, unless the user specified a 0 retry_delay.
	 *
	 * Note: the timeout(9F) callback routine is what actually calls
	 * sd_start_cmds() to transport the command, with the exception of a
	 * zero retry_delay. The only current implementor of a zero retry delay
	 * is the case where a START_STOP_UNIT is sent to spin-up a device.
	 */
	if (un->un_retry_bp == NULL) {
		ASSERT(un->un_retry_statp == NULL);
		un->un_retry_bp = bp;

		/*
		 * If the user has not specified a delay the command should
		 * be queued and no timeout should be scheduled.
		 */
		if (retry_delay == 0) {
			/*
			 * Save the kstat pointer that will be used in the
			 * call to SD_UPDATE_KSTATS() below, so that
			 * sd_start_cmds() can correctly decrement the waitq
			 * count when it is time to transport this command.
			 */
			un->un_retry_statp = statp;
			goto done;
		}
	}

	if (un->un_retry_bp == bp) {
		/*
		 * Save the kstat pointer that will be used in the call to
		 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can
		 * correctly decrement the waitq count when it is time to
		 * transport this command.
		 */
		un->un_retry_statp = statp;

		/*
		 * Schedule a timeout if:
		 *   1) The user has specified a delay.
		 *   2) There is not a START_STOP_UNIT callback pending.
		 *
		 * If no delay has been specified, then it is up to the caller
		 * to ensure that IO processing continues without stalling.
		 * Effectively, this means that the caller will issue the
		 * required call to sd_start_cmds(). The START_STOP_UNIT
		 * callback does this after the START STOP UNIT command has
		 * completed. In either of these cases we should not schedule
		 * a timeout callback here.  Also don't schedule the timeout if
		 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart.
		 *
		 * NOTE(review): un_retry_timeid is assigned here without
		 * checking whether a retry timeout is already pending;
		 * presumably the un_retry_bp gate above guarantees only one
		 * outstanding retry per instance -- confirm against the
		 * timeout-cancellation paths elsewhere in the driver.
		 */
		if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) &&
		    (un->un_direct_priority_timeid == NULL)) {
			un->un_retry_timeid =
			    timeout(sd_start_retry_command, un, retry_delay);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_set_retry_bp: setting timeout: un: 0x%p"
			    " bp:0x%p un_retry_timeid:0x%p\n",
			    un, bp, un->un_retry_timeid);
		}
	} else {
		/*
		 * We only get in here if there is already another command
		 * waiting to be retried.  In this case, we just put the
		 * given command onto the wait queue, so it can be transported
		 * after the current retry command has completed.
		 *
		 * Also we have to make sure that if the command at the head
		 * of the wait queue is the un_failfast_bp, that we do not
		 * put ahead of it any other commands that are to be retried.
		 */
		if ((un->un_failfast_bp != NULL) &&
		    (un->un_failfast_bp == un->un_waitq_headp)) {
			/*
			 * Enqueue this command AFTER the first command on
			 * the wait queue (which is also un_failfast_bp).
			 */
			bp->av_forw = un->un_waitq_headp->av_forw;
			un->un_waitq_headp->av_forw = bp;
			if (un->un_waitq_headp == un->un_waitq_tailp) {
				un->un_waitq_tailp = bp;
			}
		} else {
			/* Enqueue this command at the head of the waitq. */
			bp->av_forw = un->un_waitq_headp;
			un->un_waitq_headp = bp;
			if (un->un_waitq_tailp == NULL) {
				un->un_waitq_tailp = bp;
			}
		}

		if (statp == NULL) {
			statp = kstat_waitq_enter;
		}
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_set_retry_bp: un:0x%p already delayed retry\n", un);
	}

done:
	if (statp != NULL) {
		SD_UPDATE_KSTATS(un, statp, bp);
	}

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_set_retry_bp: exit un:0x%p\n", un);
}


/*
 * Function: sd_start_retry_command
 *
 * Description: Start the command that has been waiting on the target's
 *		retry queue.  Called from timeout(9F) context after the
 *		retry delay interval has expired.
 *
 *   Arguments: arg - pointer to associated softstate for the device.
 *
 *     Context: timeout(9F) thread context.  May not sleep.
 */

static void
sd_start_retry_command(void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_retry_command: entry\n");

	mutex_enter(SD_MUTEX(un));

	/* This timeout has fired; clear the id so it is not cancelled. */
	un->un_retry_timeid = NULL;

	if (un->un_retry_bp != NULL) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
		    un, un->un_retry_bp);
		sd_start_cmds(un, un->un_retry_bp);
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_retry_command: exit\n");
}


/*
 * Function: sd_start_direct_priority_command
 *
 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had
 *		received TRAN_BUSY when we called scsi_transport() to send it
 *		to the underlying HBA.
This function is called from timeout(9F) 15263 * context after the delay interval has expired. 15264 * 15265 * Arguments: arg - pointer to associated buf(9S) to be restarted. 15266 * 15267 * Context: timeout(9F) thread context. May not sleep. 15268 */ 15269 15270 static void 15271 sd_start_direct_priority_command(void *arg) 15272 { 15273 struct buf *priority_bp = arg; 15274 struct sd_lun *un; 15275 15276 ASSERT(priority_bp != NULL); 15277 un = SD_GET_UN(priority_bp); 15278 ASSERT(un != NULL); 15279 ASSERT(!mutex_owned(SD_MUTEX(un))); 15280 15281 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15282 "sd_start_direct_priority_command: entry\n"); 15283 15284 mutex_enter(SD_MUTEX(un)); 15285 un->un_direct_priority_timeid = NULL; 15286 sd_start_cmds(un, priority_bp); 15287 mutex_exit(SD_MUTEX(un)); 15288 15289 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15290 "sd_start_direct_priority_command: exit\n"); 15291 } 15292 15293 15294 /* 15295 * Function: sd_send_request_sense_command 15296 * 15297 * Description: Sends a REQUEST SENSE command to the target 15298 * 15299 * Context: May be called from interrupt context. 15300 */ 15301 15302 static void 15303 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 15304 struct scsi_pkt *pktp) 15305 { 15306 ASSERT(bp != NULL); 15307 ASSERT(un != NULL); 15308 ASSERT(mutex_owned(SD_MUTEX(un))); 15309 15310 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 15311 "entry: buf:0x%p\n", bp); 15312 15313 /* 15314 * If we are syncing or dumping, then fail the command to avoid a 15315 * recursive callback into scsi_transport(). Also fail the command 15316 * if we are suspended (legacy behavior). 
15317 */ 15318 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 15319 (un->un_state == SD_STATE_DUMPING)) { 15320 sd_return_failed_command(un, bp, EIO); 15321 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15322 "sd_send_request_sense_command: syncing/dumping, exit\n"); 15323 return; 15324 } 15325 15326 /* 15327 * Retry the failed command and don't issue the request sense if: 15328 * 1) the sense buf is busy 15329 * 2) we have 1 or more outstanding commands on the target 15330 * (the sense data will be cleared or invalidated any way) 15331 * 15332 * Note: There could be an issue with not checking a retry limit here, 15333 * the problem is determining which retry limit to check. 15334 */ 15335 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 15336 /* Don't retry if the command is flagged as non-retryable */ 15337 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15338 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 15339 NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter); 15340 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15341 "sd_send_request_sense_command: " 15342 "at full throttle, retrying exit\n"); 15343 } else { 15344 sd_return_failed_command(un, bp, EIO); 15345 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15346 "sd_send_request_sense_command: " 15347 "at full throttle, non-retryable exit\n"); 15348 } 15349 return; 15350 } 15351 15352 sd_mark_rqs_busy(un, bp); 15353 sd_start_cmds(un, un->un_rqs_bp); 15354 15355 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15356 "sd_send_request_sense_command: exit\n"); 15357 } 15358 15359 15360 /* 15361 * Function: sd_mark_rqs_busy 15362 * 15363 * Description: Indicate that the request sense bp for this instance is 15364 * in use. 
15365 * 15366 * Context: May be called under interrupt context 15367 */ 15368 15369 static void 15370 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 15371 { 15372 struct sd_xbuf *sense_xp; 15373 15374 ASSERT(un != NULL); 15375 ASSERT(bp != NULL); 15376 ASSERT(mutex_owned(SD_MUTEX(un))); 15377 ASSERT(un->un_sense_isbusy == 0); 15378 15379 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 15380 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 15381 15382 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 15383 ASSERT(sense_xp != NULL); 15384 15385 SD_INFO(SD_LOG_IO, un, 15386 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 15387 15388 ASSERT(sense_xp->xb_pktp != NULL); 15389 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 15390 == (FLAG_SENSING | FLAG_HEAD)); 15391 15392 un->un_sense_isbusy = 1; 15393 un->un_rqs_bp->b_resid = 0; 15394 sense_xp->xb_pktp->pkt_resid = 0; 15395 sense_xp->xb_pktp->pkt_reason = 0; 15396 15397 /* So we can get back the bp at interrupt time! */ 15398 sense_xp->xb_sense_bp = bp; 15399 15400 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 15401 15402 /* 15403 * Mark this buf as awaiting sense data. (This is already set in 15404 * the pkt_flags for the RQS packet.) 
15405 */ 15406 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 15407 15408 sense_xp->xb_retry_count = 0; 15409 sense_xp->xb_victim_retry_count = 0; 15410 sense_xp->xb_ua_retry_count = 0; 15411 sense_xp->xb_dma_resid = 0; 15412 15413 /* Clean up the fields for auto-request sense */ 15414 sense_xp->xb_sense_status = 0; 15415 sense_xp->xb_sense_state = 0; 15416 sense_xp->xb_sense_resid = 0; 15417 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 15418 15419 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 15420 } 15421 15422 15423 /* 15424 * Function: sd_mark_rqs_idle 15425 * 15426 * Description: SD_MUTEX must be held continuously through this routine 15427 * to prevent reuse of the rqs struct before the caller can 15428 * complete it's processing. 15429 * 15430 * Return Code: Pointer to the RQS buf 15431 * 15432 * Context: May be called under interrupt context 15433 */ 15434 15435 static struct buf * 15436 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 15437 { 15438 struct buf *bp; 15439 ASSERT(un != NULL); 15440 ASSERT(sense_xp != NULL); 15441 ASSERT(mutex_owned(SD_MUTEX(un))); 15442 ASSERT(un->un_sense_isbusy != 0); 15443 15444 un->un_sense_isbusy = 0; 15445 bp = sense_xp->xb_sense_bp; 15446 sense_xp->xb_sense_bp = NULL; 15447 15448 /* This pkt is no longer interested in getting sense data */ 15449 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 15450 15451 return (bp); 15452 } 15453 15454 15455 15456 /* 15457 * Function: sd_alloc_rqs 15458 * 15459 * Description: Set up the unit to receive auto request sense data 15460 * 15461 * Return Code: DDI_SUCCESS or DDI_FAILURE 15462 * 15463 * Context: Called under attach(9E) context 15464 */ 15465 15466 static int 15467 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 15468 { 15469 struct sd_xbuf *xp; 15470 15471 ASSERT(un != NULL); 15472 ASSERT(!mutex_owned(SD_MUTEX(un))); 15473 ASSERT(un->un_rqs_bp == NULL); 15474 ASSERT(un->un_rqs_pktp == NULL); 15475 
15476 /* 15477 * First allocate the required buf and scsi_pkt structs, then set up 15478 * the CDB in the scsi_pkt for a REQUEST SENSE command. 15479 */ 15480 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 15481 SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 15482 if (un->un_rqs_bp == NULL) { 15483 return (DDI_FAILURE); 15484 } 15485 15486 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 15487 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 15488 15489 if (un->un_rqs_pktp == NULL) { 15490 sd_free_rqs(un); 15491 return (DDI_FAILURE); 15492 } 15493 15494 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 15495 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 15496 SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0); 15497 15498 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 15499 15500 /* Set up the other needed members in the ARQ scsi_pkt. */ 15501 un->un_rqs_pktp->pkt_comp = sdintr; 15502 un->un_rqs_pktp->pkt_time = sd_io_time; 15503 un->un_rqs_pktp->pkt_flags |= 15504 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 15505 15506 /* 15507 * Allocate & init the sd_xbuf struct for the RQS command. Do not 15508 * provide any intpkt, destroypkt routines as we take care of 15509 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 15510 */ 15511 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 15512 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 15513 xp->xb_pktp = un->un_rqs_pktp; 15514 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15515 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 15516 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 15517 15518 /* 15519 * Save the pointer to the request sense private bp so it can 15520 * be retrieved in sdintr. 15521 */ 15522 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 15523 ASSERT(un->un_rqs_bp->b_private == xp); 15524 15525 /* 15526 * See if the HBA supports auto-request sense for the specified 15527 * target/lun. 
If it does, then try to enable it (if not already 15528 * enabled). 15529 * 15530 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 15531 * failure, while for other HBAs (pln) scsi_ifsetcap will always 15532 * return success. However, in both of these cases ARQ is always 15533 * enabled and scsi_ifgetcap will always return true. The best approach 15534 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 15535 * 15536 * The 3rd case is the HBA (adp) always return enabled on 15537 * scsi_ifgetgetcap even when it's not enable, the best approach 15538 * is issue a scsi_ifsetcap then a scsi_ifgetcap 15539 * Note: this case is to circumvent the Adaptec bug. (x86 only) 15540 */ 15541 15542 if (un->un_f_is_fibre == TRUE) { 15543 un->un_f_arq_enabled = TRUE; 15544 } else { 15545 #if defined(__i386) || defined(__amd64) 15546 /* 15547 * Circumvent the Adaptec bug, remove this code when 15548 * the bug is fixed 15549 */ 15550 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 15551 #endif 15552 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 15553 case 0: 15554 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15555 "sd_alloc_rqs: HBA supports ARQ\n"); 15556 /* 15557 * ARQ is supported by this HBA but currently is not 15558 * enabled. Attempt to enable it and if successful then 15559 * mark this instance as ARQ enabled. 15560 */ 15561 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 15562 == 1) { 15563 /* Successfully enabled ARQ in the HBA */ 15564 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15565 "sd_alloc_rqs: ARQ enabled\n"); 15566 un->un_f_arq_enabled = TRUE; 15567 } else { 15568 /* Could not enable ARQ in the HBA */ 15569 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15570 "sd_alloc_rqs: failed ARQ enable\n"); 15571 un->un_f_arq_enabled = FALSE; 15572 } 15573 break; 15574 case 1: 15575 /* 15576 * ARQ is supported by this HBA and is already enabled. 15577 * Just mark ARQ as enabled for this instance. 
15578 */ 15579 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15580 "sd_alloc_rqs: ARQ already enabled\n"); 15581 un->un_f_arq_enabled = TRUE; 15582 break; 15583 default: 15584 /* 15585 * ARQ is not supported by this HBA; disable it for this 15586 * instance. 15587 */ 15588 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15589 "sd_alloc_rqs: HBA does not support ARQ\n"); 15590 un->un_f_arq_enabled = FALSE; 15591 break; 15592 } 15593 } 15594 15595 return (DDI_SUCCESS); 15596 } 15597 15598 15599 /* 15600 * Function: sd_free_rqs 15601 * 15602 * Description: Cleanup for the pre-instance RQS command. 15603 * 15604 * Context: Kernel thread context 15605 */ 15606 15607 static void 15608 sd_free_rqs(struct sd_lun *un) 15609 { 15610 ASSERT(un != NULL); 15611 15612 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 15613 15614 /* 15615 * If consistent memory is bound to a scsi_pkt, the pkt 15616 * has to be destroyed *before* freeing the consistent memory. 15617 * Don't change the sequence of this operations. 15618 * scsi_destroy_pkt() might access memory, which isn't allowed, 15619 * after it was freed in scsi_free_consistent_buf(). 15620 */ 15621 if (un->un_rqs_pktp != NULL) { 15622 scsi_destroy_pkt(un->un_rqs_pktp); 15623 un->un_rqs_pktp = NULL; 15624 } 15625 15626 if (un->un_rqs_bp != NULL) { 15627 kmem_free(SD_GET_XBUF(un->un_rqs_bp), sizeof (struct sd_xbuf)); 15628 scsi_free_consistent_buf(un->un_rqs_bp); 15629 un->un_rqs_bp = NULL; 15630 } 15631 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 15632 } 15633 15634 15635 15636 /* 15637 * Function: sd_reduce_throttle 15638 * 15639 * Description: Reduces the maximun # of outstanding commands on a 15640 * target to the current number of outstanding commands. 15641 * Queues a tiemout(9F) callback to restore the limit 15642 * after a specified interval has elapsed. 15643 * Typically used when we get a TRAN_BUSY return code 15644 * back from scsi_transport(). 
15645 * 15646 * Arguments: un - ptr to the sd_lun softstate struct 15647 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 15648 * 15649 * Context: May be called from interrupt context 15650 */ 15651 15652 static void 15653 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 15654 { 15655 ASSERT(un != NULL); 15656 ASSERT(mutex_owned(SD_MUTEX(un))); 15657 ASSERT(un->un_ncmds_in_transport >= 0); 15658 15659 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 15660 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 15661 un, un->un_throttle, un->un_ncmds_in_transport); 15662 15663 if (un->un_throttle > 1) { 15664 if (un->un_f_use_adaptive_throttle == TRUE) { 15665 switch (throttle_type) { 15666 case SD_THROTTLE_TRAN_BUSY: 15667 if (un->un_busy_throttle == 0) { 15668 un->un_busy_throttle = un->un_throttle; 15669 } 15670 break; 15671 case SD_THROTTLE_QFULL: 15672 un->un_busy_throttle = 0; 15673 break; 15674 default: 15675 ASSERT(FALSE); 15676 } 15677 15678 if (un->un_ncmds_in_transport > 0) { 15679 un->un_throttle = un->un_ncmds_in_transport; 15680 } 15681 } else { 15682 if (un->un_ncmds_in_transport == 0) { 15683 un->un_throttle = 1; 15684 } else { 15685 un->un_throttle = un->un_ncmds_in_transport; 15686 } 15687 } 15688 } 15689 15690 /* Reschedule the timeout if none is currently active */ 15691 if (un->un_reset_throttle_timeid == NULL) { 15692 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 15693 un, sd_reset_throttle_timeout); 15694 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15695 "sd_reduce_throttle: timeout scheduled!\n"); 15696 } 15697 15698 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 15699 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 15700 } 15701 15702 15703 15704 /* 15705 * Function: sd_restore_throttle 15706 * 15707 * Description: Callback function for timeout(9F). Resets the current 15708 * value of un->un_throttle to its default. 
15709 * 15710 * Arguments: arg - pointer to associated softstate for the device. 15711 * 15712 * Context: May be called from interrupt context 15713 */ 15714 15715 static void 15716 sd_restore_throttle(void *arg) 15717 { 15718 struct sd_lun *un = arg; 15719 15720 ASSERT(un != NULL); 15721 ASSERT(!mutex_owned(SD_MUTEX(un))); 15722 15723 mutex_enter(SD_MUTEX(un)); 15724 15725 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 15726 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 15727 15728 un->un_reset_throttle_timeid = NULL; 15729 15730 if (un->un_f_use_adaptive_throttle == TRUE) { 15731 /* 15732 * If un_busy_throttle is nonzero, then it contains the 15733 * value that un_throttle was when we got a TRAN_BUSY back 15734 * from scsi_transport(). We want to revert back to this 15735 * value. 15736 */ 15737 if (un->un_busy_throttle > 0) { 15738 un->un_throttle = un->un_busy_throttle; 15739 un->un_busy_throttle = 0; 15740 } 15741 15742 /* 15743 * If un_throttle has fallen below the low-water mark, we 15744 * restore the maximum value here (and allow it to ratchet 15745 * down again if necessary). 
15746 */ 15747 if (un->un_throttle < un->un_min_throttle) { 15748 un->un_throttle = un->un_saved_throttle; 15749 } 15750 } else { 15751 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 15752 "restoring limit from 0x%x to 0x%x\n", 15753 un->un_throttle, un->un_saved_throttle); 15754 un->un_throttle = un->un_saved_throttle; 15755 } 15756 15757 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15758 "sd_restore_throttle: calling sd_start_cmds!\n"); 15759 15760 sd_start_cmds(un, NULL); 15761 15762 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15763 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 15764 un, un->un_throttle); 15765 15766 mutex_exit(SD_MUTEX(un)); 15767 15768 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 15769 } 15770 15771 /* 15772 * Function: sdrunout 15773 * 15774 * Description: Callback routine for scsi_init_pkt when a resource allocation 15775 * fails. 15776 * 15777 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 15778 * soft state instance. 15779 * 15780 * Return Code: The scsi_init_pkt routine allows for the callback function to 15781 * return a 0 indicating the callback should be rescheduled or a 1 15782 * indicating not to reschedule. This routine always returns 1 15783 * because the driver always provides a callback function to 15784 * scsi_init_pkt. This results in a callback always being scheduled 15785 * (via the scsi_init_pkt callback implementation) if a resource 15786 * failure occurs. 15787 * 15788 * Context: This callback function may not block or call routines that block 15789 * 15790 * Note: Using the scsi_init_pkt callback facility can result in an I/O 15791 * request persisting at the head of the list which cannot be 15792 * satisfied even after multiple retries. In the future the driver 15793 * may implement some time of maximum runout count before failing 15794 * an I/O. 
15795 */ 15796 15797 static int 15798 sdrunout(caddr_t arg) 15799 { 15800 struct sd_lun *un = (struct sd_lun *)arg; 15801 15802 ASSERT(un != NULL); 15803 ASSERT(!mutex_owned(SD_MUTEX(un))); 15804 15805 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 15806 15807 mutex_enter(SD_MUTEX(un)); 15808 sd_start_cmds(un, NULL); 15809 mutex_exit(SD_MUTEX(un)); 15810 /* 15811 * This callback routine always returns 1 (i.e. do not reschedule) 15812 * because we always specify sdrunout as the callback handler for 15813 * scsi_init_pkt inside the call to sd_start_cmds. 15814 */ 15815 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 15816 return (1); 15817 } 15818 15819 15820 /* 15821 * Function: sdintr 15822 * 15823 * Description: Completion callback routine for scsi_pkt(9S) structs 15824 * sent to the HBA driver via scsi_transport(9F). 15825 * 15826 * Context: Interrupt context 15827 */ 15828 15829 static void 15830 sdintr(struct scsi_pkt *pktp) 15831 { 15832 struct buf *bp; 15833 struct sd_xbuf *xp; 15834 struct sd_lun *un; 15835 15836 ASSERT(pktp != NULL); 15837 bp = (struct buf *)pktp->pkt_private; 15838 ASSERT(bp != NULL); 15839 xp = SD_GET_XBUF(bp); 15840 ASSERT(xp != NULL); 15841 ASSERT(xp->xb_pktp != NULL); 15842 un = SD_GET_UN(bp); 15843 ASSERT(un != NULL); 15844 ASSERT(!mutex_owned(SD_MUTEX(un))); 15845 15846 #ifdef SD_FAULT_INJECTION 15847 15848 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 15849 /* SD FaultInjection */ 15850 sd_faultinjection(pktp); 15851 15852 #endif /* SD_FAULT_INJECTION */ 15853 15854 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 15855 " xp:0x%p, un:0x%p\n", bp, xp, un); 15856 15857 mutex_enter(SD_MUTEX(un)); 15858 15859 /* Reduce the count of the #commands currently in transport */ 15860 un->un_ncmds_in_transport--; 15861 ASSERT(un->un_ncmds_in_transport >= 0); 15862 15863 /* Increment counter to indicate that the callback routine is active */ 15864 
un->un_in_callback++; 15865 15866 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15867 15868 #ifdef SDDEBUG 15869 if (bp == un->un_retry_bp) { 15870 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 15871 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 15872 un, un->un_retry_bp, un->un_ncmds_in_transport); 15873 } 15874 #endif 15875 15876 /* 15877 * If pkt_reason is CMD_DEV_GONE, just fail the command 15878 */ 15879 if (pktp->pkt_reason == CMD_DEV_GONE) { 15880 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15881 "Device is gone\n"); 15882 sd_return_failed_command(un, bp, EIO); 15883 goto exit; 15884 } 15885 15886 /* 15887 * First see if the pkt has auto-request sense data with it.... 15888 * Look at the packet state first so we don't take a performance 15889 * hit looking at the arq enabled flag unless absolutely necessary. 15890 */ 15891 if ((pktp->pkt_state & STATE_ARQ_DONE) && 15892 (un->un_f_arq_enabled == TRUE)) { 15893 /* 15894 * The HBA did an auto request sense for this command so check 15895 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15896 * driver command that should not be retried. 15897 */ 15898 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15899 /* 15900 * Save the relevant sense info into the xp for the 15901 * original cmd. 
15902 */ 15903 struct scsi_arq_status *asp; 15904 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15905 xp->xb_sense_status = 15906 *((uchar_t *)(&(asp->sts_rqpkt_status))); 15907 xp->xb_sense_state = asp->sts_rqpkt_state; 15908 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15909 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15910 min(sizeof (struct scsi_extended_sense), 15911 SENSE_LENGTH)); 15912 15913 /* fail the command */ 15914 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15915 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 15916 sd_return_failed_command(un, bp, EIO); 15917 goto exit; 15918 } 15919 15920 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15921 /* 15922 * We want to either retry or fail this command, so free 15923 * the DMA resources here. If we retry the command then 15924 * the DMA resources will be reallocated in sd_start_cmds(). 15925 * Note that when PKT_DMA_PARTIAL is used, this reallocation 15926 * causes the *entire* transfer to start over again from the 15927 * beginning of the request, even for PARTIAL chunks that 15928 * have already transferred successfully. 
15929 */ 15930 if ((un->un_f_is_fibre == TRUE) && 15931 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15932 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15933 scsi_dmafree(pktp); 15934 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15935 } 15936 #endif 15937 15938 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15939 "sdintr: arq done, sd_handle_auto_request_sense\n"); 15940 15941 sd_handle_auto_request_sense(un, bp, xp, pktp); 15942 goto exit; 15943 } 15944 15945 /* Next see if this is the REQUEST SENSE pkt for the instance */ 15946 if (pktp->pkt_flags & FLAG_SENSING) { 15947 /* This pktp is from the unit's REQUEST_SENSE command */ 15948 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15949 "sdintr: sd_handle_request_sense\n"); 15950 sd_handle_request_sense(un, bp, xp, pktp); 15951 goto exit; 15952 } 15953 15954 /* 15955 * Check to see if the command successfully completed as requested; 15956 * this is the most common case (and also the hot performance path). 15957 * 15958 * Requirements for successful completion are: 15959 * pkt_reason is CMD_CMPLT and packet status is status good. 15960 * In addition: 15961 * - A residual of zero indicates successful completion no matter what 15962 * the command is. 15963 * - If the residual is not zero and the command is not a read or 15964 * write, then it's still defined as successful completion. In other 15965 * words, if the command is a read or write the residual must be 15966 * zero for successful completion. 15967 * - If the residual is not zero and the command is a read or 15968 * write, and it's a USCSICMD, then it's still defined as 15969 * successful completion. 15970 */ 15971 if ((pktp->pkt_reason == CMD_CMPLT) && 15972 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 15973 15974 /* 15975 * Since this command is returned with a good status, we 15976 * can reset the count for Sonoma failover. 
15977 */ 15978 un->un_sonoma_failure_count = 0; 15979 15980 /* 15981 * Return all USCSI commands on good status 15982 */ 15983 if (pktp->pkt_resid == 0) { 15984 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15985 "sdintr: returning command for resid == 0\n"); 15986 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 15987 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 15988 SD_UPDATE_B_RESID(bp, pktp); 15989 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15990 "sdintr: returning command for resid != 0\n"); 15991 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15992 SD_UPDATE_B_RESID(bp, pktp); 15993 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15994 "sdintr: returning uscsi command\n"); 15995 } else { 15996 goto not_successful; 15997 } 15998 sd_return_command(un, bp); 15999 16000 /* 16001 * Decrement counter to indicate that the callback routine 16002 * is done. 16003 */ 16004 un->un_in_callback--; 16005 ASSERT(un->un_in_callback >= 0); 16006 mutex_exit(SD_MUTEX(un)); 16007 16008 return; 16009 } 16010 16011 not_successful: 16012 16013 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16014 /* 16015 * The following is based upon knowledge of the underlying transport 16016 * and its use of DMA resources. This code should be removed when 16017 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 16018 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 16019 * and sd_start_cmds(). 16020 * 16021 * Free any DMA resources associated with this command if there 16022 * is a chance it could be retried or enqueued for later retry. 16023 * If we keep the DMA binding then mpxio cannot reissue the 16024 * command on another path whenever a path failure occurs. 16025 * 16026 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 16027 * causes the *entire* transfer to start over again from the 16028 * beginning of the request, even for PARTIAL chunks that 16029 * have already transferred successfully. 
16030 * 16031 * This is only done for non-uscsi commands (and also skipped for the 16032 * driver's internal RQS command). Also just do this for Fibre Channel 16033 * devices as these are the only ones that support mpxio. 16034 */ 16035 if ((un->un_f_is_fibre == TRUE) && 16036 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16037 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16038 scsi_dmafree(pktp); 16039 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16040 } 16041 #endif 16042 16043 /* 16044 * The command did not successfully complete as requested so check 16045 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16046 * driver command that should not be retried so just return. If 16047 * FLAG_DIAGNOSE is not set the error will be processed below. 16048 */ 16049 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16050 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16051 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 16052 /* 16053 * Issue a request sense if a check condition caused the error 16054 * (we handle the auto request sense case above), otherwise 16055 * just fail the command. 16056 */ 16057 if ((pktp->pkt_reason == CMD_CMPLT) && 16058 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 16059 sd_send_request_sense_command(un, bp, pktp); 16060 } else { 16061 sd_return_failed_command(un, bp, EIO); 16062 } 16063 goto exit; 16064 } 16065 16066 /* 16067 * The command did not successfully complete as requested so process 16068 * the error, retry, and/or attempt recovery. 
16069 */ 16070 switch (pktp->pkt_reason) { 16071 case CMD_CMPLT: 16072 switch (SD_GET_PKT_STATUS(pktp)) { 16073 case STATUS_GOOD: 16074 /* 16075 * The command completed successfully with a non-zero 16076 * residual 16077 */ 16078 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16079 "sdintr: STATUS_GOOD \n"); 16080 sd_pkt_status_good(un, bp, xp, pktp); 16081 break; 16082 16083 case STATUS_CHECK: 16084 case STATUS_TERMINATED: 16085 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16086 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 16087 sd_pkt_status_check_condition(un, bp, xp, pktp); 16088 break; 16089 16090 case STATUS_BUSY: 16091 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16092 "sdintr: STATUS_BUSY\n"); 16093 sd_pkt_status_busy(un, bp, xp, pktp); 16094 break; 16095 16096 case STATUS_RESERVATION_CONFLICT: 16097 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16098 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 16099 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16100 break; 16101 16102 case STATUS_QFULL: 16103 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16104 "sdintr: STATUS_QFULL\n"); 16105 sd_pkt_status_qfull(un, bp, xp, pktp); 16106 break; 16107 16108 case STATUS_MET: 16109 case STATUS_INTERMEDIATE: 16110 case STATUS_SCSI2: 16111 case STATUS_INTERMEDIATE_MET: 16112 case STATUS_ACA_ACTIVE: 16113 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 16114 "Unexpected SCSI status received: 0x%x\n", 16115 SD_GET_PKT_STATUS(pktp)); 16116 sd_return_failed_command(un, bp, EIO); 16117 break; 16118 16119 default: 16120 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 16121 "Invalid SCSI status received: 0x%x\n", 16122 SD_GET_PKT_STATUS(pktp)); 16123 sd_return_failed_command(un, bp, EIO); 16124 break; 16125 16126 } 16127 break; 16128 16129 case CMD_INCOMPLETE: 16130 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16131 "sdintr: CMD_INCOMPLETE\n"); 16132 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 16133 break; 16134 case CMD_TRAN_ERR: 16135 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16136 
"sdintr: CMD_TRAN_ERR\n"); 16137 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 16138 break; 16139 case CMD_RESET: 16140 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16141 "sdintr: CMD_RESET \n"); 16142 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 16143 break; 16144 case CMD_ABORTED: 16145 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16146 "sdintr: CMD_ABORTED \n"); 16147 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 16148 break; 16149 case CMD_TIMEOUT: 16150 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16151 "sdintr: CMD_TIMEOUT\n"); 16152 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 16153 break; 16154 case CMD_UNX_BUS_FREE: 16155 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16156 "sdintr: CMD_UNX_BUS_FREE \n"); 16157 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 16158 break; 16159 case CMD_TAG_REJECT: 16160 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16161 "sdintr: CMD_TAG_REJECT\n"); 16162 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 16163 break; 16164 default: 16165 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16166 "sdintr: default\n"); 16167 sd_pkt_reason_default(un, bp, xp, pktp); 16168 break; 16169 } 16170 16171 exit: 16172 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 16173 16174 /* Decrement counter to indicate that the callback routine is done. */ 16175 un->un_in_callback--; 16176 ASSERT(un->un_in_callback >= 0); 16177 16178 /* 16179 * At this point, the pkt has been dispatched, ie, it is either 16180 * being re-tried or has been returned to its caller and should 16181 * not be referenced. 16182 */ 16183 16184 mutex_exit(SD_MUTEX(un)); 16185 } 16186 16187 16188 /* 16189 * Function: sd_print_incomplete_msg 16190 * 16191 * Description: Prints the error message for a CMD_INCOMPLETE error. 16192 * 16193 * Arguments: un - ptr to associated softstate for the device. 16194 * bp - ptr to the buf(9S) for the command. 16195 * arg - message string ptr 16196 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 16197 * or SD_NO_RETRY_ISSUED. 
 *
 * Context: May be called under interrupt context
 */

static void
sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	struct scsi_pkt	*pktp;
	char	*msgp;
	char	*cmdp = arg;	/* command name string, e.g. "read"/"write" */

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(arg != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	/* Select the outcome portion of the message from the retry code. */
	switch (code) {
	case SD_DELAYED_RETRY_ISSUED:
	case SD_IMMEDIATE_RETRY_ISSUED:
		msgp = "retrying";
		break;
	case SD_NO_RETRY_ISSUED:
	default:
		msgp = "giving up";
		break;
	}

	/* FLAG_SILENT suppresses the console message (normally set for USCSI) */
	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "incomplete %s- %s\n", cmdp, msgp);
	}
}



/*
 * Function: sd_pkt_status_good
 *
 * Description: Processing for a STATUS_GOOD code in pkt_status.
 *		Only reached when the transfer was incomplete
 *		(pkt_resid != 0, asserted below): reads/writes are
 *		retried, any other opcode is returned with its
 *		residual count updated.
 *
 * Context: May be called under interrupt context
 */

static void
sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	char	*cmdp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);
	ASSERT(pktp->pkt_reason == CMD_CMPLT);
	ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
	ASSERT(pktp->pkt_resid != 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	/* Mask off the group bits; only the low 5 bits identify the opcode */
	switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
	case SCMD_READ:
		cmdp = "read";
		break;
	case SCMD_WRITE:
		cmdp = "write";
		break;
	default:
		/* Not a read/write: just report the residual and complete. */
		SD_UPDATE_B_RESID(bp, pktp);
		sd_return_command(un, bp);
		SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
		return;
	}

	/*
	 * See if we can retry the read/write, preferably immediately.
	 * If retries are exhausted, then sd_retry_command() will update
	 * the b_resid count.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
	    cmdp, EIO, (clock_t)0, NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
}





/*
 * Function: sd_handle_request_sense
 *
 * Description: Processing for non-auto Request Sense command.
 *
 * Arguments: un - ptr to associated softstate
 *		sense_bp - ptr to buf(9S) for the RQS command
 *		sense_xp - ptr to the sd_xbuf for the RQS command
 *		sense_pktp - ptr to the scsi_pkt(9S) for the RQS command
 *
 * Context: May be called under interrupt context
 */

static void
sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp,
	struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp)
{
	struct buf	*cmd_bp;	/* buf for the original command */
	struct sd_xbuf	*cmd_xp;	/* sd_xbuf for the original command */
	struct scsi_pkt	*cmd_pktp;	/* pkt for the original command */

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(sense_bp != NULL);
	ASSERT(sense_xp != NULL);
	ASSERT(sense_pktp != NULL);

	/*
	 * Note the sense_bp, sense_xp, and sense_pktp here are for the
	 * RQS command and not the original command.
	 */
	ASSERT(sense_pktp == un->un_rqs_pktp);
	ASSERT(sense_bp == un->un_rqs_bp);
	ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) ==
	    (FLAG_SENSING | FLAG_HEAD));
	ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) &
	    FLAG_SENSING) == FLAG_SENSING);

	/* These are the bp, xp, and pktp for the original command */
	cmd_bp = sense_xp->xb_sense_bp;
	cmd_xp = SD_GET_XBUF(cmd_bp);
	cmd_pktp = SD_GET_PKTP(cmd_bp);

	if (sense_pktp->pkt_reason != CMD_CMPLT) {
		/*
		 * The REQUEST SENSE command failed.  Release the REQUEST
		 * SENSE command for re-use, get back the bp for the original
		 * command, and attempt to re-try the original command if
		 * FLAG_DIAGNOSE is not set in the original packet.
		 */
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
			cmd_bp = sd_mark_rqs_idle(un, sense_xp);
			sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD,
			    NULL, NULL, EIO, (clock_t)0, NULL);
			return;
		}
		/* FLAG_DIAGNOSE set: fall through and fail below. */
	}

	/*
	 * Save the relevant sense info into the xp for the original cmd.
	 *
	 * Note: if the request sense failed the state info will be zero
	 * as set in sd_mark_rqs_busy()
	 */
	cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp);
	cmd_xp->xb_sense_state  = sense_pktp->pkt_state;
	cmd_xp->xb_sense_resid  = sense_pktp->pkt_resid;
	bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, SENSE_LENGTH);

	/*
	 * Free up the RQS command....
	 * NOTE:
	 *	Must do this BEFORE calling sd_validate_sense_data!
	 *	sd_validate_sense_data may return the original command in
	 *	which case the pkt will be freed and the flags can no
	 *	longer be touched.
	 *	SD_MUTEX is held through this process until the command
	 *	is dispatched based upon the sense data, so there are
	 *	no race conditions.
	 */
	(void) sd_mark_rqs_idle(un, sense_xp);

	/*
	 * For a retryable command see if we have valid sense data, if so then
	 * turn it over to sd_decode_sense() to figure out the right course of
	 * action. Just fail a non-retryable command.
	 */
	if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
		if (sd_validate_sense_data(un, cmd_bp, cmd_xp) ==
		    SD_SENSE_DATA_IS_VALID) {
			sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp);
		}
	} else {
		SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB",
		    (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
		SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data",
		    (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
		sd_return_failed_command(un, cmd_bp, EIO);
	}
}




/*
 * Function: sd_handle_auto_request_sense
 *
 * Description: Processing for auto-request sense information.
 *
 * Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		xp - ptr to the sd_xbuf for the command
 *		pktp - ptr to the scsi_pkt(9S) for the command
 *
 * Context: May be called under interrupt context
 */

static void
sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct scsi_arq_status *asp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);
	/* Auto-sense never arrives on the dedicated RQS resources. */
	ASSERT(pktp != un->un_rqs_pktp);
	ASSERT(bp != un->un_rqs_bp);

	/*
	 * For auto-request sense, we get a scsi_arq_status back from
	 * the HBA, with the sense data in the sts_sensedata member.
	 * The pkt_scbp of the packet points to this scsi_arq_status.
	 */
	asp = (struct scsi_arq_status *)(pktp->pkt_scbp);

	if (asp->sts_rqpkt_reason != CMD_CMPLT) {
		/*
		 * The auto REQUEST SENSE failed; see if we can re-try
		 * the original command.
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "auto request sense failed (reason=%s)\n",
		    scsi_rname(asp->sts_rqpkt_reason));

		sd_reset_target(un, pktp);

		sd_retry_command(un, bp, SD_RETRIES_STANDARD,
		    NULL, NULL, EIO, (clock_t)0, NULL);
		return;
	}

	/* Save the relevant sense info into the xp for the original cmd. */
	xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status)));
	xp->xb_sense_state  = asp->sts_rqpkt_state;
	xp->xb_sense_resid  = asp->sts_rqpkt_resid;
	bcopy(&asp->sts_sensedata, xp->xb_sense_data,
	    min(sizeof (struct scsi_extended_sense), SENSE_LENGTH));

	/*
	 * See if we have valid sense data, if so then turn it over to
	 * sd_decode_sense() to figure out the right course of action.
	 */
	if (sd_validate_sense_data(un, bp, xp) == SD_SENSE_DATA_IS_VALID) {
		sd_decode_sense(un, bp, xp, pktp);
	}
}


/*
 * Function: sd_print_sense_failed_msg
 *
 * Description: Print log message when RQS has failed.
16466 * 16467 * Arguments: un - ptr to associated softstate 16468 * bp - ptr to buf(9S) for the command 16469 * arg - generic message string ptr 16470 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16471 * or SD_NO_RETRY_ISSUED 16472 * 16473 * Context: May be called from interrupt context 16474 */ 16475 16476 static void 16477 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 16478 int code) 16479 { 16480 char *msgp = arg; 16481 16482 ASSERT(un != NULL); 16483 ASSERT(mutex_owned(SD_MUTEX(un))); 16484 ASSERT(bp != NULL); 16485 16486 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 16487 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 16488 } 16489 } 16490 16491 16492 /* 16493 * Function: sd_validate_sense_data 16494 * 16495 * Description: Check the given sense data for validity. 16496 * If the sense data is not valid, the command will 16497 * be either failed or retried! 16498 * 16499 * Return Code: SD_SENSE_DATA_IS_INVALID 16500 * SD_SENSE_DATA_IS_VALID 16501 * 16502 * Context: May be called from interrupt context 16503 */ 16504 16505 static int 16506 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp) 16507 { 16508 struct scsi_extended_sense *esp; 16509 struct scsi_pkt *pktp; 16510 size_t actual_len; 16511 char *msgp = NULL; 16512 16513 ASSERT(un != NULL); 16514 ASSERT(mutex_owned(SD_MUTEX(un))); 16515 ASSERT(bp != NULL); 16516 ASSERT(bp != un->un_rqs_bp); 16517 ASSERT(xp != NULL); 16518 16519 pktp = SD_GET_PKTP(bp); 16520 ASSERT(pktp != NULL); 16521 16522 /* 16523 * Check the status of the RQS command (auto or manual). 
16524 */ 16525 switch (xp->xb_sense_status & STATUS_MASK) { 16526 case STATUS_GOOD: 16527 break; 16528 16529 case STATUS_RESERVATION_CONFLICT: 16530 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16531 return (SD_SENSE_DATA_IS_INVALID); 16532 16533 case STATUS_BUSY: 16534 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16535 "Busy Status on REQUEST SENSE\n"); 16536 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 16537 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 16538 return (SD_SENSE_DATA_IS_INVALID); 16539 16540 case STATUS_QFULL: 16541 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16542 "QFULL Status on REQUEST SENSE\n"); 16543 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 16544 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 16545 return (SD_SENSE_DATA_IS_INVALID); 16546 16547 case STATUS_CHECK: 16548 case STATUS_TERMINATED: 16549 msgp = "Check Condition on REQUEST SENSE\n"; 16550 goto sense_failed; 16551 16552 default: 16553 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 16554 goto sense_failed; 16555 } 16556 16557 /* 16558 * See if we got the minimum required amount of sense data. 16559 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 16560 * or less. 
16561 */ 16562 actual_len = (int)(SENSE_LENGTH - xp->xb_sense_resid); 16563 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 16564 (actual_len == 0)) { 16565 msgp = "Request Sense couldn't get sense data\n"; 16566 goto sense_failed; 16567 } 16568 16569 if (actual_len < SUN_MIN_SENSE_LENGTH) { 16570 msgp = "Not enough sense information\n"; 16571 goto sense_failed; 16572 } 16573 16574 /* 16575 * We require the extended sense data 16576 */ 16577 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 16578 if (esp->es_class != CLASS_EXTENDED_SENSE) { 16579 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16580 static char tmp[8]; 16581 static char buf[148]; 16582 char *p = (char *)(xp->xb_sense_data); 16583 int i; 16584 16585 mutex_enter(&sd_sense_mutex); 16586 (void) strcpy(buf, "undecodable sense information:"); 16587 for (i = 0; i < actual_len; i++) { 16588 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 16589 (void) strcpy(&buf[strlen(buf)], tmp); 16590 } 16591 i = strlen(buf); 16592 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 16593 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 16594 mutex_exit(&sd_sense_mutex); 16595 } 16596 /* Note: Legacy behavior, fail the command with no retry */ 16597 sd_return_failed_command(un, bp, EIO); 16598 return (SD_SENSE_DATA_IS_INVALID); 16599 } 16600 16601 /* 16602 * Check that es_code is valid (es_class concatenated with es_code 16603 * make up the "response code" field. es_class will always be 7, so 16604 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the 16605 * format. 
16606 */ 16607 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 16608 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 16609 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 16610 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 16611 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 16612 goto sense_failed; 16613 } 16614 16615 return (SD_SENSE_DATA_IS_VALID); 16616 16617 sense_failed: 16618 /* 16619 * If the request sense failed (for whatever reason), attempt 16620 * to retry the original command. 16621 */ 16622 #if defined(__i386) || defined(__amd64) 16623 /* 16624 * SD_RETRY_DELAY is conditionally compile (#if fibre) in 16625 * sddef.h for Sparc platform, and x86 uses 1 binary 16626 * for both SCSI/FC. 16627 * The SD_RETRY_DELAY value need to be adjusted here 16628 * when SD_RETRY_DELAY change in sddef.h 16629 */ 16630 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16631 sd_print_sense_failed_msg, msgp, EIO, 16632 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 16633 #else 16634 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16635 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 16636 #endif 16637 16638 return (SD_SENSE_DATA_IS_INVALID); 16639 } 16640 16641 16642 16643 /* 16644 * Function: sd_decode_sense 16645 * 16646 * Description: Take recovery action(s) when SCSI Sense Data is received. 16647 * 16648 * Context: Interrupt context. 
 */

static void
sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
	struct scsi_pkt *pktp)
{
	struct scsi_extended_sense *esp;
	struct scsi_descr_sense_hdr *sdsp;
	uint8_t asc, ascq, sense_key;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(bp != un->un_rqs_bp);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	esp = (struct scsi_extended_sense *)xp->xb_sense_data;

	/*
	 * Extract the sense key and ASC/ASCQ; their location differs
	 * between descriptor-format and fixed-format sense data.
	 */
	switch (esp->es_code) {
	case CODE_FMT_DESCR_CURRENT:
	case CODE_FMT_DESCR_DEFERRED:
		sdsp = (struct scsi_descr_sense_hdr *)xp->xb_sense_data;
		sense_key = sdsp->ds_key;
		asc = sdsp->ds_add_code;
		ascq = sdsp->ds_qual_code;
		break;
	case CODE_FMT_VENDOR_SPECIFIC:
	case CODE_FMT_FIXED_CURRENT:
	case CODE_FMT_FIXED_DEFERRED:
	default:
		sense_key = esp->es_key;
		asc = esp->es_add_code;
		ascq = esp->es_qual_code;
		break;
	}

	/* Dispatch to the per-sense-key recovery handler. */
	switch (sense_key) {
	case KEY_NO_SENSE:
		sd_sense_key_no_sense(un, bp, xp, pktp);
		break;
	case KEY_RECOVERABLE_ERROR:
		sd_sense_key_recoverable_error(un, asc, bp, xp, pktp);
		break;
	case KEY_NOT_READY:
		sd_sense_key_not_ready(un, asc, ascq, bp, xp, pktp);
		break;
	case KEY_MEDIUM_ERROR:
	case KEY_HARDWARE_ERROR:
		sd_sense_key_medium_or_hardware_error(un,
		    sense_key, asc, bp, xp, pktp);
		break;
	case KEY_ILLEGAL_REQUEST:
		sd_sense_key_illegal_request(un, bp, xp, pktp);
		break;
	case KEY_UNIT_ATTENTION:
		sd_sense_key_unit_attention(un, asc, bp, xp, pktp);
		break;
	case KEY_WRITE_PROTECT:
	case KEY_VOLUME_OVERFLOW:
	case KEY_MISCOMPARE:
		sd_sense_key_fail_command(un, bp, xp, pktp);
		break;
	case KEY_BLANK_CHECK:
		sd_sense_key_blank_check(un, bp, xp, pktp);
		break;
	case KEY_ABORTED_COMMAND:
		sd_sense_key_aborted_command(un, bp, xp, pktp);
		break;
	case KEY_VENDOR_UNIQUE:
	case KEY_COPY_ABORTED:
	case KEY_EQUAL:
	case KEY_RESERVED:
	default:
		sd_sense_key_default(un, sense_key, bp, xp, pktp);
		break;
	}
}


/*
 * Function: sd_dump_memory
 *
 * Description: Debug logging routine to print the contents of a user provided
 *		buffer. The output of the buffer is broken up into 256 byte
 *		segments due to a size constraint of the scsi_log
 *		implementation.
 *
 * Arguments: un - ptr to softstate
 *		comp - component mask
 *		title - "title" string to preceed data when printed
 *		data - ptr to data block to be printed
 *		len - size of data block to be printed
 *		fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
 *
 * Context: May be called from interrupt context
 */

#define	SD_DUMP_MEMORY_BUF_SIZE	256

/* Per-element format strings, indexed by SD_LOG_HEX (0) / SD_LOG_CHAR (1) */
static char *sd_dump_format_string[] = {
		" 0x%02x",
		" %c"
};

static void
sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
	int len, int fmt)
{
	int	i, j;
	int	avail_count;
	int	start_offset;
	int	end_offset;
	size_t	entry_len;
	char	*bufp;
	char	*local_buf;
	char	*format_string;

	ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));

	/*
	 * In the debug version of the driver, this function is called from a
	 * number of places which are NOPs in the release driver.
	 * The debug driver therefore has additional methods of filtering
	 * debug output.
	 */
#ifdef SDDEBUG
	/*
	 * In the debug version of the driver we can reduce the amount of debug
	 * messages by setting sd_error_level to something other than
	 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
	 * sd_component_mask.
	 */
	if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
	    (sd_error_level != SCSI_ERR_ALL)) {
		return;
	}
	if (((sd_component_mask & comp) == 0) ||
	    (sd_error_level != SCSI_ERR_ALL)) {
		return;
	}
#else
	if (sd_error_level != SCSI_ERR_ALL) {
		return;
	}
#endif

	local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
	bufp = local_buf;
	/*
	 * Available length is the length of local_buf[], minus the
	 * length of the title string, minus one for the ":", minus
	 * one for the newline, minus one for the NULL terminator.
	 * This gives the #bytes available for holding the printed
	 * values from the given data buffer.
	 */
	if (fmt == SD_LOG_HEX) {
		format_string = sd_dump_format_string[0];
	} else /* SD_LOG_CHAR */ {
		format_string = sd_dump_format_string[1];
	}
	/*
	 * Available count is the number of elements from the given
	 * data buffer that we can fit into the available length.
	 * This is based upon the size of the format string used.
	 * Make one entry and find it's size.
	 */
	(void) sprintf(bufp, format_string, data[0]);
	entry_len = strlen(bufp);
	avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;

	/* Emit one log line per avail_count elements until len is consumed. */
	j = 0;
	while (j < len) {
		bufp = local_buf;
		bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
		start_offset = j;

		end_offset = start_offset + avail_count;

		(void) sprintf(bufp, "%s:", title);
		bufp += strlen(bufp);
		for (i = start_offset; ((i < end_offset) && (j < len));
		    i++, j++) {
			(void) sprintf(bufp, format_string, data[i]);
			bufp += entry_len;
		}
		(void) sprintf(bufp, "\n");

		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
	}
	kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
}

/*
 * Function: sd_print_sense_msg
 *
 * Description: Log a message based upon the given sense data.
 *
 * Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		arg - ptr to associate sd_sense_info struct
 *		code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *		       or SD_NO_RETRY_ISSUED
 *
 * Context: May be called from interrupt context
 */

static void
sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	struct scsi_extended_sense *sensep;
	daddr_t request_blkno;
	diskaddr_t err_blkno;
	int severity;
	int pfa_flag;
	int fixed_format = TRUE;	/* assume fixed-format sense data */
	extern struct scsi_key_strings scsi_cmds[];

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);
	ASSERT(arg != NULL);

	severity = ((struct sd_sense_info *)(arg))->ssi_severity;
	pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;

	/* A retry was issued: downgrade the severity accordingly. */
	if ((code == SD_DELAYED_RETRY_ISSUED) ||
	    (code == SD_IMMEDIATE_RETRY_ISSUED)) {
		severity = SCSI_ERR_RETRYABLE;
	}

	/* Use absolute block number for the request block number */
	request_blkno = xp->xb_blkno;

	/*
	 * Now try to get the error block number from the sense data
	 */
	sensep = (struct scsi_extended_sense *)xp->xb_sense_data;
	switch (sensep->es_code) {
	case CODE_FMT_DESCR_CURRENT:
	case CODE_FMT_DESCR_DEFERRED:
		err_blkno =
		    sd_extract_sense_info_descr(
			(struct scsi_descr_sense_hdr *)sensep);
		fixed_format = FALSE;
		break;
	case CODE_FMT_FIXED_CURRENT:
	case CODE_FMT_FIXED_DEFERRED:
	case CODE_FMT_VENDOR_SPECIFIC:
	default:
		/*
		 * With the es_valid bit set, we assume that the error
		 * blkno is in the sense data.  Also, if xp->xb_blkno is
		 * greater than 0xffffffff then the target *should* have used
		 * a descriptor sense format (or it shouldn't have set
		 * the es_valid bit), and we may as well ignore the
		 * 32-bit value.
		 */
		if ((sensep->es_valid != 0) && (xp->xb_blkno <= 0xffffffff)) {
			err_blkno = (diskaddr_t)
			    ((sensep->es_info_1 << 24) |
			    (sensep->es_info_2 << 16) |
			    (sensep->es_info_3 << 8)  |
			    (sensep->es_info_4));
		} else {
			err_blkno = (diskaddr_t)-1;
		}
		break;
	}

	if (err_blkno == (diskaddr_t)-1) {
		/*
		 * Without the es_valid bit set (for fixed format) or an
		 * information descriptor (for descriptor format) we cannot
		 * be certain of the error blkno, so just use the
		 * request_blkno.
		 */
		err_blkno = (diskaddr_t)request_blkno;
	} else {
		/*
		 * We retrieved the error block number from the information
		 * portion of the sense data.
		 *
		 * For USCSI commands we are better off using the error
		 * block no. as the requested block no. (This is the best
		 * we can estimate.)
		 */
		if ((SD_IS_BUFIO(xp) == FALSE) &&
		    ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
			request_blkno = err_blkno;
		}
	}

	/*
	 * The following will log the buffer contents for the release driver
	 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
	 * level is set to verbose.
	 */
	sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
	    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
	sd_dump_memory(un, SD_LOG_IO, "Sense Data",
	    (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);

	if (pfa_flag == FALSE) {
		/* This is normally only set for USCSI */
		if ((pktp->pkt_flags & FLAG_SILENT) != 0) {
			return;
		}

		if ((SD_IS_BUFIO(xp) == TRUE) &&
		    (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
		    (severity < sd_error_level))) {
			return;
		}
	}

	/*
	 * If the data is fixed format then check for Sonoma Failover,
	 * and keep a count of how many failed I/O's.  We should not have
	 * to worry about Sonoma returning descriptor format sense data,
	 * and asc/ascq are in a different location in descriptor format.
	 */
	if (fixed_format &&
	    (SD_IS_LSI(un)) && (sensep->es_key == KEY_ILLEGAL_REQUEST) &&
	    (sensep->es_add_code == 0x94) && (sensep->es_qual_code == 0x01)) {
		un->un_sonoma_failure_count++;
		if (un->un_sonoma_failure_count > 1) {
			return;
		}
	}

	scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity,
	    request_blkno, err_blkno, scsi_cmds, sensep,
	    un->un_additional_codes, NULL);
}

/*
 * Function: sd_extract_sense_info_descr
 *
 * Description: Retrieve "information" field from descriptor format
 *		sense data.  Iterates through each sense descriptor
 *		looking for the information descriptor and returns
 *		the information field from that descriptor.
16998 * 16999 * Context: May be called from interrupt context 17000 */ 17001 17002 static diskaddr_t 17003 sd_extract_sense_info_descr(struct scsi_descr_sense_hdr *sdsp) 17004 { 17005 diskaddr_t result; 17006 uint8_t *descr_offset; 17007 int valid_sense_length; 17008 struct scsi_information_sense_descr *isd; 17009 17010 /* 17011 * Initialize result to -1 indicating there is no information 17012 * descriptor 17013 */ 17014 result = (diskaddr_t)-1; 17015 17016 /* 17017 * The first descriptor will immediately follow the header 17018 */ 17019 descr_offset = (uint8_t *)(sdsp+1); /* Pointer arithmetic */ 17020 17021 /* 17022 * Calculate the amount of valid sense data 17023 */ 17024 valid_sense_length = 17025 min((sizeof (struct scsi_descr_sense_hdr) + 17026 sdsp->ds_addl_sense_length), 17027 SENSE_LENGTH); 17028 17029 /* 17030 * Iterate through the list of descriptors, stopping when we 17031 * run out of sense data 17032 */ 17033 while ((descr_offset + sizeof (struct scsi_information_sense_descr)) <= 17034 (uint8_t *)sdsp + valid_sense_length) { 17035 /* 17036 * Check if this is an information descriptor. We can 17037 * use the scsi_information_sense_descr structure as a 17038 * template sense the first two fields are always the 17039 * same 17040 */ 17041 isd = (struct scsi_information_sense_descr *)descr_offset; 17042 if (isd->isd_descr_type == DESCR_INFORMATION) { 17043 /* 17044 * Found an information descriptor. Copy the 17045 * information field. There will only be one 17046 * information descriptor so we can stop looking. 
17047 */ 17048 result = 17049 (((diskaddr_t)isd->isd_information[0] << 56) | 17050 ((diskaddr_t)isd->isd_information[1] << 48) | 17051 ((diskaddr_t)isd->isd_information[2] << 40) | 17052 ((diskaddr_t)isd->isd_information[3] << 32) | 17053 ((diskaddr_t)isd->isd_information[4] << 24) | 17054 ((diskaddr_t)isd->isd_information[5] << 16) | 17055 ((diskaddr_t)isd->isd_information[6] << 8) | 17056 ((diskaddr_t)isd->isd_information[7])); 17057 break; 17058 } 17059 17060 /* 17061 * Get pointer to the next descriptor. The "additional 17062 * length" field holds the length of the descriptor except 17063 * for the "type" and "additional length" fields, so 17064 * we need to add 2 to get the total length. 17065 */ 17066 descr_offset += (isd->isd_addl_length + 2); 17067 } 17068 17069 return (result); 17070 } 17071 17072 /* 17073 * Function: sd_sense_key_no_sense 17074 * 17075 * Description: Recovery action when sense data was not received. 17076 * 17077 * Context: May be called from interrupt context 17078 */ 17079 17080 static void 17081 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 17082 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17083 { 17084 struct sd_sense_info si; 17085 17086 ASSERT(un != NULL); 17087 ASSERT(mutex_owned(SD_MUTEX(un))); 17088 ASSERT(bp != NULL); 17089 ASSERT(xp != NULL); 17090 ASSERT(pktp != NULL); 17091 17092 si.ssi_severity = SCSI_ERR_FATAL; 17093 si.ssi_pfa_flag = FALSE; 17094 17095 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17096 17097 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17098 &si, EIO, (clock_t)0, NULL); 17099 } 17100 17101 17102 /* 17103 * Function: sd_sense_key_recoverable_error 17104 * 17105 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_recoverable_error(struct sd_lun *un,
	uint8_t asc,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED
	 */
	if ((asc == 0x5D) && (sd_report_pfa != 0)) {
		SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
		si.ssi_severity = SCSI_ERR_INFO;
		si.ssi_pfa_flag = TRUE;
	} else {
		SD_UPDATE_ERRSTATS(un, sd_softerrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_recov_err);
		si.ssi_severity = SCSI_ERR_RECOVERED;
		si.ssi_pfa_flag = FALSE;
	}

	/* Full transfer completed: log (no retry) and finish the command. */
	if (pktp->pkt_resid == 0) {
		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
		sd_return_command(un, bp);
		return;
	}

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}




/*
 * Function: sd_sense_key_not_ready
 *
 * Description: Recovery actions for a SCSI "Not Ready" sense key.
 *
 * Context: May be called from interrupt context
 */

static void
sd_sense_key_not_ready(struct sd_lun *un,
	uint8_t asc, uint8_t ascq,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	/*
	 * Update error stats after first NOT READY error. Disks may have
	 * been powered down and may need to be restarted.  For CDROMs,
	 * report NOT READY errors only if media is present.
	 */
	if ((ISCD(un) && (un->un_f_geometry_is_valid == TRUE)) ||
	    (xp->xb_retry_count > 0)) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
	}

	/*
	 * Just fail if the "not ready" retry limit has been reached.
	 */
	if (xp->xb_retry_count >= un->un_notready_retry_count) {
		/* Special check for error message printing for removables. */
		if ((ISREMOVABLE(un)) && (asc == 0x04) &&
		    (ascq >= 0x04)) {
			si.ssi_severity = SCSI_ERR_ALL;
		}
		goto fail_command;
	}

	/*
	 * Check the ASC and ASCQ in the sense data as needed, to determine
	 * what to do.
	 */
	switch (asc) {
	case 0x04:	/* LOGICAL UNIT NOT READY */
		/*
		 * disk drives that don't spin up result in a very long delay
		 * in format without warning messages. We will log a message
		 * if the error level is set to verbose.
		 */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "logical unit not ready, resetting disk\n");
		}

		/*
		 * There are different requirements for CDROMs and disks for
		 * the number of retries.  If a CD-ROM is giving this, it is
		 * probably reading TOC and is in the process of getting
		 * ready, so we should keep on trying for a long time to make
		 * sure that all types of media are taken in account (for
		 * some media the drive takes a long time to read TOC).  For
		 * disks we do not want to retry this too many times as this
		 * can cause a long hang in format when the drive refuses to
		 * spin up (a very common failure).
		 */
		switch (ascq) {
		case 0x00:  /* LUN NOT READY, CAUSE NOT REPORTABLE */
			/*
			 * Disk drives frequently refuse to spin up which
			 * results in a very long hang in format without
			 * warning messages.
			 *
			 * Note: This code preserves the legacy behavior of
			 * comparing xb_retry_count against zero for fibre
			 * channel targets instead of comparing against the
			 * un_reset_retry_count value.  The reason for this
			 * discrepancy has been so utterly lost beneath the
			 * Sands of Time that even Indiana Jones could not
			 * find it.
			 */
			if (un->un_f_is_fibre == TRUE) {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
					(xp->xb_retry_count > 0)) &&
					(un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					CE_WARN, "logical unit not ready, "
					"resetting disk\n");
					sd_reset_target(un, pktp);
				}
			} else {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
					(xp->xb_retry_count >
					un->un_reset_retry_count)) &&
					(un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					CE_WARN, "logical unit not ready, "
					"resetting disk\n");
					sd_reset_target(un, pktp);
				}
			}
			break;

		case 0x01:  /* LUN IS IN PROCESS OF BECOMING READY */
			/*
			 * If the target is in the process of becoming
			 * ready, just proceed with the retry. This can
			 * happen with CD-ROMs that take a long time to
			 * read TOC after a power cycle or reset.
			 */
			goto do_retry;

		case 0x02:  /* LUN NOT READY, INITITIALIZING CMD REQUIRED */
			break;

		case 0x03:  /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
			/*
			 * Retries cannot help here so just fail right away.
			 */
			goto fail_command;

		case 0x88:
			/*
			 * Vendor-unique code for T3/T4: it indicates a
			 * path problem in a mutipathed config, but as far as
			 * the target driver is concerned it equates to a fatal
			 * error, so we should just fail the command right away
			 * (without printing anything to the console). If this
			 * is not a T3/T4, fall thru to the default recovery
			 * action.
			 * T3/T4 is FC only, don't need to check is_fibre
			 */
			if (SD_IS_T3(un) || SD_IS_T4(un)) {
				sd_return_failed_command(un, bp, EIO);
				return;
			}
			/* FALLTHRU */

		case 0x04:  /* LUN NOT READY, FORMAT IN PROGRESS */
		case 0x05:  /* LUN NOT READY, REBUILD IN PROGRESS */
		case 0x06:  /* LUN NOT READY, RECALCULATION IN PROGRESS */
		case 0x07:  /* LUN NOT READY, OPERATION IN PROGRESS */
		case 0x08:  /* LUN NOT READY, LONG WRITE IN PROGRESS */
		default:    /* Possible future codes in SCSI spec? */
			/*
			 * For removable-media devices, do not retry if
			 * ASCQ > 2 as these result mostly from USCSI commands
			 * on MMC devices issued to check status of an
			 * operation initiated in immediate mode.  Also for
			 * ASCQ >= 4 do not print console messages as these
			 * mainly represent a user-initiated operation
			 * instead of a system failure.
			 */
			if (ISREMOVABLE(un)) {
				si.ssi_severity = SCSI_ERR_ALL;
				goto fail_command;
			}
			break;
		}

		/*
		 * As part of our recovery attempt for the NOT READY
		 * condition, we issue a START STOP UNIT command. However
		 * we want to wait for a short delay before attempting this
		 * as there may still be more commands coming back from the
		 * target with the check condition. To do this we use
		 * timeout(9F) to call sd_start_stop_unit_callback() after
		 * the delay interval expires. (sd_start_stop_unit_callback()
		 * dispatches sd_start_stop_unit_task(), which will issue
		 * the actual START STOP UNIT command. The delay interval
		 * is one-half of the delay that we will use to retry the
		 * command that generated the NOT READY condition.
		 *
		 * Note that we could just dispatch sd_start_stop_unit_task()
		 * from here and allow it to sleep for the delay interval,
		 * but then we would be tying up the taskq thread
		 * uncesessarily for the duration of the delay.
		 *
		 * Do not issue the START STOP UNIT if the current command
		 * is already a START STOP UNIT.
		 */
		if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
			break;
		}

		/*
		 * Do not schedule the timeout if one is already pending.
		 */
		if (un->un_startstop_timeid != NULL) {
			SD_INFO(SD_LOG_ERROR, un,
			    "sd_sense_key_not_ready: restart already issued to"
			    " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
			    ddi_get_instance(SD_DEVINFO(un)));
			break;
		}

		/*
		 * Schedule the START STOP UNIT command, then queue the command
		 * for a retry.
		 *
		 * Note: A timeout is not scheduled for this retry because we
		 * want the retry to be serial with the START_STOP_UNIT. The
		 * retry will be started when the START_STOP_UNIT is completed
		 * in sd_start_stop_unit_task.
		 */
		un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
		    un, SD_BSY_TIMEOUT / 2);
		xp->xb_retry_count++;
		sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
		return;

	case 0x05:	/* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "unit does not respond to selection\n");
		}
		break;

	case 0x3A:	/* MEDIUM NOT PRESENT */
		if (sd_error_level >= SCSI_ERR_FATAL) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Caddy not inserted in drive\n");
		}

		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		/* The state has changed, inform the media watch routines */
		cv_broadcast(&un->un_state_cv);
		/* Just fail if no media is present in the drive.
 */
		goto fail_command;

	default:
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
			    "Unit not Ready. Additional sense code 0x%x\n",
			    asc);
		}
		break;
	}

do_retry:

	/*
	 * Retry the command, as some targets may report NOT READY for
	 * several seconds after being reset.
	 */
	xp->xb_retry_count++;
	si.ssi_severity = SCSI_ERR_RETRYABLE;
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
	    &si, EIO, SD_BSY_TIMEOUT, NULL);

	return;

fail_command:
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}



/*
 * Function:    sd_sense_key_medium_or_hardware_error
 *
 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
 *		sense key.  Updates the medium-error kstat (for medium errors
 *		only), optionally issues a LUN/target reset when the retry
 *		count hits un_reset_retry_count, then retries the command.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
	int sense_key, uint8_t asc,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	if (sense_key == KEY_MEDIUM_ERROR) {
		SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
	}

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	if ((un->un_reset_retry_count != 0) &&
	    (xp->xb_retry_count == un->un_reset_retry_count)) {
		/*
		 * The softstate mutex is dropped across the scsi_reset()
		 * calls below and reacquired afterward.
		 * NOTE(review): un_f_allow_bus_device_reset and
		 * un_f_lun_reset_enabled are read after the mutex is
		 * released -- confirm these fields are stable post-attach.
		 */
		mutex_exit(SD_MUTEX(un));
		/* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
		if (un->un_f_allow_bus_device_reset == TRUE) {

			boolean_t try_resetting_target = B_TRUE;

			/*
			 * We need to be able to handle specific ASC when we are
			 * handling a KEY_HARDWARE_ERROR. In particular
			 * taking the default action of resetting the target may
			 * not be the appropriate way to attempt recovery.
			 * Resetting a target because of a single LUN failure
			 * victimizes all LUNs on that target.
			 *
			 * This is true for the LSI arrays, if an LSI
			 * array controller returns an ASC of 0x84 (LUN Dead) we
			 * should trust it.
			 */

			if (sense_key == KEY_HARDWARE_ERROR) {
				switch (asc) {
				case 0x84:
					if (SD_IS_LSI(un)) {
						try_resetting_target = B_FALSE;
					}
					break;
				default:
					break;
				}
			}

			if (try_resetting_target == B_TRUE) {
				int reset_retval = 0;
				/* Prefer a LUN reset; fall back to target */
				if (un->un_f_lun_reset_enabled == TRUE) {
					SD_TRACE(SD_LOG_IO_CORE, un,
					    "sd_sense_key_medium_or_hardware_"
					    "error: issuing RESET_LUN\n");
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					SD_TRACE(SD_LOG_IO_CORE, un,
					    "sd_sense_key_medium_or_hardware_"
					    "error: issuing RESET_TARGET\n");
					(void) scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
		}
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * This really ought to be a fatal error, but we will retry anyway
	 * as some drives report this as a spurious error.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}



/*
 * Function:    sd_sense_key_illegal_request
 *
 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_softerrs);
	SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;

	/* Pointless to retry if the target thinks it's an illegal request */
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}




/*
 * Function:    sd_sense_key_unit_attention
 *
 * Description: Recovery actions for a SCSI "Unit Attention" sense key.
 *		Handles failure prediction (ASC 0x5D), reset/power-on
 *		notification (ASC 0x29), and media change (ASC 0x28);
 *		everything else falls through to a plain retry.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_unit_attention(struct sd_lun *un,
	uint8_t asc,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	/*
	 * For UNIT ATTENTION we allow retries for one minute. Devices
	 * like Sonoma can return UNIT ATTENTION close to a minute
	 * under certain conditions.
	 */
	int	retry_check_flag = SD_RETRIES_UA;
	struct	sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;


	switch (asc) {
	case 0x5D:  /* FAILURE PREDICTION THRESHOLD EXCEEDED */
		if (sd_report_pfa != 0) {
			SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
			si.ssi_pfa_flag = TRUE;
			retry_check_flag = SD_RETRIES_STANDARD;
			goto do_retry;
		}
		break;

	case 0x29:  /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
		/* A reset may have cost us the reservation; flag it */
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
		}
		/* FALLTHRU */

	case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
		if (!ISREMOVABLE(un)) {
			break;
		}

		/*
		 * When we get a unit attention from a removable-media device,
		 * it may be in a state that will take a long time to recover
		 * (e.g., from a reset). Since we are executing in interrupt
		 * context here, we cannot wait around for the device to come
		 * back. So hand this command off to sd_media_change_task()
		 * for deferred processing under taskq thread context. (Note
		 * that the command still may be failed if a problem is
		 * encountered at a later time.)
		 */
		if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
		    KM_NOSLEEP) == 0) {
			/*
			 * Cannot dispatch the request so fail the command.
			 */
			SD_UPDATE_ERRSTATS(un, sd_harderrs);
			SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
			si.ssi_severity = SCSI_ERR_FATAL;
			sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
			sd_return_failed_command(un, bp, EIO);
		}
		/*
		 * Either the command has been successfully dispatched to a
		 * task Q for retrying, or the dispatch failed. In either case
		 * do NOT retry again by calling sd_retry_command. This sets up
		 * two retries of the same command and when one completes and
		 * frees the resources the other will access freed memory,
		 * a bad thing.
		 */
		return;

	default:
		break;
	}

	if (!ISREMOVABLE(un)) {
		/*
		 * Do not update these here for removables. For removables
		 * these stats are updated (1) above if we failed to dispatch
		 * sd_media_change_task(), or (2) sd_media_change_task() may
		 * update these later if it encounters an error.
		 */
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
	}

do_retry:
	sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
	    EIO, SD_UA_RETRY_DELAY, NULL);
}



/*
 * Function:    sd_sense_key_fail_command
 *
 * Description: Used to fail a command when we don't like the sense key that
 *		was returned.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/* Unconditionally fatal: log the sense data and fail with EIO */
	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}



/*
 * Function:    sd_sense_key_blank_check
 *
 * Description: Recovery actions for a SCSI "Blank Check" sense key.
 *		Has no monetary connotation.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * Blank check is not fatal for removable devices, therefore
	 * it does not require a console message.
	 */
	si.ssi_severity = (ISREMOVABLE(un)) ? SCSI_ERR_ALL : SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}




/*
 * Function:    sd_sense_key_aborted_command
 *
 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	/*
	 * This really ought to be a fatal error, but we will retry anyway
	 * as some drives report this as a spurious error.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}



/*
 * Function:    sd_sense_key_default
 *
 * Description: Default recovery action for several SCSI sense keys (basically
 *		attempts a retry).
 *
 *     Context: May be called from interrupt context
 */

static void
sd_sense_key_default(struct sd_lun *un,
	int sense_key,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info	si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	/*
	 * Undecoded sense key.  Attempt retries and hope that will fix
	 * the problem.  Otherwise, we're dead.
	 */
	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Unhandled Sense Key '%s'\n", sense_keys[sense_key]);
	}

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}



/*
 * Function:    sd_print_retry_msg
 *
 * Description: Print a message indicating the retry action being taken.
 *
 *   Arguments: un - ptr to associated softstate
 *		bp - ptr to buf(9S) for the command
 *		arg - not used.
 *		flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
 *			or SD_NO_RETRY_ISSUED
 *
 *     Context: May be called from interrupt context
 */
/* ARGSUSED */
static void
sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt *pktp;
	char *reasonp;
	char *msgp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/* Stay quiet when suspended, powered down, or told to be silent */
	ASSERT(!mutex_owned(&un->un_pm_mutex));
	mutex_enter(&un->un_pm_mutex);
	if ((un->un_state == SD_STATE_SUSPENDED) ||
	    (SD_DEVICE_IS_IN_LOW_POWER(un)) ||
	    (pktp->pkt_flags & FLAG_SILENT)) {
		mutex_exit(&un->un_pm_mutex);
		goto update_pkt_reason;
	}
	mutex_exit(&un->un_pm_mutex);

	/*
	 * Suppress messages if they are all the same pkt_reason; with
	 * TQ, many (up to 256) are returned with the same pkt_reason.
	 * If we are in panic, then suppress the retry messages.
	 */
	switch (flag) {
	case SD_NO_RETRY_ISSUED:
		msgp = "giving up";
		break;
	case SD_IMMEDIATE_RETRY_ISSUED:
	case SD_DELAYED_RETRY_ISSUED:
		if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) ||
		    ((pktp->pkt_reason == un->un_last_pkt_reason) &&
		    (sd_error_level != SCSI_ERR_ALL))) {
			return;
		}
		msgp = "retrying command";
		break;
	default:
		goto update_pkt_reason;
	}

	reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" :
	    scsi_rname(pktp->pkt_reason));

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "SCSI transport failed: reason '%s': %s\n", reasonp, msgp);

update_pkt_reason:
	/*
	 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason.
	 * This is to prevent multiple console messages for the same failure
	 * condition. Note that un->un_last_pkt_reason is NOT restored if &
	 * when the command is retried successfully because there still may be
	 * more commands coming back with the same value of pktp->pkt_reason.
	 */
	if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) {
		un->un_last_pkt_reason = pktp->pkt_reason;
	}
}


/*
 * Function:    sd_print_cmd_incomplete_msg
 *
 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason.
17882 * 17883 * Arguments: un - ptr to associated softstate 17884 * bp - ptr to buf(9S) for the command 17885 * arg - passed to sd_print_retry_msg() 17886 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17887 * or SD_NO_RETRY_ISSUED 17888 * 17889 * Context: May be called from interrupt context 17890 */ 17891 17892 static void 17893 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 17894 int code) 17895 { 17896 dev_info_t *dip; 17897 17898 ASSERT(un != NULL); 17899 ASSERT(mutex_owned(SD_MUTEX(un))); 17900 ASSERT(bp != NULL); 17901 17902 switch (code) { 17903 case SD_NO_RETRY_ISSUED: 17904 /* Command was failed. Someone turned off this target? */ 17905 if (un->un_state != SD_STATE_OFFLINE) { 17906 /* 17907 * Suppress message if we are detaching and 17908 * device has been disconnected 17909 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 17910 * private interface and not part of the DDI 17911 */ 17912 dip = un->un_sd->sd_dev; 17913 if (!(DEVI_IS_DETACHING(dip) && 17914 DEVI_IS_DEVICE_REMOVED(dip))) { 17915 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17916 "disk not responding to selection\n"); 17917 } 17918 New_state(un, SD_STATE_OFFLINE); 17919 } 17920 break; 17921 17922 case SD_DELAYED_RETRY_ISSUED: 17923 case SD_IMMEDIATE_RETRY_ISSUED: 17924 default: 17925 /* Command was successfully queued for retry */ 17926 sd_print_retry_msg(un, bp, arg, code); 17927 break; 17928 } 17929 } 17930 17931 17932 /* 17933 * Function: sd_pkt_reason_cmd_incomplete 17934 * 17935 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/* Do not do a reset if selection did not complete */
	/* Note: Should this not just check the bit? */
	if (pktp->pkt_state != STATE_GOT_BUS) {
		SD_UPDATE_ERRSTATS(un, sd_transerrs);
		sd_reset_target(un, pktp);
	}

	/*
	 * If the target was not successfully selected, then set
	 * SD_RETRIES_FAILFAST to indicate that we lost communication
	 * with the target, and further retries and/or commands are
	 * likely to take a long time.
	 */
	if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) {
		flag |= SD_RETRIES_FAILFAST;
	}

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	sd_retry_command(un, bp, flag,
	    sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 * Function:    sd_pkt_reason_cmd_tran_err
 *
 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * Do not reset if we got a parity error, or if
	 * selection did not complete.
	 */
	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	/* Note: Should this not just check the bit for pkt_state? */
	if (((pktp->pkt_statistics & STAT_PERR) == 0) &&
	    (pktp->pkt_state != STATE_GOT_BUS)) {
		SD_UPDATE_ERRSTATS(un, sd_transerrs);
		sd_reset_target(un, pktp);
	}

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 * Function:    sd_pkt_reason_cmd_reset
 *
 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/* The target may still be running the command, so try to reset. */
	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/*
	 * If pkt_reason is CMD_RESET chances are that this pkt got
	 * reset because another target on this bus caused it. The target
	 * that caused it should get CMD_TIMEOUT with pkt_statistics
	 * of STAT_TIMEOUT/STAT_DEV_RESET.
	 */

	sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}




/*
 * Function:    sd_pkt_reason_cmd_aborted
 *
 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/* The target may still be running the command, so try to reset. */
	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/*
	 * If pkt_reason is CMD_ABORTED chances are that this pkt got
	 * aborted because another target on this bus caused it. The target
	 * that caused it should get CMD_TIMEOUT with pkt_statistics
	 * of STAT_TIMEOUT/STAT_DEV_RESET.
	 */

	sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 * Function:    sd_pkt_reason_cmd_timeout
 *
 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);


	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/*
	 * A command timeout indicates that we could not establish
	 * communication with the target, so set SD_RETRIES_FAILFAST
	 * as further retries/commands are likely to take a long time.
	 */
	sd_retry_command(un, bp,
	    (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 * Function:    sd_pkt_reason_cmd_unx_bus_free
 *
 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/* Suppress the retry message for parity errors */
	funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ?
	    sd_print_retry_msg : NULL;

	sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
	    funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}


/*
 * Function:    sd_pkt_reason_cmd_tag_reject
 *
 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	/* Target rejected the tag: disable tagged queueing and throttle */
	pktp->pkt_flags = 0;
	un->un_tagflags = 0;
	if (un->un_f_opt_queueing == TRUE) {
		un->un_throttle = min(un->un_throttle, 3);
	} else {
		un->un_throttle = 1;
	}
	/* Drop the mutex across the HBA capability call */
	mutex_exit(SD_MUTEX(un));
	(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
	mutex_enter(SD_MUTEX(un));

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/* Legacy behavior not to check retry counts here. */
	sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}


/*
 * Function:    sd_pkt_reason_default
 *
 * Description: Default recovery actions for SCSA pkt_reason values that
 *		do not have more explicit recovery actions.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}



/*
 * Function:    sd_pkt_status_check_condition
 *
 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
	    "entry: buf:0x%p xp:0x%p\n", bp, xp);

	/*
	 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
	 * command will be retried after the request sense). Otherwise, retry
	 * the command. Note: we are issuing the request sense even though the
	 * retry limit may have been reached for the failed command.
	 */
	if (un->un_f_arq_enabled == FALSE) {
		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
		    "no ARQ, sending request sense command\n");
		sd_send_request_sense_command(un, bp, pktp);
	} else {
		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
		    "ARQ,retrying request sense command\n");
#if defined(__i386) || defined(__amd64)
		/*
		 * The SD_RETRY_DELAY value need to be adjusted here
		 * when SD_RETRY_DELAY change in sddef.h
		 */
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 0,
			un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0,
			NULL);
#else
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
		    0, SD_RETRY_DELAY, NULL);
#endif
	}

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
}


/*
 * Function:    sd_pkt_status_busy
 *
 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
 *		Fails the command once un_busy_retry_count retries are used
 *		up; escalates to a LUN/target/bus reset when the retry count
 *		reaches the reset threshold.
 *
 *     Context: May be called from interrupt context
 */

static void
sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
	struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_busy: entry\n");

	/* If retries are exhausted, just fail the command. */
	if (xp->xb_retry_count >= un->un_busy_retry_count) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "device busy too long\n");
		sd_return_failed_command(un, bp, EIO);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_pkt_status_busy: exit\n");
		return;
	}
	xp->xb_retry_count++;

	/*
	 * Try to reset the target. However, we do not want to perform
	 * more than one reset if the device continues to fail. The reset
	 * will be performed when the retry count reaches the reset
	 * threshold. This threshold should be set such that at least
	 * one retry is issued before the reset is performed.
	 */
	if (xp->xb_retry_count ==
	    ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
		int rval = 0;
		/* The mutex is dropped across the scsi_reset() calls */
		mutex_exit(SD_MUTEX(un));
		if (un->un_f_allow_bus_device_reset == TRUE) {
			/*
			 * First try to reset the LUN; if we cannot then
			 * try to reset the target.
			 */
			if (un->un_f_lun_reset_enabled == TRUE) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_pkt_status_busy: RESET_LUN\n");
				rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
			}
			if (rval == 0) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_pkt_status_busy: RESET_TARGET\n");
				rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
			}
		}
		if (rval == 0) {
			/*
			 * If the RESET_LUN and/or RESET_TARGET failed,
			 * try RESET_ALL
			 */
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_pkt_status_busy: RESET_ALL\n");
			rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
		}
		mutex_enter(SD_MUTEX(un));
		if (rval == 0) {
			/*
			 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed.
			 * At this point we give up & fail the command.
			 */
			sd_return_failed_command(un, bp, EIO);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_pkt_status_busy: exit (failed cmd)\n");
			return;
		}
	}

	/*
	 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as
	 * we have already checked the retry counts above.
18361 */ 18362 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 18363 EIO, SD_BSY_TIMEOUT, NULL); 18364 18365 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18366 "sd_pkt_status_busy: exit\n"); 18367 } 18368 18369 18370 /* 18371 * Function: sd_pkt_status_reservation_conflict 18372 * 18373 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 18374 * command status. 18375 * 18376 * Context: May be called from interrupt context 18377 */ 18378 18379 static void 18380 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 18381 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18382 { 18383 ASSERT(un != NULL); 18384 ASSERT(mutex_owned(SD_MUTEX(un))); 18385 ASSERT(bp != NULL); 18386 ASSERT(xp != NULL); 18387 ASSERT(pktp != NULL); 18388 18389 /* 18390 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 18391 * conflict could be due to various reasons like incorrect keys, not 18392 * registered or not reserved etc. So, we return EACCES to the caller. 18393 */ 18394 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 18395 int cmd = SD_GET_PKT_OPCODE(pktp); 18396 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 18397 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 18398 sd_return_failed_command(un, bp, EACCES); 18399 return; 18400 } 18401 } 18402 18403 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 18404 18405 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 18406 if (sd_failfast_enable != 0) { 18407 /* By definition, we must panic here.... */ 18408 panic("Reservation Conflict"); 18409 /*NOTREACHED*/ 18410 } 18411 SD_ERROR(SD_LOG_IO, un, 18412 "sd_handle_resv_conflict: Disk Reserved\n"); 18413 sd_return_failed_command(un, bp, EACCES); 18414 return; 18415 } 18416 18417 /* 18418 * 1147670: retry only if sd_retry_on_reservation_conflict 18419 * property is set (default is 1). Retries will not succeed 18420 * on a disk reserved by another initiator. HA systems 18421 * may reset this via sd.conf to avoid these retries. 
	 *
	 * Note: The legacy return code for this failure is EIO, however EACCES
	 * seems more appropriate for a reservation conflict.
	 */
	if (sd_retry_on_reservation_conflict == 0) {
		SD_ERROR(SD_LOG_IO, un,
		    "sd_handle_resv_conflict: Device Reserved\n");
		sd_return_failed_command(un, bp, EIO);
		return;
	}

	/*
	 * Retry the command if we can.
	 *
	 * Note: The legacy return code for this failure is EIO, however EACCES
	 * seems more appropriate for a reservation conflict.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
	    (clock_t)2, NULL);
}



/*
 * Function:	sd_pkt_status_qfull
 *
 * Description: Handle a QUEUE FULL condition from the target.  This can
 *		occur if the HBA does not handle the queue full condition.
 *		(Basically this means third-party HBAs as Sun HBAs will
 *		handle the queue full condition.)  Note that if there are
 *		some commands already in the transport, then the queue full
 *		has occurred because the queue for this nexus is actually
 *		full. If there are no commands in the transport, then the
 *		queue full is resulting from some other initiator or lun
 *		consuming all the resources at the target.
 *
 * Arguments: un - ptr to associated softstate
 *	      bp - ptr to buf(9S) for the command
 *	      xp - ptr to the sd_xbuf for the command
 *	      pktp - ptr to the scsi_pkt(9S) for the command
 *
 * Context: May be called from interrupt context
 */

static void
sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_qfull: entry\n");

	/*
	 * Just lower the QFULL throttle and retry the command.  Note that
	 * we do not limit the number of retries here.
	 */
	sd_reduce_throttle(un, SD_THROTTLE_QFULL);
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0,
	    SD_RESTART_TIMEOUT, NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_qfull: exit\n");
}


/*
 * Function:	sd_reset_target
 *
 * Description: Issue a scsi_reset(9F), with either RESET_LUN,
 *		RESET_TARGET, or RESET_ALL.  The reset is skipped entirely if
 *		the transport layer reports it already reset or aborted the
 *		command (pkt_statistics).
 *
 * Arguments: un - ptr to associated softstate
 *	      pktp - ptr to the scsi_pkt(9S) for the failed command
 *
 * Context: May be called under interrupt context.
 */

static void
sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp)
{
	int rval = 0;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n");

	/*
	 * No need to reset if the transport layer has already done so.
	 */
	if ((pktp->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reset_target: no reset\n");
		return;
	}

	/* Drop the softstate mutex across the scsi_reset(9F) calls. */
	mutex_exit(SD_MUTEX(un));

	if (un->un_f_allow_bus_device_reset == TRUE) {
		if (un->un_f_lun_reset_enabled == TRUE) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_reset_target: RESET_LUN\n");
			rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
		}
		if (rval == 0) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_reset_target: RESET_TARGET\n");
			rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
		}
	}

	if (rval == 0) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reset_target: RESET_ALL\n");
		(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
	}

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n");
}


/*
 * Function:	sd_media_change_task
 *
 * Description: Recovery action for
 *		CDROM to become available.
 *
 * Arguments: arg - the scsi_pkt(9S) of the failed command (pkt_private
 *		holds the associated buf).
 *
 * Context: Executes in a taskq() thread context
 */

static void
sd_media_change_task(void *arg)
{
	struct	scsi_pkt	*pktp = arg;
	struct	sd_lun		*un;
	struct	buf		*bp;
	struct	sd_xbuf		*xp;
	int	err		= 0;
	int	retry_count	= 0;
	int	retry_limit	= SD_UNIT_ATTENTION_RETRY/10;
	struct	sd_sense_info	si;

	ASSERT(pktp != NULL);
	bp = (struct buf *)pktp->pkt_private;
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(ISREMOVABLE(un));

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;

	/*
	 * When a reset is issued on a CDROM, it takes a long time to
	 * recover. First few attempts to read capacity and other things
	 * related to handling unit attention fail (with a ASC 0x4 and
	 * ASCQ 0x1). In that case we want to do enough retries and we want
	 * to limit the retries in other cases of genuine failures like
	 * no media in drive.
	 */
	while (retry_count++ < retry_limit) {
		if ((err = sd_handle_mchange(un)) == 0) {
			break;
		}
		if (err == EAGAIN) {
			/* Device is becoming ready; allow the full budget. */
			retry_limit = SD_UNIT_ATTENTION_RETRY;
		}
		/* Sleep for 0.5 sec. & try again */
		delay(drv_usectohz(500000));
	}

	/*
	 * Dispatch (retry or fail) the original command here,
	 * along with appropriate console messages....
	 *
	 * Must grab the mutex before calling sd_retry_command,
	 * sd_print_sense_msg and sd_return_failed_command.
	 *
	 * NOTE(review): err holds an errno from sd_handle_mchange(); the
	 * comparison below relies on SD_CMD_SUCCESS being 0 -- verify.
	 */
	mutex_enter(SD_MUTEX(un));
	if (err != SD_CMD_SUCCESS) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
		si.ssi_severity = SCSI_ERR_FATAL;
		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
		sd_return_failed_command(un, bp, EIO);
	} else {
		sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
		    &si, EIO, (clock_t)0, NULL);
	}
	mutex_exit(SD_MUTEX(un));
}



/*
 * Function:	sd_handle_mchange
 *
 * Description: Perform geometry validation & other recovery when CDROM
 *		has been removed from drive.
 *
 * Return Code: 0 for success
 *		errno-type return code of either sd_send_scsi_DOORLOCK() or
 *		sd_send_scsi_READ_CAPACITY()
 *
 * Context: Executes in a taskq() thread context
 */

static int
sd_handle_mchange(struct sd_lun *un)
{
	uint64_t	capacity;
	uint32_t	lbasize;
	int		rval;

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(ISREMOVABLE(un));

	if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize,
	    SD_PATH_DIRECT_PRIORITY)) != 0) {
		return (rval);
	}

	mutex_enter(SD_MUTEX(un));
	sd_update_block_info(un, lbasize, capacity);

	if (un->un_errstats != NULL) {
		struct	sd_errstats *stp =
		    (struct sd_errstats *)un->un_errstats->ks_data;
		/* Capacity kstat is maintained in bytes. */
		stp->sd_capacity.value.ui64 = (uint64_t)
		    ((uint64_t)un->un_blockcount *
		    (uint64_t)un->un_tgt_blocksize);
	}

	/*
	 * Note: Maybe let the strategy/partitioning chain worry about getting
	 * valid geometry.
	 */
	un->un_f_geometry_is_valid = FALSE;
	(void) sd_validate_geometry(un, SD_PATH_DIRECT_PRIORITY);
	if (un->un_f_geometry_is_valid == FALSE) {
		mutex_exit(SD_MUTEX(un));
		return (EIO);
	}

	mutex_exit(SD_MUTEX(un));

	/*
	 * Try to lock the door
	 */
	return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
	    SD_PATH_DIRECT_PRIORITY));
}


/*
 * Function:	sd_send_scsi_DOORLOCK
 *
 * Description: Issue the scsi DOOR LOCK command
 *
 * Arguments: un - pointer to driver soft state (unit) structure for
 *		this target.
 *	      flag - SD_REMOVAL_ALLOW
 *		     SD_REMOVAL_PREVENT
 *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq. SD_PATH_DIRECT_PRIORITY is used when this
 *			command is issued as part of an error recovery action.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep.
 */

static int
sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag)
{
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	struct scsi_extended_sense	sense_buf;
	int			status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un);

	/* already determined doorlock is not supported, fake success */
	if (un->un_f_doorlock_supported == FALSE) {
		return (0);
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));

	cdb.scc_cmd = SCMD_DOORLOCK;
	cdb.cdb_opaque[4] = (uchar_t)flag;

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP0;
	ucmd_buf.uscsi_bufaddr	= NULL;
	ucmd_buf.uscsi_buflen	= 0;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (sense_buf);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 15;

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n");

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
	    UIO_SYSSPACE, UIO_SYSSPACE, path_flag);

	if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
	    (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
	    (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) {
		/* fake success and skip subsequent doorlock commands */
		un->un_f_doorlock_supported = FALSE;
		return (0);
	}

	return (status);
}


/*
 * Function:	sd_send_scsi_READ_CAPACITY
 *
 * Description: This routine uses the scsi READ CAPACITY command to determine
 *		the device capacity in number of blocks and the device native
 *		block size. If this function returns a failure, then the
 *		values in *capp and *lbap are undefined.  If the capacity
 *		returned is 0xffffffff then the lun is too large for a
 *		normal READ CAPACITY command and the results of a
 *		READ CAPACITY 16 will be used instead.
 *
 * Arguments: un - ptr to soft state struct for the target
 *	      capp - ptr to unsigned 64-bit variable to receive the
 *		capacity value from the command.
 *	      lbap - ptr to unsigned 32-bit variable to receive the
 *		block size value from the command
 *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *		to use the USCSI "direct" chain and bypass the normal
 *		command waitq. SD_PATH_DIRECT_PRIORITY is used when this
 *		command is issued as part of an error recovery action.
 *
 * Return Code: 0 - Success
 *		EIO - IO error
 *		EACCES - Reservation conflict detected
 *		EAGAIN - Device is becoming ready
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep.  Blocks until command completes.
 */

#define	SD_CAPACITY_SIZE	sizeof (struct scsi_capacity)

static int
sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap,
	int path_flag)
{
	struct	scsi_extended_sense	sense_buf;
	struct	uscsi_cmd	ucmd_buf;
	union	scsi_cdb	cdb;
	uint32_t		*capacity_buf;
	uint64_t		capacity;
	uint32_t		lbasize;
	int			status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(capp != NULL);
	ASSERT(lbap != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);

	/*
	 * First send a READ_CAPACITY command to the target.
	 * (This command is mandatory under SCSI-2.)
	 *
	 * Set up the CDB for the READ_CAPACITY command.  The Partial
	 * Medium Indicator bit is cleared.  The address field must be
	 * zero if the PMI bit is zero.
	 */
	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));

	/*
	 * Note: capacity_buf must be freed on every exit path; the
	 * success path frees it as soon as the data is extracted.
	 */
	capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP);

	cdb.scc_cmd = SCMD_READ_CAPACITY;

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)capacity_buf;
	ucmd_buf.uscsi_buflen	= SD_CAPACITY_SIZE;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (sense_buf);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
	    UIO_SYSSPACE, UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/* Return failure if we did not get valid capacity data. */
		if (ucmd_buf.uscsi_resid != 0) {
			kmem_free(capacity_buf, SD_CAPACITY_SIZE);
			return (EIO);
		}

		/*
		 * Read capacity and block size from the READ CAPACITY 10 data.
		 * This data may be adjusted later due to device specific
		 * issues.
		 *
		 * According to the SCSI spec, the READ CAPACITY 10
		 * command returns the following:
		 *
		 *  bytes 0-3: Maximum logical block address available.
		 *		(MSB in byte:0 & LSB in byte:3)
		 *
		 *  bytes 4-7: Block length in bytes
		 *		(MSB in byte:4 & LSB in byte:7)
		 *
		 */
		capacity = BE_32(capacity_buf[0]);
		lbasize = BE_32(capacity_buf[1]);

		/*
		 * Done with capacity_buf
		 */
		kmem_free(capacity_buf, SD_CAPACITY_SIZE);

		/*
		 * if the reported capacity is set to all 0xf's, then
		 * this disk is too large and requires SBC-2 commands.
		 * Reissue the request using READ CAPACITY 16.
		 */
		if (capacity == 0xffffffff) {
			status = sd_send_scsi_READ_CAPACITY_16(un, &capacity,
			    &lbasize, path_flag);
			if (status != 0) {
				return (status);
			}
		}
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			/*
			 * Check condition; look for ASC/ASCQ of 0x04/0x01
			 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
			 */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (sense_buf.es_add_code == 0x04) &&
			    (sense_buf.es_qual_code == 0x01)) {
				kmem_free(capacity_buf, SD_CAPACITY_SIZE);
				return (EAGAIN);
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		kmem_free(capacity_buf, SD_CAPACITY_SIZE);
		return (status);
	}

	/*
	 * Some ATAPI CD-ROM drives report inaccurate LBA size values
	 * (2352 and 0 are common) so for these devices always force the value
	 * to 2048 as required by the ATAPI specs.
	 */
	if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
		lbasize = 2048;
	}

	/*
	 * Get the maximum LBA value from the READ CAPACITY data.
	 * Here we assume that the Partial Medium Indicator (PMI) bit
	 * was cleared when issuing the command. This means that the LBA
	 * returned from the device is the LBA of the last logical block
	 * on the logical unit.  The actual logical block count will be
	 * this value plus one.
	 *
	 * Currently the capacity is saved in terms of un->un_sys_blocksize,
	 * so scale the capacity value to reflect this.
	 */
	capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);

#if defined(__i386) || defined(__amd64)
	/*
	 * On x86, compensate for off-by-1 error (number of sectors on
	 * media)  (1175930)
	 */
	if (!ISREMOVABLE(un) && (lbasize == un->un_sys_blocksize)) {
		capacity -= 1;
	}
#endif

	/*
	 * Copy the values from the READ CAPACITY command into the space
	 * provided by the caller.
	 */
	*capp = capacity;
	*lbap = lbasize;

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
	    "capacity:0x%llx  lbasize:0x%x\n", capacity, lbasize);

	/*
	 * Both the lbasize and capacity from the device must be nonzero,
	 * otherwise we assume that the values are not valid and return
	 * failure to the caller. (4203735)
	 */
	if ((capacity == 0) || (lbasize == 0)) {
		return (EIO);
	}

	return (0);
}

/*
 * Function:	sd_send_scsi_READ_CAPACITY_16
 *
 * Description: This routine uses the scsi READ CAPACITY 16 command to
 *		determine the device capacity in number of blocks and the
 *		device native block size.  If this function returns a failure,
 *		then the values in *capp and *lbap are undefined.
 *		This routine should always be called by
 *		sd_send_scsi_READ_CAPACITY which will apply any device
 *		specific adjustments to capacity and lbasize.
 *
 * Arguments: un - ptr to soft state struct for the target
 *	      capp - ptr to unsigned 64-bit variable to receive the
 *		capacity value from the command.
 *	      lbap - ptr to unsigned 32-bit variable to receive the
 *		block size value from the command
 *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *		to use the USCSI "direct" chain and bypass the normal
 *		command waitq.
 *		SD_PATH_DIRECT_PRIORITY is used when
 *		this command is issued as part of an error recovery
 *		action.
 *
 * Return Code: 0 - Success
 *		EIO - IO error
 *		EACCES - Reservation conflict detected
 *		EAGAIN - Device is becoming ready
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep.  Blocks until command completes.
 */

#define	SD_CAPACITY_16_SIZE	sizeof (struct scsi_capacity_16)

static int
sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
	uint32_t *lbap, int path_flag)
{
	struct	scsi_extended_sense	sense_buf;
	struct	uscsi_cmd	ucmd_buf;
	union	scsi_cdb	cdb;
	uint64_t		*capacity16_buf;
	uint64_t		capacity;
	uint32_t		lbasize;
	int			status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(capp != NULL);
	ASSERT(lbap != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);

	/*
	 * First send a READ_CAPACITY_16 command to the target.
	 *
	 * Set up the CDB for the READ_CAPACITY_16 command.  The Partial
	 * Medium Indicator bit is cleared.  The address field must be
	 * zero if the PMI bit is zero.
	 */
	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));

	/* capacity16_buf must be freed on every exit path. */
	capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP4;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)capacity16_buf;
	ucmd_buf.uscsi_buflen	= SD_CAPACITY_16_SIZE;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (sense_buf);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	/*
	 * Read Capacity (16) is a Service Action In command.  One
	 * command byte (0x9E) is overloaded for multiple operations,
	 * with the second CDB byte specifying the desired operation
	 */
	cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
	cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;

	/*
	 * Fill in allocation length field
	 */
	FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
	    UIO_SYSSPACE, UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/*
		 * Return failure if we did not get valid capacity data.
		 * NOTE(review): a residual of up to 20 bytes is tolerated,
		 * presumably so that the first 12 bytes (capacity + block
		 * length) of the SD_CAPACITY_16_SIZE buffer are still
		 * guaranteed present -- verify against scsi_capacity_16.
		 */
		if (ucmd_buf.uscsi_resid > 20) {
			kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
			return (EIO);
		}

		/*
		 * Read capacity and block size from the READ CAPACITY 16
		 * data.  This data may be adjusted later due to device
		 * specific issues.
		 *
		 * According to the SCSI spec, the READ CAPACITY 16
		 * command returns the following:
		 *
		 *  bytes 0-7: Maximum logical block address available.
		 *		(MSB in byte:0 & LSB in byte:7)
		 *
		 *  bytes 8-11: Block length in bytes
		 *		(MSB in byte:8 & LSB in byte:11)
		 *
		 */
		capacity = BE_64(capacity16_buf[0]);
		lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);

		/*
		 * Done with capacity16_buf
		 */
		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);

		/*
		 * if the reported capacity is set to all 0xf's, then
		 * this disk is too large.  This could only happen with
		 * a device that supports LBAs larger than 64 bits which
		 * are not defined by any current T10 standards.
		 */
		if (capacity == 0xffffffffffffffff) {
			return (EIO);
		}
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			/*
			 * Check condition; look for ASC/ASCQ of 0x04/0x01
			 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
			 */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (sense_buf.es_add_code == 0x04) &&
			    (sense_buf.es_qual_code == 0x01)) {
				kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
				return (EAGAIN);
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
		return (status);
	}

	*capp = capacity;
	*lbap = lbasize;

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
	    "capacity:0x%llx  lbasize:0x%x\n", capacity, lbasize);

	return (0);
}


/*
 * Function:	sd_send_scsi_START_STOP_UNIT
 *
 * Description: Issue a scsi START STOP UNIT command to the target.
 *
 * Arguments: un - pointer to driver soft state (unit) structure for
 *		this target.
 *	      flag - SD_TARGET_START
 *		     SD_TARGET_STOP
 *		     SD_TARGET_EJECT
 *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *		to use the USCSI "direct" chain and bypass the normal
 *		command waitq. SD_PATH_DIRECT_PRIORITY is used when this
 *		command is issued as part of an error recovery action.
 *
 * Return Code: 0 - Success
 *		EIO - IO error
 *		EACCES - Reservation conflict detected
 *		ENXIO - Not Ready, medium not present
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep.
 */

static int
sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag)
{
	struct	scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un);

	/*
	 * Removable media that was determined not to support START/STOP:
	 * fake success for start/stop requests (ejects still go through).
	 */
	if (ISREMOVABLE(un) &&
	    ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) &&
	    (un->un_f_start_stop_supported != TRUE)) {
		return (0);
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	cdb.scc_cmd = SCMD_START_STOP;
	cdb.cdb_opaque[4] = (uchar_t)flag;

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP0;
	ucmd_buf.uscsi_bufaddr	= NULL;
	ucmd_buf.uscsi_buflen	= 0;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 200;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
	    UIO_SYSSPACE, UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		/* Map selected CHECK CONDITION sense data to errnos. */
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) {
				switch (sense_buf.es_key) {
				case KEY_ILLEGAL_REQUEST:
					status = ENOTSUP;
					break;
				case KEY_NOT_READY:
					/* ASC 0x3A: medium not present */
					if (sense_buf.es_add_code == 0x3A) {
						status = ENXIO;
					}
					break;
				default:
					break;
				}
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n");

	return (status);
}


/*
 * Function:	sd_start_stop_unit_callback
 *
 * Description: timeout(9F) callback to begin recovery process for a
 *		device that has spun down.
 *
 * Arguments: arg - pointer to associated softstate struct.
 *
 * Context: Executes in a timeout(9F) thread context
 */

static void
sd_start_stop_unit_callback(void *arg)
{
	struct sd_lun	*un = arg;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n");

	/* Defer the (blocking) recovery work to a taskq thread. */
	(void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP);
}


/*
 * Function:	sd_start_stop_unit_task
 *
 * Description: Recovery procedure when a drive is spun down.
 *
 * Arguments: arg - pointer to associated softstate struct.
 *
 * Context: Executes in a taskq() thread context
 */

static void
sd_start_stop_unit_task(void *arg)
{
	struct sd_lun	*un = arg;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n");

	/*
	 * Some unformatted drives report not ready error, no need to
	 * restart if format has been initiated.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_format_in_progress == TRUE) {
		mutex_exit(SD_MUTEX(un));
		return;
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * When a START STOP command is issued from here, it is part of a
	 * failure recovery operation and must be issued before any other
	 * commands, including any pending retries. Thus it must be sent
	 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up
	 * succeeds or not, we will start I/O after the attempt.
	 */
	(void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
	    SD_PATH_DIRECT_PRIORITY);

	/*
	 * The above call blocks until the START_STOP_UNIT command completes.
	 * Now that it has completed, we must re-try the original IO that
	 * received the NOT READY condition in the first place. There are
	 * three possible conditions here:
	 *
	 *  (1) The original IO is on un_retry_bp.
	 *  (2) The original IO is on the regular wait queue, and un_retry_bp
	 *	is NULL.
	 *  (3) The original IO is on the regular wait queue, and un_retry_bp
	 *	points to some other, unrelated bp.
	 *
	 * For each case, we must call sd_start_cmds() with un_retry_bp
	 * as the argument. If un_retry_bp is NULL, this will initiate
	 * processing of the regular wait queue.  If un_retry_bp is not NULL,
	 * then this will process the bp on un_retry_bp. That may or may not
	 * be the original IO, but that does not matter: the important thing
	 * is to keep the IO processing going at this point.
	 *
	 * Note: This is a very specific error recovery sequence associated
	 * with a drive that is not spun up. We attempt a START_STOP_UNIT and
	 * serialize the I/O with completion of the spin-up.
	 */
	mutex_enter(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
	    un, un->un_retry_bp);
	un->un_startstop_timeid = NULL;	/* Timeout is no longer pending */
	sd_start_cmds(un, un->un_retry_bp);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n");
}


/*
 * Function:	sd_send_scsi_INQUIRY
 *
 * Description: Issue the scsi INQUIRY command.
 *
 * Arguments: un - ptr to soft state struct for the target
 *	      bufaddr - buffer to receive the INQUIRY data
 *	      buflen - size of bufaddr, in bytes
 *	      evpd - EVPD bit for CDB byte 1
 *	      page_code - VPD page code for CDB byte 2
 *	      residp - if non-NULL, receives the uscsi residual count
 *		on success
 *
 * Return Code: 0 - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
19341 */ 19342 19343 static int 19344 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen, 19345 uchar_t evpd, uchar_t page_code, size_t *residp) 19346 { 19347 union scsi_cdb cdb; 19348 struct uscsi_cmd ucmd_buf; 19349 int status; 19350 19351 ASSERT(un != NULL); 19352 ASSERT(!mutex_owned(SD_MUTEX(un))); 19353 ASSERT(bufaddr != NULL); 19354 19355 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 19356 19357 bzero(&cdb, sizeof (cdb)); 19358 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19359 bzero(bufaddr, buflen); 19360 19361 cdb.scc_cmd = SCMD_INQUIRY; 19362 cdb.cdb_opaque[1] = evpd; 19363 cdb.cdb_opaque[2] = page_code; 19364 FORMG0COUNT(&cdb, buflen); 19365 19366 ucmd_buf.uscsi_cdb = (char *)&cdb; 19367 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19368 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19369 ucmd_buf.uscsi_buflen = buflen; 19370 ucmd_buf.uscsi_rqbuf = NULL; 19371 ucmd_buf.uscsi_rqlen = 0; 19372 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 19373 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 19374 19375 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19376 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_DIRECT); 19377 19378 if ((status == 0) && (residp != NULL)) { 19379 *residp = ucmd_buf.uscsi_resid; 19380 } 19381 19382 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 19383 19384 return (status); 19385 } 19386 19387 19388 /* 19389 * Function: sd_send_scsi_TEST_UNIT_READY 19390 * 19391 * Description: Issue the scsi TEST UNIT READY command. 19392 * This routine can be told to set the flag USCSI_DIAGNOSE to 19393 * prevent retrying failed commands. Use this when the intent 19394 * is either to check for device readiness, to clear a Unit 19395 * Attention, or to clear any outstanding sense data. 19396 * However under specific conditions the expected behavior 19397 * is for retries to bring a device ready, so use the flag 19398 * with caution. 
 *
 * Arguments: un
 *		flag:   SD_CHECK_FOR_MEDIA: return ENXIO if no media present
 *			SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
 *			SD_BYPASS_PM: use the USCSI "direct" chain (no waitq).
 *			0: dont check for media present, do retries on cmd.
 *
 * Return Code: 0 - Success
 *		EIO - IO error
 *		EACCES - Reservation conflict detected
 *		ENXIO - Not Ready, medium not present
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb			cdb;
	struct uscsi_cmd		ucmd_buf;
	int				status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);

	/*
	 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
	 * timeouts when they receive a TUR and the queue is not empty. Check
	 * the configuration flag set during attach (indicating the drive has
	 * this firmware bug) and un_ncmds_in_transport before issuing the
	 * TUR. If there are
	 * pending commands return success, this is a bit arbitrary but is ok
	 * for non-removables (i.e. the eliteI disks) and non-clustering
	 * configurations.
	 */
	if (un->un_f_cfg_tur_check == TRUE) {
		mutex_enter(SD_MUTEX(un));
		if (un->un_ncmds_in_transport != 0) {
			/* Commands in flight: skip the TUR entirely. */
			mutex_exit(SD_MUTEX(un));
			return (0);
		}
		mutex_exit(SD_MUTEX(un));
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* TEST UNIT READY transfers no data; only sense data comes back. */
	cdb.scc_cmd = SCMD_TEST_UNIT_READY;

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP0;
	ucmd_buf.uscsi_bufaddr = NULL;
	ucmd_buf.uscsi_buflen = 0;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;

	/* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */
	if ((flag & SD_DONT_RETRY_TUR) != 0) {
		ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
	}
	ucmd_buf.uscsi_timeout = 60;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
	    UIO_SYSSPACE, UIO_SYSSPACE,
	    ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : SD_PATH_STANDARD));

	/* Map transport/status results onto the documented return codes. */
	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((flag & SD_CHECK_FOR_MEDIA) == 0) {
				break;
			}
			/*
			 * KEY_NOT_READY + ASC 0x3A means "medium not
			 * present"; report it as ENXIO when asked to.
			 */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (sense_buf.es_key == KEY_NOT_READY) &&
			    (sense_buf.es_add_code == 0x3A)) {
				status = ENXIO;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN
 *
 * Description: Issue the scsi PERSISTENT RESERVE IN command.
 *
 * Arguments: un - Pointer to soft state struct for the target.
 *		usr_cmd - service action: SD_READ_KEYS or SD_READ_RESV.
 *		data_len - length of caller's data buffer (0 if data_bufp
 *			is NULL).
 *		data_bufp - buffer for the parameter data returned by the
 *			device; may be NULL, in which case a default
 *			buffer is allocated and freed internally.
 *
 * Return Code: 0 - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd,
	uint16_t data_len, uchar_t *data_bufp)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb			cdb;
	struct uscsi_cmd		ucmd_buf;
	int				status;
	int				no_caller_buf = FALSE;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	if (data_bufp == NULL) {
		/* Allocate a default buf if the caller did not give one */
		ASSERT(data_len == 0);
		data_len  = MHIOC_RESV_KEY_SIZE;
		data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
		no_caller_buf = TRUE;
	}

	/* Group 1 (10-byte) CDB; byte 1 carries the service action. */
	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
	cdb.cdb_opaque[1] = usr_cmd;
	FORMG1COUNT(&cdb, data_len);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp;
	ucmd_buf.uscsi_buflen = data_len;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
	    UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			/*
			 * ILLEGAL REQUEST: device does not support
			 * persistent reservations.
			 */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");

	/* Release the internally-allocated buffer, if we created one. */
	if (no_caller_buf == TRUE) {
		kmem_free(data_bufp, data_len);
	}

	return (status);
}


/*
 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
 *
 * Description: This routine handles the multi-host persistent reservation
 *		requests (MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
 *		MHIOCGRP_PREEMPTANDABORT, MHIOCGRP_REGISTERANDIGNOREKEY)
 *		by sending the SCSI-3 PROUT commands to the
 *		device.
 *
 * Arguments: un  -  Pointer to soft state struct for the target.
 *		usr_cmd SCSI-3 reservation facility command (one of
 *			SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
 *			SD_SCSI3_PREEMPTANDABORT,
 *			SD_SCSI3_REGISTERANDIGNOREKEY)
 *		usr_bufp - user provided pointer register, reserve descriptor or
 *			preempt and abort structure (mhioc_register_t,
 *			mhioc_resv_desc_t, mhioc_preemptandabort_t,
 *			mhioc_registerandignorekey_t)
 *
 * Return Code: 0 - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd,
	uchar_t	*usr_bufp)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb			cdb;
	struct uscsi_cmd		ucmd_buf;
	int				status;
	uchar_t				data_len = sizeof (sd_prout_t);
	sd_prout_t			*prp;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(data_len == 24);	/* required by scsi spec */

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un);

	if (usr_bufp == NULL) {
		return (EINVAL);
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	/* Zeroed PROUT parameter list; filled in per-command below. */
	prp = kmem_zalloc(data_len, KM_SLEEP);

	cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT;
	cdb.cdb_opaque[1] = usr_cmd;
	FORMG1COUNT(&cdb, data_len);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)prp;
	ucmd_buf.uscsi_buflen = data_len;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	/*
	 * Translate the caller's mhioc_* structure into the PROUT
	 * parameter list and, where applicable, the CDB type/scope byte.
	 */
	switch (usr_cmd) {
	case SD_SCSI3_REGISTER: {
		mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp;

		bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		bcopy(ptr->newkey.key, prp->service_key,
		    MHIOC_RESV_KEY_SIZE);
		prp->aptpl = ptr->aptpl;
		break;
	}
	case SD_SCSI3_RESERVE:
	case SD_SCSI3_RELEASE: {
		mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;

		bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		prp->scope_address = BE_32(ptr->scope_specific_addr);
		cdb.cdb_opaque[2] = ptr->type;
		break;
	}
	case SD_SCSI3_PREEMPTANDABORT: {
		mhioc_preemptandabort_t *ptr =
		    (mhioc_preemptandabort_t *)usr_bufp;

		bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
		bcopy(ptr->victim_key.key, prp->service_key,
		    MHIOC_RESV_KEY_SIZE);
		prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr);
		cdb.cdb_opaque[2] = ptr->resvdesc.type;
		/* Jump the waitq: abort must get ahead of queued I/O. */
		ucmd_buf.uscsi_flags |= USCSI_HEAD;
		break;
	}
	case SD_SCSI3_REGISTERANDIGNOREKEY:
	{
		mhioc_registerandignorekey_t *ptr;
		ptr = (mhioc_registerandignorekey_t *)usr_bufp;
		bcopy(ptr->newkey.key,
		    prp->service_key, MHIOC_RESV_KEY_SIZE);
		prp->aptpl = ptr->aptpl;
		break;
	}
	default:
		/*
		 * NOTE(review): on non-DEBUG kernels ASSERT compiles away,
		 * so an unrecognized usr_cmd would still send a PROUT with
		 * an all-zero parameter list. Callers are expected to pass
		 * only the commands listed above.
		 */
		ASSERT(FALSE);
		break;
	}

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
	    UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			/*
			 * ILLEGAL REQUEST: device does not support
			 * persistent reservations.
			 */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) {
				status = ENOTSUP;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	kmem_free(prp, data_len);
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n");
	return (status);
}


/*
 * Function: sd_send_scsi_SYNCHRONIZE_CACHE
 *
 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target
 *
 * Arguments: un - pointer to the target's soft state struct
 *
 * Return Code: 0 - success
 *		errno-type error code
 *
 * Context: kernel thread context only.
 */

static int
sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb			cdb;
	struct uscsi_cmd		ucmd_buf;
	int				status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* Zeroed LBA/count fields: flush the entire cache. */
	cdb.scc_cmd = SCMD_SYNCHRONIZE_CACHE;

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = NULL;
	ucmd_buf.uscsi_buflen = 0;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
	/* A full cache flush can be slow; allow four minutes. */
	ucmd_buf.uscsi_timeout = 240;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
	    UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_DIRECT);

	/*
	 * A flush is best-effort here: several failure modes are
	 * deliberately ignored (reservation conflict, devices that do
	 * not implement the command, media not present).
	 */
	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			/* Ignore reservation conflict */
			status = 0;
			goto done;

		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) {
				/* Ignore Illegal Request error */
				status = 0;
				goto done;
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		/* Ignore error if the media is not present. */
		if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) {
			status = 0;
			goto done;
		}
		/* If we reach this, we had an error */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "SYNCHRONIZE CACHE command failed (%d)\n", status);
		break;
	}

done:
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_GET_CONFIGURATION
 *
 * Description: Issues the get configuration command to the device.
 *		Called from sd_check_for_writable_cd & sd_get_media_info
 *		caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN
 * Arguments: un - pointer to the target's soft state struct
 *		ucmdbuf - caller-provided uscsi_cmd, filled in here
 *		rqbuf - caller's request-sense buffer
 *		rqbuflen - length of rqbuf
 *		bufaddr - buffer for the returned configuration data
 *		buflen - length of bufaddr (must be SD_PROFILE_HEADER_LEN)
 *
 * Return Code: 0 - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 *
 */

static int
sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf,
	uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen)
{
	char	cdb[CDB_GROUP1];
	int	status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT(ucmdbuf != NULL);
	ASSERT(rqbuf != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);

	bzero(cdb, sizeof (cdb));
	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
	bzero(rqbuf, rqbuflen);
	bzero(bufaddr, buflen);

	/*
	 * Set up cdb field for the get configuration command.
	 */
	cdb[0] = SCMD_GET_CONFIGURATION;
	cdb[1] = 0x02;  /* Requested Type */
	/* Allocation length: only the feature header is requested. */
	cdb[8] = SD_PROFILE_HEADER_LEN;
	ucmdbuf->uscsi_cdb = cdb;
	ucmdbuf->uscsi_cdblen = CDB_GROUP1;
	ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
	ucmdbuf->uscsi_buflen = buflen;
	ucmdbuf->uscsi_timeout = sd_io_time;
	ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
	ucmdbuf->uscsi_rqlen = rqbuflen;
	ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, UIO_SYSSPACE,
	    UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		break;  /* Success! */
	case EIO:
		switch (ucmdbuf->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO,
		    "sd_send_scsi_GET_CONFIGURATION: data",
		    (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_CONFIGURATION: exit\n");

	return (status);
}

/*
 * Function: sd_send_scsi_feature_GET_CONFIGURATION
 *
 * Description: Issues the get configuration command to the device to
 *		retrieve a specfic feature. Called from
 *		sd_check_for_writable_cd & sd_set_mmc_caps.
 * Arguments: un
 *		ucmdbuf
 *		rqbuf
 *		rqbuflen
 *		bufaddr
 *		buflen
 *		feature
 *
 * Return Code: 0 - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
19925 * 19926 */ 19927 static int 19928 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 19929 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 19930 uchar_t *bufaddr, uint_t buflen, char feature) 19931 { 19932 char cdb[CDB_GROUP1]; 19933 int status; 19934 19935 ASSERT(un != NULL); 19936 ASSERT(!mutex_owned(SD_MUTEX(un))); 19937 ASSERT(bufaddr != NULL); 19938 ASSERT(ucmdbuf != NULL); 19939 ASSERT(rqbuf != NULL); 19940 19941 SD_TRACE(SD_LOG_IO, un, 19942 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 19943 19944 bzero(cdb, sizeof (cdb)); 19945 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 19946 bzero(rqbuf, rqbuflen); 19947 bzero(bufaddr, buflen); 19948 19949 /* 19950 * Set up cdb field for the get configuration command. 19951 */ 19952 cdb[0] = SCMD_GET_CONFIGURATION; 19953 cdb[1] = 0x02; /* Requested Type */ 19954 cdb[3] = feature; 19955 cdb[8] = buflen; 19956 ucmdbuf->uscsi_cdb = cdb; 19957 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19958 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19959 ucmdbuf->uscsi_buflen = buflen; 19960 ucmdbuf->uscsi_timeout = sd_io_time; 19961 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19962 ucmdbuf->uscsi_rqlen = rqbuflen; 19963 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19964 19965 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, UIO_SYSSPACE, 19966 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD); 19967 19968 switch (status) { 19969 case 0: 19970 break; /* Success! 
*/ 19971 case EIO: 19972 switch (ucmdbuf->uscsi_status) { 19973 case STATUS_RESERVATION_CONFLICT: 19974 status = EACCES; 19975 break; 19976 default: 19977 break; 19978 } 19979 break; 19980 default: 19981 break; 19982 } 19983 19984 if (status == 0) { 19985 SD_DUMP_MEMORY(un, SD_LOG_IO, 19986 "sd_send_scsi_feature_GET_CONFIGURATION: data", 19987 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19988 } 19989 19990 SD_TRACE(SD_LOG_IO, un, 19991 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 19992 19993 return (status); 19994 } 19995 19996 19997 /* 19998 * Function: sd_send_scsi_MODE_SENSE 19999 * 20000 * Description: Utility function for issuing a scsi MODE SENSE command. 20001 * Note: This routine uses a consistent implementation for Group0, 20002 * Group1, and Group2 commands across all platforms. ATAPI devices 20003 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 20004 * 20005 * Arguments: un - pointer to the softstate struct for the target. 20006 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 20007 * CDB_GROUP[1|2] (10 byte). 20008 * bufaddr - buffer for page data retrieved from the target. 20009 * buflen - size of page to be retrieved. 20010 * page_code - page code of data to be retrieved from the target. 20011 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20012 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20013 * to use the USCSI "direct" chain and bypass the normal 20014 * command waitq. 20015 * 20016 * Return Code: 0 - Success 20017 * errno return code from sd_send_scsi_cmd() 20018 * 20019 * Context: Can sleep. Does not return until command is completed. 
 */

static int
sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
	size_t buflen,  uchar_t page_code, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb			cdb;
	struct uscsi_cmd		ucmd_buf;
	int				status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));
	/* Pre-clear the page buffer so short transfers leave zeros. */
	bzero(bufaddr, buflen);

	/*
	 * Select the 6-byte or 10-byte MODE SENSE opcode based on the
	 * requested CDB size; the page code goes in byte 2 either way.
	 */
	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SENSE;
		cdb.cdb_opaque[2] = page_code;
		FORMG0COUNT(&cdb, buflen);
	} else {
		cdb.scc_cmd = SCMD_MODE_SENSE_G1;
		cdb.cdb_opaque[2] = page_code;
		FORMG1COUNT(&cdb, buflen);
	}

	/* Set LUN bit(s) in the CDB if this is a SCSI-1 device. */
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
	    UIO_SYSSPACE, UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_MODE_SELECT
 *
 * Description: Utility function for issuing a scsi MODE SELECT command.
 *		Note: This routine uses a consistent implementation for Group0,
 *		Group1, and Group2 commands across all platforms. ATAPI devices
 *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select
 *
 * Arguments: un - pointer to the softstate struct for the target.
 *		cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or
 *			  CDB_GROUP[1|2] (10 byte).
 *		bufaddr - buffer for page data to be sent to the target.
 *		buflen - size of page to be transferred.
 *		save_page - boolean to determin if SP bit should be set.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
	size_t buflen,  uchar_t save_page, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb			cdb;
	struct uscsi_cmd		ucmd_buf;
	int				status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* Set the PF bit for many third party drives */
	cdb.cdb_opaque[1] = 0x10;

	/* Set the savepage(SP) bit if given */
	if (save_page == SD_SAVE_PAGE) {
		cdb.cdb_opaque[1] |= 0x01;
	}

	/* Select the 6-byte or 10-byte MODE SELECT opcode. */
	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SELECT;
		FORMG0COUNT(&cdb, buflen);
	} else {
		cdb.scc_cmd = SCMD_MODE_SELECT_G1;
		FORMG1COUNT(&cdb, buflen);
	}

	/* Set LUN bit(s) in the CDB if this is a SCSI-1 device. */
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	/* MODE SELECT is a data-out command: USCSI_WRITE. */
	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
	    UIO_SYSSPACE, UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_RDWR
 *
 * Description: Issue a scsi READ or WRITE command with the given parameters.
 *
 * Arguments: un:      Pointer to the sd_lun struct for the target.
 *		cmd:	 SCMD_READ or SCMD_WRITE
 *		bufaddr: Address of caller's buffer to receive the RDWR data
 *		buflen:  Length of caller's buffer receive the RDWR data.
 *		start_block: Block number for the start of the RDWR operation.
 *			 (Assumes target-native block size.)
 *		residp:  Pointer to variable to receive the residual of the
 *			 RDWR operation (may be NULL if no residual requested).
 *			 NOTE(review): the current signature takes no residp
 *			 parameter; this entry appears to be stale.
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq.
 *
 * Return Code: 0 - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
	size_t buflen, daddr_t start_block, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb			cdb;
	struct uscsi_cmd		ucmd_buf;
	uint32_t			block_count;
	int				status;
	int				cdbsize;
	uchar_t				flag;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un);

	/* The target block size is needed to convert buflen to blocks. */
	if (un->un_f_tgt_blocksize_is_valid != TRUE) {
		return (EINVAL);
	}

	mutex_enter(SD_MUTEX(un));
	block_count = SD_BYTES2TGTBLOCKS(un, buflen);
	mutex_exit(SD_MUTEX(un));

	/* Data direction follows the command opcode. */
	flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE;

	SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: "
	    "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n",
	    bufaddr, buflen, start_block, block_count);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/*
	 * Compute CDB size to use: 16-byte CDBs for LBAs beyond 32 bits,
	 * 10-byte CDBs when the LBA exceeds the 21-bit Group 0 limit or
	 * the device is ATAPI, 6-byte CDBs otherwise.
	 */
	if (start_block > 0xffffffff)
		cdbsize = CDB_GROUP4;
	else if ((start_block & 0xFFE00000) ||
	    (un->un_f_cfg_is_atapi == TRUE))
		cdbsize = CDB_GROUP1;
	else
		cdbsize = CDB_GROUP0;

	switch (cdbsize) {
	case CDB_GROUP0:	/* 6-byte CDBs */
		cdb.scc_cmd = cmd;
		FORMG0ADDR(&cdb, start_block);
		FORMG0COUNT(&cdb, block_count);
		break;
	case CDB_GROUP1:	/* 10-byte CDBs */
		cdb.scc_cmd = cmd | SCMD_GROUP1;
		FORMG1ADDR(&cdb, start_block);
		FORMG1COUNT(&cdb, block_count);
		break;
	case CDB_GROUP4:	/* 16-byte CDBs */
		cdb.scc_cmd = cmd | SCMD_GROUP4;
		FORMG4LONGADDR(&cdb, (uint64_t)start_block);
		FORMG4COUNT(&cdb, block_count);
		break;
	case CDB_GROUP5:	/* 12-byte CDBs (currently unsupported) */
	default:
		/* All others reserved */
		return (EINVAL);
	}

	/* Set LUN bit(s) in CDB if this is a SCSI-1 device */
	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;
	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
	    UIO_SYSSPACE, UIO_SYSSPACE, path_flag);
	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n");

	return (status);
}


/*
 * Function: sd_send_scsi_LOG_SENSE
 *
 * Description: Issue a scsi LOG_SENSE command with the given parameters.
 *
 * Arguments: un:      Pointer to the sd_lun struct for the target.
 *		bufaddr - buffer for the returned log page data.
 *		buflen - length of bufaddr.
 *		page_code - log page to retrieve.
 *		page_control - page control field (placed in CDB byte 2
 *			bits 6-7).
 *		param_ptr - parameter pointer (CDB bytes 5-6).
 *		path_flag - SD_PATH_* chain selection, passed through to
 *			sd_send_scsi_cmd().
 *
 * Return Code: 0 - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context: Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen,
	uchar_t page_code, uchar_t page_control, uint16_t param_ptr,
	int path_flag)

{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb			cdb;
	struct uscsi_cmd		ucmd_buf;
	int				status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/*
	 * Group 1 LOG SENSE: byte 2 holds PC (bits 6-7) and page code,
	 * bytes 5-6 hold the big-endian parameter pointer.
	 */
	cdb.scc_cmd = SCMD_LOG_SENSE_G1;
	cdb.cdb_opaque[2] = (page_control << 6) | page_code;
	cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
	cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
	FORMG1COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
	    UIO_SYSSPACE, UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (sense_buf.es_key == KEY_ILLEGAL_REQUEST) &&
			    (sense_buf.es_add_code == 0x24)) {
				/*
				 * ASC 0x24: INVALID FIELD IN CDB
				 */
				switch (page_code) {
				case START_STOP_CYCLE_PAGE:
					/*
					 * The start stop cycle counter is
					 * implemented as page 0x31 in earlier
					 * generation disks. In new generation
					 * disks the start stop cycle counter is
					 * implemented as page 0xE. To properly
					 * handle this case if an attempt for
					 * log page 0xE is made and fails we
					 * will try again using page 0x31.
					 *
					 * Network storage BU committed to
					 * maintain the page 0x31 for this
					 * purpose and will not have any other
					 * page implemented with page code 0x31
					 * until all disks transition to the
					 * standard page.
					 */
					mutex_enter(SD_MUTEX(un));
					un->un_start_stop_cycle_page =
					    START_STOP_CYCLE_VU_PAGE;
					cdb.cdb_opaque[2] =
					    (char)(page_control << 6) |
					    un->un_start_stop_cycle_page;
					mutex_exit(SD_MUTEX(un));
					/* Reissue with the VU page code. */
					status = sd_send_scsi_cmd(
					    SD_GET_DEV(un), &ucmd_buf,
					    UIO_SYSSPACE, UIO_SYSSPACE,
					    UIO_SYSSPACE, path_flag);

					break;
				case TEMPERATURE_PAGE:
					/* Temperature page unsupported. */
					status = ENOTTY;
					break;
				default:
					break;
				}
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");

	return (status);
}


/*
 * Function: sdioctl
 *
 * Description: Driver's ioctl(9e) entry point function.
20459 * 20460 * Arguments: dev - device number 20461 * cmd - ioctl operation to be performed 20462 * arg - user argument, contains data to be set or reference 20463 * parameter for get 20464 * flag - bit flag, indicating open settings, 32/64 bit type 20465 * cred_p - user credential pointer 20466 * rval_p - calling process return value (OPT) 20467 * 20468 * Return Code: EINVAL 20469 * ENOTTY 20470 * ENXIO 20471 * EIO 20472 * EFAULT 20473 * ENOTSUP 20474 * EPERM 20475 * 20476 * Context: Called from the device switch at normal priority. 20477 */ 20478 20479 static int 20480 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 20481 { 20482 struct sd_lun *un = NULL; 20483 int geom_validated = FALSE; 20484 int err = 0; 20485 int i = 0; 20486 cred_t *cr; 20487 20488 /* 20489 * All device accesses go thru sdstrategy where we check on suspend 20490 * status 20491 */ 20492 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20493 return (ENXIO); 20494 } 20495 20496 ASSERT(!mutex_owned(SD_MUTEX(un))); 20497 20498 /* 20499 * Moved this wait from sd_uscsi_strategy to here for 20500 * reasons of deadlock prevention. Internal driver commands, 20501 * specifically those to change a devices power level, result 20502 * in a call to sd_uscsi_strategy. 20503 */ 20504 mutex_enter(SD_MUTEX(un)); 20505 while ((un->un_state == SD_STATE_SUSPENDED) || 20506 (un->un_state == SD_STATE_PM_CHANGING)) { 20507 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 20508 } 20509 /* 20510 * Twiddling the counter here protects commands from now 20511 * through to the top of sd_uscsi_strategy. Without the 20512 * counter inc. a power down, for example, could get in 20513 * after the above check for state is made and before 20514 * execution gets to the top of sd_uscsi_strategy. 20515 * That would cause problems. 
20516 */ 20517 un->un_ncmds_in_driver++; 20518 20519 if ((un->un_f_geometry_is_valid == FALSE) && 20520 (flag & (FNDELAY | FNONBLOCK))) { 20521 switch (cmd) { 20522 case CDROMPAUSE: 20523 case CDROMRESUME: 20524 case CDROMPLAYMSF: 20525 case CDROMPLAYTRKIND: 20526 case CDROMREADTOCHDR: 20527 case CDROMREADTOCENTRY: 20528 case CDROMSTOP: 20529 case CDROMSTART: 20530 case CDROMVOLCTRL: 20531 case CDROMSUBCHNL: 20532 case CDROMREADMODE2: 20533 case CDROMREADMODE1: 20534 case CDROMREADOFFSET: 20535 case CDROMSBLKMODE: 20536 case CDROMGBLKMODE: 20537 case CDROMGDRVSPEED: 20538 case CDROMSDRVSPEED: 20539 case CDROMCDDA: 20540 case CDROMCDXA: 20541 case CDROMSUBCODE: 20542 if (!ISCD(un)) { 20543 un->un_ncmds_in_driver--; 20544 ASSERT(un->un_ncmds_in_driver >= 0); 20545 mutex_exit(SD_MUTEX(un)); 20546 return (ENOTTY); 20547 } 20548 break; 20549 case FDEJECT: 20550 case DKIOCEJECT: 20551 case CDROMEJECT: 20552 if (!ISREMOVABLE(un)) { 20553 un->un_ncmds_in_driver--; 20554 ASSERT(un->un_ncmds_in_driver >= 0); 20555 mutex_exit(SD_MUTEX(un)); 20556 return (ENOTTY); 20557 } 20558 break; 20559 case DKIOCSVTOC: 20560 case DKIOCSETEFI: 20561 case DKIOCSMBOOT: 20562 mutex_exit(SD_MUTEX(un)); 20563 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 20564 if (err != 0) { 20565 mutex_enter(SD_MUTEX(un)); 20566 un->un_ncmds_in_driver--; 20567 ASSERT(un->un_ncmds_in_driver >= 0); 20568 mutex_exit(SD_MUTEX(un)); 20569 return (EIO); 20570 } 20571 mutex_enter(SD_MUTEX(un)); 20572 /* FALLTHROUGH */ 20573 case DKIOCREMOVABLE: 20574 case DKIOCINFO: 20575 case DKIOCGMEDIAINFO: 20576 case MHIOCENFAILFAST: 20577 case MHIOCSTATUS: 20578 case MHIOCTKOWN: 20579 case MHIOCRELEASE: 20580 case MHIOCGRP_INKEYS: 20581 case MHIOCGRP_INRESV: 20582 case MHIOCGRP_REGISTER: 20583 case MHIOCGRP_RESERVE: 20584 case MHIOCGRP_PREEMPTANDABORT: 20585 case MHIOCGRP_REGISTERANDIGNOREKEY: 20586 case CDROMCLOSETRAY: 20587 case USCSICMD: 20588 goto skip_ready_valid; 20589 default: 20590 break; 20591 } 20592 20593 
mutex_exit(SD_MUTEX(un)); 20594 err = sd_ready_and_valid(un); 20595 mutex_enter(SD_MUTEX(un)); 20596 if (err == SD_READY_NOT_VALID) { 20597 switch (cmd) { 20598 case DKIOCGAPART: 20599 case DKIOCGGEOM: 20600 case DKIOCSGEOM: 20601 case DKIOCGVTOC: 20602 case DKIOCSVTOC: 20603 case DKIOCSAPART: 20604 case DKIOCG_PHYGEOM: 20605 case DKIOCG_VIRTGEOM: 20606 err = ENOTSUP; 20607 un->un_ncmds_in_driver--; 20608 ASSERT(un->un_ncmds_in_driver >= 0); 20609 mutex_exit(SD_MUTEX(un)); 20610 return (err); 20611 } 20612 } 20613 if (err != SD_READY_VALID) { 20614 switch (cmd) { 20615 case DKIOCSTATE: 20616 case CDROMGDRVSPEED: 20617 case CDROMSDRVSPEED: 20618 case FDEJECT: /* for eject command */ 20619 case DKIOCEJECT: 20620 case CDROMEJECT: 20621 case DKIOCGETEFI: 20622 case DKIOCSGEOM: 20623 case DKIOCREMOVABLE: 20624 case DKIOCSAPART: 20625 case DKIOCSETEFI: 20626 break; 20627 default: 20628 if (ISREMOVABLE(un)) { 20629 err = ENXIO; 20630 } else { 20631 /* Do not map EACCES to EIO */ 20632 if (err != EACCES) 20633 err = EIO; 20634 } 20635 un->un_ncmds_in_driver--; 20636 ASSERT(un->un_ncmds_in_driver >= 0); 20637 mutex_exit(SD_MUTEX(un)); 20638 return (err); 20639 } 20640 } 20641 geom_validated = TRUE; 20642 } 20643 if ((un->un_f_geometry_is_valid == TRUE) && 20644 (un->un_solaris_size > 0)) { 20645 /* 20646 * the "geometry_is_valid" flag could be true if we 20647 * have an fdisk table but no Solaris partition 20648 */ 20649 if (un->un_vtoc.v_sanity != VTOC_SANE) { 20650 /* it is EFI, so return ENOTSUP for these */ 20651 switch (cmd) { 20652 case DKIOCGAPART: 20653 case DKIOCGGEOM: 20654 case DKIOCGVTOC: 20655 case DKIOCSVTOC: 20656 case DKIOCSAPART: 20657 err = ENOTSUP; 20658 un->un_ncmds_in_driver--; 20659 ASSERT(un->un_ncmds_in_driver >= 0); 20660 mutex_exit(SD_MUTEX(un)); 20661 return (err); 20662 } 20663 } 20664 } 20665 20666 skip_ready_valid: 20667 mutex_exit(SD_MUTEX(un)); 20668 20669 switch (cmd) { 20670 case DKIOCINFO: 20671 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 
20672 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 20673 break; 20674 20675 case DKIOCGMEDIAINFO: 20676 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 20677 err = sd_get_media_info(dev, (caddr_t)arg, flag); 20678 break; 20679 20680 case DKIOCGGEOM: 20681 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGGEOM\n"); 20682 err = sd_dkio_get_geometry(dev, (caddr_t)arg, flag, 20683 geom_validated); 20684 break; 20685 20686 case DKIOCSGEOM: 20687 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSGEOM\n"); 20688 err = sd_dkio_set_geometry(dev, (caddr_t)arg, flag); 20689 break; 20690 20691 case DKIOCGAPART: 20692 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGAPART\n"); 20693 err = sd_dkio_get_partition(dev, (caddr_t)arg, flag, 20694 geom_validated); 20695 break; 20696 20697 case DKIOCSAPART: 20698 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSAPART\n"); 20699 err = sd_dkio_set_partition(dev, (caddr_t)arg, flag); 20700 break; 20701 20702 case DKIOCGVTOC: 20703 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGVTOC\n"); 20704 err = sd_dkio_get_vtoc(dev, (caddr_t)arg, flag, 20705 geom_validated); 20706 break; 20707 20708 case DKIOCGETEFI: 20709 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGETEFI\n"); 20710 err = sd_dkio_get_efi(dev, (caddr_t)arg, flag); 20711 break; 20712 20713 case DKIOCPARTITION: 20714 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCPARTITION\n"); 20715 err = sd_dkio_partition(dev, (caddr_t)arg, flag); 20716 break; 20717 20718 case DKIOCSVTOC: 20719 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSVTOC\n"); 20720 err = sd_dkio_set_vtoc(dev, (caddr_t)arg, flag); 20721 break; 20722 20723 case DKIOCSETEFI: 20724 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSETEFI\n"); 20725 err = sd_dkio_set_efi(dev, (caddr_t)arg, flag); 20726 break; 20727 20728 case DKIOCGMBOOT: 20729 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMBOOT\n"); 20730 err = sd_dkio_get_mboot(dev, (caddr_t)arg, flag); 20731 break; 20732 20733 case DKIOCSMBOOT: 20734 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSMBOOT\n"); 20735 err = sd_dkio_set_mboot(dev, (caddr_t)arg, flag); 20736 break; 20737 20738 case DKIOCLOCK: 20739 
SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 20740 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 20741 SD_PATH_STANDARD); 20742 break; 20743 20744 case DKIOCUNLOCK: 20745 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 20746 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 20747 SD_PATH_STANDARD); 20748 break; 20749 20750 case DKIOCSTATE: { 20751 enum dkio_state state; 20752 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 20753 20754 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 20755 err = EFAULT; 20756 } else { 20757 err = sd_check_media(dev, state); 20758 if (err == 0) { 20759 if (ddi_copyout(&un->un_mediastate, (void *)arg, 20760 sizeof (int), flag) != 0) 20761 err = EFAULT; 20762 } 20763 } 20764 break; 20765 } 20766 20767 case DKIOCREMOVABLE: 20768 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 20769 if (ISREMOVABLE(un)) { 20770 i = 1; 20771 } else { 20772 i = 0; 20773 } 20774 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 20775 err = EFAULT; 20776 } else { 20777 err = 0; 20778 } 20779 break; 20780 20781 case DKIOCGTEMPERATURE: 20782 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n"); 20783 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 20784 break; 20785 20786 case MHIOCENFAILFAST: 20787 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 20788 if ((err = drv_priv(cred_p)) == 0) { 20789 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 20790 } 20791 break; 20792 20793 case MHIOCTKOWN: 20794 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 20795 if ((err = drv_priv(cred_p)) == 0) { 20796 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 20797 } 20798 break; 20799 20800 case MHIOCRELEASE: 20801 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 20802 if ((err = drv_priv(cred_p)) == 0) { 20803 err = sd_mhdioc_release(dev); 20804 } 20805 break; 20806 20807 case MHIOCSTATUS: 20808 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 20809 if ((err = drv_priv(cred_p)) == 0) { 20810 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 20811 case 0: 20812 err = 
0; 20813 break; 20814 case EACCES: 20815 *rval_p = 1; 20816 err = 0; 20817 break; 20818 default: 20819 err = EIO; 20820 break; 20821 } 20822 } 20823 break; 20824 20825 case MHIOCQRESERVE: 20826 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 20827 if ((err = drv_priv(cred_p)) == 0) { 20828 err = sd_reserve_release(dev, SD_RESERVE); 20829 } 20830 break; 20831 20832 case MHIOCREREGISTERDEVID: 20833 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 20834 if (drv_priv(cred_p) == EPERM) { 20835 err = EPERM; 20836 } else if (ISREMOVABLE(un) || ISCD(un)) { 20837 err = ENOTTY; 20838 } else { 20839 err = sd_mhdioc_register_devid(dev); 20840 } 20841 break; 20842 20843 case MHIOCGRP_INKEYS: 20844 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 20845 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20846 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20847 err = ENOTSUP; 20848 } else { 20849 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 20850 flag); 20851 } 20852 } 20853 break; 20854 20855 case MHIOCGRP_INRESV: 20856 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 20857 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20858 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20859 err = ENOTSUP; 20860 } else { 20861 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 20862 } 20863 } 20864 break; 20865 20866 case MHIOCGRP_REGISTER: 20867 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 20868 if ((err = drv_priv(cred_p)) != EPERM) { 20869 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20870 err = ENOTSUP; 20871 } else if (arg != NULL) { 20872 mhioc_register_t reg; 20873 if (ddi_copyin((void *)arg, ®, 20874 sizeof (mhioc_register_t), flag) != 0) { 20875 err = EFAULT; 20876 } else { 20877 err = 20878 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20879 un, SD_SCSI3_REGISTER, 20880 (uchar_t *)®); 20881 } 20882 } 20883 } 20884 break; 20885 20886 case MHIOCGRP_RESERVE: 20887 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 20888 if ((err = 
drv_priv(cred_p)) != EPERM) { 20889 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20890 err = ENOTSUP; 20891 } else if (arg != NULL) { 20892 mhioc_resv_desc_t resv_desc; 20893 if (ddi_copyin((void *)arg, &resv_desc, 20894 sizeof (mhioc_resv_desc_t), flag) != 0) { 20895 err = EFAULT; 20896 } else { 20897 err = 20898 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20899 un, SD_SCSI3_RESERVE, 20900 (uchar_t *)&resv_desc); 20901 } 20902 } 20903 } 20904 break; 20905 20906 case MHIOCGRP_PREEMPTANDABORT: 20907 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 20908 if ((err = drv_priv(cred_p)) != EPERM) { 20909 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20910 err = ENOTSUP; 20911 } else if (arg != NULL) { 20912 mhioc_preemptandabort_t preempt_abort; 20913 if (ddi_copyin((void *)arg, &preempt_abort, 20914 sizeof (mhioc_preemptandabort_t), 20915 flag) != 0) { 20916 err = EFAULT; 20917 } else { 20918 err = 20919 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20920 un, SD_SCSI3_PREEMPTANDABORT, 20921 (uchar_t *)&preempt_abort); 20922 } 20923 } 20924 } 20925 break; 20926 20927 case MHIOCGRP_REGISTERANDIGNOREKEY: 20928 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 20929 if ((err = drv_priv(cred_p)) != EPERM) { 20930 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20931 err = ENOTSUP; 20932 } else if (arg != NULL) { 20933 mhioc_registerandignorekey_t r_and_i; 20934 if (ddi_copyin((void *)arg, (void *)&r_and_i, 20935 sizeof (mhioc_registerandignorekey_t), 20936 flag) != 0) { 20937 err = EFAULT; 20938 } else { 20939 err = 20940 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20941 un, SD_SCSI3_REGISTERANDIGNOREKEY, 20942 (uchar_t *)&r_and_i); 20943 } 20944 } 20945 } 20946 break; 20947 20948 case USCSICMD: 20949 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 20950 cr = ddi_get_cred(); 20951 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 20952 err = EPERM; 20953 } else { 20954 err = sd_uscsi_ioctl(dev, (caddr_t)arg, flag); 20955 } 20956 break; 20957 20958 case 
CDROMPAUSE: 20959 case CDROMRESUME: 20960 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 20961 if (!ISCD(un)) { 20962 err = ENOTTY; 20963 } else { 20964 err = sr_pause_resume(dev, cmd); 20965 } 20966 break; 20967 20968 case CDROMPLAYMSF: 20969 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 20970 if (!ISCD(un)) { 20971 err = ENOTTY; 20972 } else { 20973 err = sr_play_msf(dev, (caddr_t)arg, flag); 20974 } 20975 break; 20976 20977 case CDROMPLAYTRKIND: 20978 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 20979 #if defined(__i386) || defined(__amd64) 20980 /* 20981 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 20982 */ 20983 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20984 #else 20985 if (!ISCD(un)) { 20986 #endif 20987 err = ENOTTY; 20988 } else { 20989 err = sr_play_trkind(dev, (caddr_t)arg, flag); 20990 } 20991 break; 20992 20993 case CDROMREADTOCHDR: 20994 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 20995 if (!ISCD(un)) { 20996 err = ENOTTY; 20997 } else { 20998 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 20999 } 21000 break; 21001 21002 case CDROMREADTOCENTRY: 21003 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 21004 if (!ISCD(un)) { 21005 err = ENOTTY; 21006 } else { 21007 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 21008 } 21009 break; 21010 21011 case CDROMSTOP: 21012 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 21013 if (!ISCD(un)) { 21014 err = ENOTTY; 21015 } else { 21016 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP, 21017 SD_PATH_STANDARD); 21018 } 21019 break; 21020 21021 case CDROMSTART: 21022 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 21023 if (!ISCD(un)) { 21024 err = ENOTTY; 21025 } else { 21026 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 21027 SD_PATH_STANDARD); 21028 } 21029 break; 21030 21031 case CDROMCLOSETRAY: 21032 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 21033 if (!ISCD(un)) { 21034 err = ENOTTY; 21035 } else { 21036 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE, 
21037 SD_PATH_STANDARD); 21038 } 21039 break; 21040 21041 case FDEJECT: /* for eject command */ 21042 case DKIOCEJECT: 21043 case CDROMEJECT: 21044 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 21045 if (!ISREMOVABLE(un)) { 21046 err = ENOTTY; 21047 } else { 21048 err = sr_eject(dev); 21049 } 21050 break; 21051 21052 case CDROMVOLCTRL: 21053 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 21054 if (!ISCD(un)) { 21055 err = ENOTTY; 21056 } else { 21057 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 21058 } 21059 break; 21060 21061 case CDROMSUBCHNL: 21062 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 21063 if (!ISCD(un)) { 21064 err = ENOTTY; 21065 } else { 21066 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 21067 } 21068 break; 21069 21070 case CDROMREADMODE2: 21071 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 21072 if (!ISCD(un)) { 21073 err = ENOTTY; 21074 } else if (un->un_f_cfg_is_atapi == TRUE) { 21075 /* 21076 * If the drive supports READ CD, use that instead of 21077 * switching the LBA size via a MODE SELECT 21078 * Block Descriptor 21079 */ 21080 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 21081 } else { 21082 err = sr_read_mode2(dev, (caddr_t)arg, flag); 21083 } 21084 break; 21085 21086 case CDROMREADMODE1: 21087 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 21088 if (!ISCD(un)) { 21089 err = ENOTTY; 21090 } else { 21091 err = sr_read_mode1(dev, (caddr_t)arg, flag); 21092 } 21093 break; 21094 21095 case CDROMREADOFFSET: 21096 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 21097 if (!ISCD(un)) { 21098 err = ENOTTY; 21099 } else { 21100 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 21101 flag); 21102 } 21103 break; 21104 21105 case CDROMSBLKMODE: 21106 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 21107 /* 21108 * There is no means of changing block size in case of atapi 21109 * drives, thus return ENOTTY if drive type is atapi 21110 */ 21111 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 21112 err = ENOTTY; 21113 } else if 
(un->un_f_mmc_cap == TRUE) { 21114 21115 /* 21116 * MMC Devices do not support changing the 21117 * logical block size 21118 * 21119 * Note: EINVAL is being returned instead of ENOTTY to 21120 * maintain consistancy with the original mmc 21121 * driver update. 21122 */ 21123 err = EINVAL; 21124 } else { 21125 mutex_enter(SD_MUTEX(un)); 21126 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 21127 (un->un_ncmds_in_transport > 0)) { 21128 mutex_exit(SD_MUTEX(un)); 21129 err = EINVAL; 21130 } else { 21131 mutex_exit(SD_MUTEX(un)); 21132 err = sr_change_blkmode(dev, cmd, arg, flag); 21133 } 21134 } 21135 break; 21136 21137 case CDROMGBLKMODE: 21138 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 21139 if (!ISCD(un)) { 21140 err = ENOTTY; 21141 } else if ((un->un_f_cfg_is_atapi != FALSE) && 21142 (un->un_f_blockcount_is_valid != FALSE)) { 21143 /* 21144 * Drive is an ATAPI drive so return target block 21145 * size for ATAPI drives since we cannot change the 21146 * blocksize on ATAPI drives. Used primarily to detect 21147 * if an ATAPI cdrom is present. 21148 */ 21149 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 21150 sizeof (int), flag) != 0) { 21151 err = EFAULT; 21152 } else { 21153 err = 0; 21154 } 21155 21156 } else { 21157 /* 21158 * Drive supports changing block sizes via a Mode 21159 * Select. 
21160 */ 21161 err = sr_change_blkmode(dev, cmd, arg, flag); 21162 } 21163 break; 21164 21165 case CDROMGDRVSPEED: 21166 case CDROMSDRVSPEED: 21167 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 21168 if (!ISCD(un)) { 21169 err = ENOTTY; 21170 } else if (un->un_f_mmc_cap == TRUE) { 21171 /* 21172 * Note: In the future the driver implementation 21173 * for getting and 21174 * setting cd speed should entail: 21175 * 1) If non-mmc try the Toshiba mode page 21176 * (sr_change_speed) 21177 * 2) If mmc but no support for Real Time Streaming try 21178 * the SET CD SPEED (0xBB) command 21179 * (sr_atapi_change_speed) 21180 * 3) If mmc and support for Real Time Streaming 21181 * try the GET PERFORMANCE and SET STREAMING 21182 * commands (not yet implemented, 4380808) 21183 */ 21184 /* 21185 * As per recent MMC spec, CD-ROM speed is variable 21186 * and changes with LBA. Since there is no such 21187 * things as drive speed now, fail this ioctl. 21188 * 21189 * Note: EINVAL is returned for consistancy of original 21190 * implementation which included support for getting 21191 * the drive speed of mmc devices but not setting 21192 * the drive speed. Thus EINVAL would be returned 21193 * if a set request was made for an mmc device. 21194 * We no longer support get or set speed for 21195 * mmc but need to remain consistant with regard 21196 * to the error code returned. 
21197 */ 21198 err = EINVAL; 21199 } else if (un->un_f_cfg_is_atapi == TRUE) { 21200 err = sr_atapi_change_speed(dev, cmd, arg, flag); 21201 } else { 21202 err = sr_change_speed(dev, cmd, arg, flag); 21203 } 21204 break; 21205 21206 case CDROMCDDA: 21207 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 21208 if (!ISCD(un)) { 21209 err = ENOTTY; 21210 } else { 21211 err = sr_read_cdda(dev, (void *)arg, flag); 21212 } 21213 break; 21214 21215 case CDROMCDXA: 21216 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 21217 if (!ISCD(un)) { 21218 err = ENOTTY; 21219 } else { 21220 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 21221 } 21222 break; 21223 21224 case CDROMSUBCODE: 21225 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 21226 if (!ISCD(un)) { 21227 err = ENOTTY; 21228 } else { 21229 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 21230 } 21231 break; 21232 21233 case DKIOCPARTINFO: { 21234 /* 21235 * Return parameters describing the selected disk slice. 21236 * Note: this ioctl is for the intel platform only 21237 */ 21238 #if defined(__i386) || defined(__amd64) 21239 int part; 21240 21241 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCPARTINFO\n"); 21242 part = SDPART(dev); 21243 21244 /* don't check un_solaris_size for pN */ 21245 if (part < P0_RAW_DISK && un->un_solaris_size == 0) { 21246 err = EIO; 21247 } else { 21248 struct part_info p; 21249 21250 p.p_start = (daddr_t)un->un_offset[part]; 21251 p.p_length = (int)un->un_map[part].dkl_nblk; 21252 #ifdef _MULTI_DATAMODEL 21253 switch (ddi_model_convert_from(flag & FMODELS)) { 21254 case DDI_MODEL_ILP32: 21255 { 21256 struct part_info32 p32; 21257 21258 p32.p_start = (daddr32_t)p.p_start; 21259 p32.p_length = p.p_length; 21260 if (ddi_copyout(&p32, (void *)arg, 21261 sizeof (p32), flag)) 21262 err = EFAULT; 21263 break; 21264 } 21265 21266 case DDI_MODEL_NONE: 21267 { 21268 if (ddi_copyout(&p, (void *)arg, sizeof (p), 21269 flag)) 21270 err = EFAULT; 21271 break; 21272 } 21273 } 21274 #else /* ! 
_MULTI_DATAMODEL */ 21275 if (ddi_copyout(&p, (void *)arg, sizeof (p), flag)) 21276 err = EFAULT; 21277 #endif /* _MULTI_DATAMODEL */ 21278 } 21279 #else 21280 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCPARTINFO\n"); 21281 err = ENOTTY; 21282 #endif 21283 break; 21284 } 21285 21286 case DKIOCG_PHYGEOM: { 21287 /* Return the driver's notion of the media physical geometry */ 21288 #if defined(__i386) || defined(__amd64) 21289 struct dk_geom disk_geom; 21290 struct dk_geom *dkgp = &disk_geom; 21291 21292 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_PHYGEOM\n"); 21293 mutex_enter(SD_MUTEX(un)); 21294 21295 if (un->un_g.dkg_nhead != 0 && 21296 un->un_g.dkg_nsect != 0) { 21297 /* 21298 * We succeeded in getting a geometry, but 21299 * right now it is being reported as just the 21300 * Solaris fdisk partition, just like for 21301 * DKIOCGGEOM. We need to change that to be 21302 * correct for the entire disk now. 21303 */ 21304 bcopy(&un->un_g, dkgp, sizeof (*dkgp)); 21305 dkgp->dkg_acyl = 0; 21306 dkgp->dkg_ncyl = un->un_blockcount / 21307 (dkgp->dkg_nhead * dkgp->dkg_nsect); 21308 } else { 21309 bzero(dkgp, sizeof (struct dk_geom)); 21310 /* 21311 * This disk does not have a Solaris VTOC 21312 * so we must present a physical geometry 21313 * that will remain consistent regardless 21314 * of how the disk is used. This will ensure 21315 * that the geometry does not change regardless 21316 * of the fdisk partition type (ie. EFI, FAT32, 21317 * Solaris, etc). 
21318 */ 21319 if (ISCD(un)) { 21320 dkgp->dkg_nhead = un->un_pgeom.g_nhead; 21321 dkgp->dkg_nsect = un->un_pgeom.g_nsect; 21322 dkgp->dkg_ncyl = un->un_pgeom.g_ncyl; 21323 dkgp->dkg_acyl = un->un_pgeom.g_acyl; 21324 } else { 21325 sd_convert_geometry(un->un_blockcount, dkgp); 21326 dkgp->dkg_acyl = 0; 21327 dkgp->dkg_ncyl = un->un_blockcount / 21328 (dkgp->dkg_nhead * dkgp->dkg_nsect); 21329 } 21330 } 21331 dkgp->dkg_pcyl = dkgp->dkg_ncyl + dkgp->dkg_acyl; 21332 21333 if (ddi_copyout(dkgp, (void *)arg, 21334 sizeof (struct dk_geom), flag)) { 21335 mutex_exit(SD_MUTEX(un)); 21336 err = EFAULT; 21337 } else { 21338 mutex_exit(SD_MUTEX(un)); 21339 err = 0; 21340 } 21341 #else 21342 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_PHYGEOM\n"); 21343 err = ENOTTY; 21344 #endif 21345 break; 21346 } 21347 21348 case DKIOCG_VIRTGEOM: { 21349 /* Return the driver's notion of the media's logical geometry */ 21350 #if defined(__i386) || defined(__amd64) 21351 struct dk_geom disk_geom; 21352 struct dk_geom *dkgp = &disk_geom; 21353 21354 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_VIRTGEOM\n"); 21355 mutex_enter(SD_MUTEX(un)); 21356 /* 21357 * If there is no HBA geometry available, or 21358 * if the HBA returned us something that doesn't 21359 * really fit into an Int 13/function 8 geometry 21360 * result, just fail the ioctl. See PSARC 1998/313. 
21361 */ 21362 if (un->un_lgeom.g_nhead == 0 || 21363 un->un_lgeom.g_nsect == 0 || 21364 un->un_lgeom.g_ncyl > 1024) { 21365 mutex_exit(SD_MUTEX(un)); 21366 err = EINVAL; 21367 } else { 21368 dkgp->dkg_ncyl = un->un_lgeom.g_ncyl; 21369 dkgp->dkg_acyl = un->un_lgeom.g_acyl; 21370 dkgp->dkg_pcyl = dkgp->dkg_ncyl + dkgp->dkg_acyl; 21371 dkgp->dkg_nhead = un->un_lgeom.g_nhead; 21372 dkgp->dkg_nsect = un->un_lgeom.g_nsect; 21373 21374 if (ddi_copyout(dkgp, (void *)arg, 21375 sizeof (struct dk_geom), flag)) { 21376 mutex_exit(SD_MUTEX(un)); 21377 err = EFAULT; 21378 } else { 21379 mutex_exit(SD_MUTEX(un)); 21380 err = 0; 21381 } 21382 } 21383 #else 21384 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_VIRTGEOM\n"); 21385 err = ENOTTY; 21386 #endif 21387 break; 21388 } 21389 #ifdef SDDEBUG 21390 /* RESET/ABORTS testing ioctls */ 21391 case DKIOCRESET: { 21392 int reset_level; 21393 21394 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 21395 err = EFAULT; 21396 } else { 21397 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 21398 "reset_level = 0x%lx\n", reset_level); 21399 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 21400 err = 0; 21401 } else { 21402 err = EIO; 21403 } 21404 } 21405 break; 21406 } 21407 21408 case DKIOCABORT: 21409 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 21410 if (scsi_abort(SD_ADDRESS(un), NULL)) { 21411 err = 0; 21412 } else { 21413 err = EIO; 21414 } 21415 break; 21416 #endif 21417 21418 #ifdef SD_FAULT_INJECTION 21419 /* SDIOC FaultInjection testing ioctls */ 21420 case SDIOCSTART: 21421 case SDIOCSTOP: 21422 case SDIOCINSERTPKT: 21423 case SDIOCINSERTXB: 21424 case SDIOCINSERTUN: 21425 case SDIOCINSERTARQ: 21426 case SDIOCPUSH: 21427 case SDIOCRETRIEVE: 21428 case SDIOCRUN: 21429 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 21430 "SDIOC detected cmd:0x%X:\n", cmd); 21431 /* call error generator */ 21432 sd_faultinjection_ioctl(cmd, arg, un); 21433 err = 0; 21434 break; 21435 21436 #endif /* SD_FAULT_INJECTION */ 21437 21438 default: 
21439 err = ENOTTY; 21440 break; 21441 } 21442 mutex_enter(SD_MUTEX(un)); 21443 un->un_ncmds_in_driver--; 21444 ASSERT(un->un_ncmds_in_driver >= 0); 21445 mutex_exit(SD_MUTEX(un)); 21446 21447 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 21448 return (err); 21449 } 21450 21451 21452 /* 21453 * Function: sd_uscsi_ioctl 21454 * 21455 * Description: This routine is the driver entry point for handling USCSI ioctl 21456 * requests (USCSICMD). 21457 * 21458 * Arguments: dev - the device number 21459 * arg - user provided scsi command 21460 * flag - this argument is a pass through to ddi_copyxxx() 21461 * directly from the mode argument of ioctl(). 21462 * 21463 * Return Code: code returned by sd_send_scsi_cmd 21464 * ENXIO 21465 * EFAULT 21466 * EAGAIN 21467 */ 21468 21469 static int 21470 sd_uscsi_ioctl(dev_t dev, caddr_t arg, int flag) 21471 { 21472 #ifdef _MULTI_DATAMODEL 21473 /* 21474 * For use when a 32 bit app makes a call into a 21475 * 64 bit ioctl 21476 */ 21477 struct uscsi_cmd32 uscsi_cmd_32_for_64; 21478 struct uscsi_cmd32 *ucmd32 = &uscsi_cmd_32_for_64; 21479 model_t model; 21480 #endif /* _MULTI_DATAMODEL */ 21481 struct uscsi_cmd *scmd = NULL; 21482 struct sd_lun *un = NULL; 21483 enum uio_seg uioseg; 21484 char cdb[CDB_GROUP0]; 21485 int rval = 0; 21486 21487 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21488 return (ENXIO); 21489 } 21490 21491 SD_TRACE(SD_LOG_IOCTL, un, "sd_uscsi_ioctl: entry: un:0x%p\n", un); 21492 21493 scmd = (struct uscsi_cmd *) 21494 kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 21495 21496 #ifdef _MULTI_DATAMODEL 21497 switch (model = ddi_model_convert_from(flag & FMODELS)) { 21498 case DDI_MODEL_ILP32: 21499 { 21500 if (ddi_copyin((void *)arg, ucmd32, sizeof (*ucmd32), flag)) { 21501 rval = EFAULT; 21502 goto done; 21503 } 21504 /* 21505 * Convert the ILP32 uscsi data from the 21506 * application to LP64 for internal use. 
		 */
		uscsi_cmd32touscsi_cmd(ucmd32, scmd);
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin((void *)arg, scmd, sizeof (*scmd), flag)) {
			rval = EFAULT;
			goto done;
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin((void *)arg, scmd, sizeof (*scmd), flag)) {
		rval = EFAULT;
		goto done;
	}
#endif /* _MULTI_DATAMODEL */

	/* Internal commands must remain interruptible. */
	scmd->uscsi_flags &= ~USCSI_NOINTR;
	uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : UIO_USERSPACE;
	/* Only one FORMAT may be outstanding; reject commands meanwhile. */
	if (un->un_f_format_in_progress == TRUE) {
		rval = EAGAIN;
		goto done;
	}

	/*
	 * Gotta do the ddi_copyin() here on the uscsi_cdb so that
	 * we will have a valid cdb[0] to test.
	 */
	if ((ddi_copyin(scmd->uscsi_cdb, cdb, CDB_GROUP0, flag) == 0) &&
	    (cdb[0] == SCMD_FORMAT)) {
		SD_TRACE(SD_LOG_IOCTL, un,
		    "sd_uscsi_ioctl: scmd->uscsi_cdb 0x%x\n", cdb[0]);
		/* Flag the format so concurrent USCSI requests get EAGAIN. */
		mutex_enter(SD_MUTEX(un));
		un->un_f_format_in_progress = TRUE;
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_cmd(dev, scmd, uioseg, uioseg, uioseg,
		    SD_PATH_STANDARD);
		mutex_enter(SD_MUTEX(un));
		un->un_f_format_in_progress = FALSE;
		mutex_exit(SD_MUTEX(un));
	} else {
		SD_TRACE(SD_LOG_IOCTL, un,
		    "sd_uscsi_ioctl: scmd->uscsi_cdb 0x%x\n", cdb[0]);
		/*
		 * It's OK to fall into here even if the ddi_copyin()
		 * on the uscsi_cdb above fails, because sd_send_scsi_cmd()
		 * does this same copyin and will return the EFAULT
		 * if it fails.
		 */
		rval = sd_send_scsi_cmd(dev, scmd, uioseg, uioseg, uioseg,
		    SD_PATH_STANDARD);
	}
	/*
	 * Copy the (possibly updated) uscsi_cmd back to the caller so
	 * residual count and status are visible.
	 *
	 * NOTE(review): on copyout failure EFAULT is set only when rval is
	 * already non-zero; a successful command whose status copyout fails
	 * still returns 0 — looks inverted, confirm the intended behavior.
	 */
#ifdef _MULTI_DATAMODEL
	switch (model) {
	case DDI_MODEL_ILP32:
		/*
		 * Convert back to ILP32 before copyout to the
		 * application
		 */
		uscsi_cmdtouscsi_cmd32(scmd, ucmd32);
		if (ddi_copyout(ucmd32, (void *)arg, sizeof (*ucmd32), flag)) {
			if (rval != 0) {
				rval = EFAULT;
			}
		}
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyout(scmd, (void *)arg, sizeof (*scmd), flag)) {
			if (rval != 0) {
				rval = EFAULT;
			}
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyout(scmd, (void *)arg, sizeof (*scmd), flag)) {
		if (rval != 0) {
			rval = EFAULT;
		}
	}
#endif /* _MULTI_DATAMODEL */
done:
	kmem_free(scmd, sizeof (struct uscsi_cmd));

	SD_TRACE(SD_LOG_IOCTL, un, "sd_uscsi_ioctl: exit: un:0x%p\n", un);

	return (rval);
}


/*
 * Function: sd_dkio_ctrl_info
 *
 * Description: This routine is the driver entry point for handling controller
 *		information ioctl requests (DKIOCINFO).
 *
 * Arguments: dev - the device number
 *	      arg - pointer to user provided dk_cinfo structure
 *		    specifying the controller type and attributes.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
21609 * 21610 * Return Code: 0 21611 * EFAULT 21612 * ENXIO 21613 */ 21614 21615 static int 21616 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 21617 { 21618 struct sd_lun *un = NULL; 21619 struct dk_cinfo *info; 21620 dev_info_t *pdip; 21621 int lun, tgt; 21622 21623 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21624 return (ENXIO); 21625 } 21626 21627 info = (struct dk_cinfo *) 21628 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 21629 21630 switch (un->un_ctype) { 21631 case CTYPE_CDROM: 21632 info->dki_ctype = DKC_CDROM; 21633 break; 21634 default: 21635 info->dki_ctype = DKC_SCSI_CCS; 21636 break; 21637 } 21638 pdip = ddi_get_parent(SD_DEVINFO(un)); 21639 info->dki_cnum = ddi_get_instance(pdip); 21640 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 21641 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 21642 } else { 21643 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 21644 DK_DEVLEN - 1); 21645 } 21646 21647 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 21648 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 21649 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 21650 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 21651 21652 /* Unit Information */ 21653 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 21654 info->dki_slave = ((tgt << 3) | lun); 21655 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 21656 DK_DEVLEN - 1); 21657 info->dki_flags = DKI_FMTVOL; 21658 info->dki_partition = SDPART(dev); 21659 21660 /* Max Transfer size of this device in blocks */ 21661 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 21662 info->dki_addr = 0; 21663 info->dki_space = 0; 21664 info->dki_prio = 0; 21665 info->dki_vec = 0; 21666 21667 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 21668 kmem_free(info, sizeof (struct dk_cinfo)); 21669 return (EFAULT); 21670 } else { 21671 kmem_free(info, sizeof (struct dk_cinfo)); 21672 return (0); 21673 } 21674 } 21675 21676 21677 /* 21678 
* Function: sd_get_media_info 21679 * 21680 * Description: This routine is the driver entry point for handling ioctl 21681 * requests for the media type or command set profile used by the 21682 * drive to operate on the media (DKIOCGMEDIAINFO). 21683 * 21684 * Arguments: dev - the device number 21685 * arg - pointer to user provided dk_minfo structure 21686 * specifying the media type, logical block size and 21687 * drive capacity. 21688 * flag - this argument is a pass through to ddi_copyxxx() 21689 * directly from the mode argument of ioctl(). 21690 * 21691 * Return Code: 0 21692 * EACCESS 21693 * EFAULT 21694 * ENXIO 21695 * EIO 21696 */ 21697 21698 static int 21699 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 21700 { 21701 struct sd_lun *un = NULL; 21702 struct uscsi_cmd com; 21703 struct scsi_inquiry *sinq; 21704 struct dk_minfo media_info; 21705 u_longlong_t media_capacity; 21706 uint64_t capacity; 21707 uint_t lbasize; 21708 uchar_t *out_data; 21709 uchar_t *rqbuf; 21710 int rval = 0; 21711 int rtn; 21712 21713 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 21714 (un->un_state == SD_STATE_OFFLINE)) { 21715 return (ENXIO); 21716 } 21717 21718 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 21719 21720 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 21721 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 21722 21723 /* Issue a TUR to determine if the drive is ready with media present */ 21724 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 21725 if (rval == ENXIO) { 21726 goto done; 21727 } 21728 21729 /* Now get configuration data */ 21730 if (ISCD(un)) { 21731 media_info.dki_media_type = DK_CDROM; 21732 21733 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 21734 if (un->un_f_mmc_cap == TRUE) { 21735 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 21736 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN); 21737 21738 if (rtn) { 21739 /* 21740 * Failed for other than an illegal request 21741 * or 
command not supported 21742 */ 21743 if ((com.uscsi_status == STATUS_CHECK) && 21744 (com.uscsi_rqstatus == STATUS_GOOD)) { 21745 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 21746 (rqbuf[12] != 0x20)) { 21747 rval = EIO; 21748 goto done; 21749 } 21750 } 21751 } else { 21752 /* 21753 * The GET CONFIGURATION command succeeded 21754 * so set the media type according to the 21755 * returned data 21756 */ 21757 media_info.dki_media_type = out_data[6]; 21758 media_info.dki_media_type <<= 8; 21759 media_info.dki_media_type |= out_data[7]; 21760 } 21761 } 21762 } else { 21763 /* 21764 * The profile list is not available, so we attempt to identify 21765 * the media type based on the inquiry data 21766 */ 21767 sinq = un->un_sd->sd_inq; 21768 if (sinq->inq_qual == 0) { 21769 /* This is a direct access device */ 21770 media_info.dki_media_type = DK_FIXED_DISK; 21771 21772 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 21773 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 21774 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 21775 media_info.dki_media_type = DK_ZIP; 21776 } else if ( 21777 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 21778 media_info.dki_media_type = DK_JAZ; 21779 } 21780 } 21781 } else { 21782 /* Not a CD or direct access so return unknown media */ 21783 media_info.dki_media_type = DK_UNKNOWN; 21784 } 21785 } 21786 21787 /* Now read the capacity so we can provide the lbasize and capacity */ 21788 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 21789 SD_PATH_DIRECT)) { 21790 case 0: 21791 break; 21792 case EACCES: 21793 rval = EACCES; 21794 goto done; 21795 default: 21796 rval = EIO; 21797 goto done; 21798 } 21799 21800 media_info.dki_lbsize = lbasize; 21801 media_capacity = capacity; 21802 21803 /* 21804 * sd_send_scsi_READ_CAPACITY() reports capacity in 21805 * un->un_sys_blocksize chunks. So we need to convert it into 21806 * cap.lbasize chunks. 
21807 */ 21808 media_capacity *= un->un_sys_blocksize; 21809 media_capacity /= lbasize; 21810 media_info.dki_capacity = media_capacity; 21811 21812 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 21813 rval = EFAULT; 21814 /* Put goto. Anybody might add some code below in future */ 21815 goto done; 21816 } 21817 done: 21818 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 21819 kmem_free(rqbuf, SENSE_LENGTH); 21820 return (rval); 21821 } 21822 21823 21824 /* 21825 * Function: sd_dkio_get_geometry 21826 * 21827 * Description: This routine is the driver entry point for handling user 21828 * requests to get the device geometry (DKIOCGGEOM). 21829 * 21830 * Arguments: dev - the device number 21831 * arg - pointer to user provided dk_geom structure specifying 21832 * the controller's notion of the current geometry. 21833 * flag - this argument is a pass through to ddi_copyxxx() 21834 * directly from the mode argument of ioctl(). 21835 * geom_validated - flag indicating if the device geometry has been 21836 * previously validated in the sdioctl routine. 21837 * 21838 * Return Code: 0 21839 * EFAULT 21840 * ENXIO 21841 * EIO 21842 */ 21843 21844 static int 21845 sd_dkio_get_geometry(dev_t dev, caddr_t arg, int flag, int geom_validated) 21846 { 21847 struct sd_lun *un = NULL; 21848 struct dk_geom *tmp_geom = NULL; 21849 int rval = 0; 21850 21851 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21852 return (ENXIO); 21853 } 21854 21855 #if defined(__i386) || defined(__amd64) 21856 if (un->un_solaris_size == 0) { 21857 return (EIO); 21858 } 21859 #endif 21860 if (geom_validated == FALSE) { 21861 /* 21862 * sd_validate_geometry does not spin a disk up 21863 * if it was spun down. We need to make sure it 21864 * is ready. 
21865 */ 21866 if ((rval = sd_send_scsi_TEST_UNIT_READY(un, 0)) != 0) { 21867 return (rval); 21868 } 21869 mutex_enter(SD_MUTEX(un)); 21870 rval = sd_validate_geometry(un, SD_PATH_DIRECT); 21871 mutex_exit(SD_MUTEX(un)); 21872 } 21873 if (rval) 21874 return (rval); 21875 21876 /* 21877 * Make a local copy of the soft state geometry to avoid some potential 21878 * race conditions associated with holding the mutex and updating the 21879 * write_reinstruct value 21880 */ 21881 tmp_geom = kmem_zalloc(sizeof (struct dk_geom), KM_SLEEP); 21882 mutex_enter(SD_MUTEX(un)); 21883 bcopy(&un->un_g, tmp_geom, sizeof (struct dk_geom)); 21884 mutex_exit(SD_MUTEX(un)); 21885 21886 if (tmp_geom->dkg_write_reinstruct == 0) { 21887 tmp_geom->dkg_write_reinstruct = 21888 (int)((int)(tmp_geom->dkg_nsect * tmp_geom->dkg_rpm * 21889 sd_rot_delay) / (int)60000); 21890 } 21891 21892 rval = ddi_copyout(tmp_geom, (void *)arg, sizeof (struct dk_geom), 21893 flag); 21894 if (rval != 0) { 21895 rval = EFAULT; 21896 } 21897 21898 kmem_free(tmp_geom, sizeof (struct dk_geom)); 21899 return (rval); 21900 21901 } 21902 21903 21904 /* 21905 * Function: sd_dkio_set_geometry 21906 * 21907 * Description: This routine is the driver entry point for handling user 21908 * requests to set the device geometry (DKIOCSGEOM). The actual 21909 * device geometry is not updated, just the driver "notion" of it. 21910 * 21911 * Arguments: dev - the device number 21912 * arg - pointer to user provided dk_geom structure used to set 21913 * the controller's notion of the current geometry. 21914 * flag - this argument is a pass through to ddi_copyxxx() 21915 * directly from the mode argument of ioctl(). 
21916 * 21917 * Return Code: 0 21918 * EFAULT 21919 * ENXIO 21920 * EIO 21921 */ 21922 21923 static int 21924 sd_dkio_set_geometry(dev_t dev, caddr_t arg, int flag) 21925 { 21926 struct sd_lun *un = NULL; 21927 struct dk_geom *tmp_geom; 21928 struct dk_map *lp; 21929 int rval = 0; 21930 int i; 21931 21932 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21933 return (ENXIO); 21934 } 21935 21936 #if defined(__i386) || defined(__amd64) 21937 if (un->un_solaris_size == 0) { 21938 return (EIO); 21939 } 21940 #endif 21941 /* 21942 * We need to copy the user specified geometry into local 21943 * storage and then update the softstate. We don't want to hold 21944 * the mutex and copyin directly from the user to the soft state 21945 */ 21946 tmp_geom = (struct dk_geom *) 21947 kmem_zalloc(sizeof (struct dk_geom), KM_SLEEP); 21948 rval = ddi_copyin(arg, tmp_geom, sizeof (struct dk_geom), flag); 21949 if (rval != 0) { 21950 kmem_free(tmp_geom, sizeof (struct dk_geom)); 21951 return (EFAULT); 21952 } 21953 21954 mutex_enter(SD_MUTEX(un)); 21955 bcopy(tmp_geom, &un->un_g, sizeof (struct dk_geom)); 21956 for (i = 0; i < NDKMAP; i++) { 21957 lp = &un->un_map[i]; 21958 un->un_offset[i] = 21959 un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno; 21960 #if defined(__i386) || defined(__amd64) 21961 un->un_offset[i] += un->un_solaris_offset; 21962 #endif 21963 } 21964 un->un_f_geometry_is_valid = FALSE; 21965 mutex_exit(SD_MUTEX(un)); 21966 kmem_free(tmp_geom, sizeof (struct dk_geom)); 21967 21968 return (rval); 21969 } 21970 21971 21972 /* 21973 * Function: sd_dkio_get_partition 21974 * 21975 * Description: This routine is the driver entry point for handling user 21976 * requests to get the partition table (DKIOCGAPART). 21977 * 21978 * Arguments: dev - the device number 21979 * arg - pointer to user provided dk_allmap structure specifying 21980 * the controller's notion of the current partition table. 
21981 * flag - this argument is a pass through to ddi_copyxxx() 21982 * directly from the mode argument of ioctl(). 21983 * geom_validated - flag indicating if the device geometry has been 21984 * previously validated in the sdioctl routine. 21985 * 21986 * Return Code: 0 21987 * EFAULT 21988 * ENXIO 21989 * EIO 21990 */ 21991 21992 static int 21993 sd_dkio_get_partition(dev_t dev, caddr_t arg, int flag, int geom_validated) 21994 { 21995 struct sd_lun *un = NULL; 21996 int rval = 0; 21997 int size; 21998 21999 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22000 return (ENXIO); 22001 } 22002 22003 #if defined(__i386) || defined(__amd64) 22004 if (un->un_solaris_size == 0) { 22005 return (EIO); 22006 } 22007 #endif 22008 /* 22009 * Make sure the geometry is valid before getting the partition 22010 * information. 22011 */ 22012 mutex_enter(SD_MUTEX(un)); 22013 if (geom_validated == FALSE) { 22014 /* 22015 * sd_validate_geometry does not spin a disk up 22016 * if it was spun down. We need to make sure it 22017 * is ready before validating the geometry. 
22018 */ 22019 mutex_exit(SD_MUTEX(un)); 22020 if ((rval = sd_send_scsi_TEST_UNIT_READY(un, 0)) != 0) { 22021 return (rval); 22022 } 22023 mutex_enter(SD_MUTEX(un)); 22024 22025 if ((rval = sd_validate_geometry(un, SD_PATH_DIRECT)) != 0) { 22026 mutex_exit(SD_MUTEX(un)); 22027 return (rval); 22028 } 22029 } 22030 mutex_exit(SD_MUTEX(un)); 22031 22032 #ifdef _MULTI_DATAMODEL 22033 switch (ddi_model_convert_from(flag & FMODELS)) { 22034 case DDI_MODEL_ILP32: { 22035 struct dk_map32 dk_map32[NDKMAP]; 22036 int i; 22037 22038 for (i = 0; i < NDKMAP; i++) { 22039 dk_map32[i].dkl_cylno = un->un_map[i].dkl_cylno; 22040 dk_map32[i].dkl_nblk = un->un_map[i].dkl_nblk; 22041 } 22042 size = NDKMAP * sizeof (struct dk_map32); 22043 rval = ddi_copyout(dk_map32, (void *)arg, size, flag); 22044 if (rval != 0) { 22045 rval = EFAULT; 22046 } 22047 break; 22048 } 22049 case DDI_MODEL_NONE: 22050 size = NDKMAP * sizeof (struct dk_map); 22051 rval = ddi_copyout(un->un_map, (void *)arg, size, flag); 22052 if (rval != 0) { 22053 rval = EFAULT; 22054 } 22055 break; 22056 } 22057 #else /* ! _MULTI_DATAMODEL */ 22058 size = NDKMAP * sizeof (struct dk_map); 22059 rval = ddi_copyout(un->un_map, (void *)arg, size, flag); 22060 if (rval != 0) { 22061 rval = EFAULT; 22062 } 22063 #endif /* _MULTI_DATAMODEL */ 22064 return (rval); 22065 } 22066 22067 22068 /* 22069 * Function: sd_dkio_set_partition 22070 * 22071 * Description: This routine is the driver entry point for handling user 22072 * requests to set the partition table (DKIOCSAPART). The actual 22073 * device partition is not updated. 22074 * 22075 * Arguments: dev - the device number 22076 * arg - pointer to user provided dk_allmap structure used to set 22077 * the controller's notion of the partition table. 22078 * flag - this argument is a pass through to ddi_copyxxx() 22079 * directly from the mode argument of ioctl(). 
22080 * 22081 * Return Code: 0 22082 * EINVAL 22083 * EFAULT 22084 * ENXIO 22085 * EIO 22086 */ 22087 22088 static int 22089 sd_dkio_set_partition(dev_t dev, caddr_t arg, int flag) 22090 { 22091 struct sd_lun *un = NULL; 22092 struct dk_map dk_map[NDKMAP]; 22093 struct dk_map *lp; 22094 int rval = 0; 22095 int size; 22096 int i; 22097 #if defined(_SUNOS_VTOC_16) 22098 struct dkl_partition *vp; 22099 #endif 22100 22101 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22102 return (ENXIO); 22103 } 22104 22105 /* 22106 * Set the map for all logical partitions. We lock 22107 * the priority just to make sure an interrupt doesn't 22108 * come in while the map is half updated. 22109 */ 22110 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_solaris_size)) 22111 mutex_enter(SD_MUTEX(un)); 22112 if (un->un_blockcount > DK_MAX_BLOCKS) { 22113 mutex_exit(SD_MUTEX(un)); 22114 return (ENOTSUP); 22115 } 22116 mutex_exit(SD_MUTEX(un)); 22117 if (un->un_solaris_size == 0) { 22118 return (EIO); 22119 } 22120 22121 #ifdef _MULTI_DATAMODEL 22122 switch (ddi_model_convert_from(flag & FMODELS)) { 22123 case DDI_MODEL_ILP32: { 22124 struct dk_map32 dk_map32[NDKMAP]; 22125 22126 size = NDKMAP * sizeof (struct dk_map32); 22127 rval = ddi_copyin((void *)arg, dk_map32, size, flag); 22128 if (rval != 0) { 22129 return (EFAULT); 22130 } 22131 for (i = 0; i < NDKMAP; i++) { 22132 dk_map[i].dkl_cylno = dk_map32[i].dkl_cylno; 22133 dk_map[i].dkl_nblk = dk_map32[i].dkl_nblk; 22134 } 22135 break; 22136 } 22137 case DDI_MODEL_NONE: 22138 size = NDKMAP * sizeof (struct dk_map); 22139 rval = ddi_copyin((void *)arg, dk_map, size, flag); 22140 if (rval != 0) { 22141 return (EFAULT); 22142 } 22143 break; 22144 } 22145 #else /* ! 
_MULTI_DATAMODEL */ 22146 size = NDKMAP * sizeof (struct dk_map); 22147 rval = ddi_copyin((void *)arg, dk_map, size, flag); 22148 if (rval != 0) { 22149 return (EFAULT); 22150 } 22151 #endif /* _MULTI_DATAMODEL */ 22152 22153 mutex_enter(SD_MUTEX(un)); 22154 /* Note: The size used in this bcopy is set based upon the data model */ 22155 bcopy(dk_map, un->un_map, size); 22156 #if defined(_SUNOS_VTOC_16) 22157 vp = (struct dkl_partition *)&(un->un_vtoc); 22158 #endif /* defined(_SUNOS_VTOC_16) */ 22159 for (i = 0; i < NDKMAP; i++) { 22160 lp = &un->un_map[i]; 22161 un->un_offset[i] = 22162 un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno; 22163 #if defined(_SUNOS_VTOC_16) 22164 vp->p_start = un->un_offset[i]; 22165 vp->p_size = lp->dkl_nblk; 22166 vp++; 22167 #endif /* defined(_SUNOS_VTOC_16) */ 22168 #if defined(__i386) || defined(__amd64) 22169 un->un_offset[i] += un->un_solaris_offset; 22170 #endif 22171 } 22172 mutex_exit(SD_MUTEX(un)); 22173 return (rval); 22174 } 22175 22176 22177 /* 22178 * Function: sd_dkio_get_vtoc 22179 * 22180 * Description: This routine is the driver entry point for handling user 22181 * requests to get the current volume table of contents 22182 * (DKIOCGVTOC). 22183 * 22184 * Arguments: dev - the device number 22185 * arg - pointer to user provided vtoc structure specifying 22186 * the current vtoc. 22187 * flag - this argument is a pass through to ddi_copyxxx() 22188 * directly from the mode argument of ioctl(). 22189 * geom_validated - flag indicating if the device geometry has been 22190 * previously validated in the sdioctl routine. 
22191 * 22192 * Return Code: 0 22193 * EFAULT 22194 * ENXIO 22195 * EIO 22196 */ 22197 22198 static int 22199 sd_dkio_get_vtoc(dev_t dev, caddr_t arg, int flag, int geom_validated) 22200 { 22201 struct sd_lun *un = NULL; 22202 #if defined(_SUNOS_VTOC_8) 22203 struct vtoc user_vtoc; 22204 #endif /* defined(_SUNOS_VTOC_8) */ 22205 int rval = 0; 22206 22207 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22208 return (ENXIO); 22209 } 22210 22211 mutex_enter(SD_MUTEX(un)); 22212 if (geom_validated == FALSE) { 22213 /* 22214 * sd_validate_geometry does not spin a disk up 22215 * if it was spun down. We need to make sure it 22216 * is ready. 22217 */ 22218 mutex_exit(SD_MUTEX(un)); 22219 if ((rval = sd_send_scsi_TEST_UNIT_READY(un, 0)) != 0) { 22220 return (rval); 22221 } 22222 mutex_enter(SD_MUTEX(un)); 22223 if ((rval = sd_validate_geometry(un, SD_PATH_DIRECT)) != 0) { 22224 mutex_exit(SD_MUTEX(un)); 22225 return (rval); 22226 } 22227 } 22228 22229 #if defined(_SUNOS_VTOC_8) 22230 sd_build_user_vtoc(un, &user_vtoc); 22231 mutex_exit(SD_MUTEX(un)); 22232 22233 #ifdef _MULTI_DATAMODEL 22234 switch (ddi_model_convert_from(flag & FMODELS)) { 22235 case DDI_MODEL_ILP32: { 22236 struct vtoc32 user_vtoc32; 22237 22238 vtoctovtoc32(user_vtoc, user_vtoc32); 22239 if (ddi_copyout(&user_vtoc32, (void *)arg, 22240 sizeof (struct vtoc32), flag)) { 22241 return (EFAULT); 22242 } 22243 break; 22244 } 22245 22246 case DDI_MODEL_NONE: 22247 if (ddi_copyout(&user_vtoc, (void *)arg, 22248 sizeof (struct vtoc), flag)) { 22249 return (EFAULT); 22250 } 22251 break; 22252 } 22253 #else /* ! 
_MULTI_DATAMODEL */ 22254 if (ddi_copyout(&user_vtoc, (void *)arg, sizeof (struct vtoc), flag)) { 22255 return (EFAULT); 22256 } 22257 #endif /* _MULTI_DATAMODEL */ 22258 22259 #elif defined(_SUNOS_VTOC_16) 22260 mutex_exit(SD_MUTEX(un)); 22261 22262 #ifdef _MULTI_DATAMODEL 22263 /* 22264 * The un_vtoc structure is a "struct dk_vtoc" which is always 22265 * 32-bit to maintain compatibility with existing on-disk 22266 * structures. Thus, we need to convert the structure when copying 22267 * it out to a datamodel-dependent "struct vtoc" in a 64-bit 22268 * program. If the target is a 32-bit program, then no conversion 22269 * is necessary. 22270 */ 22271 /* LINTED: logical expression always true: op "||" */ 22272 ASSERT(sizeof (un->un_vtoc) == sizeof (struct vtoc32)); 22273 switch (ddi_model_convert_from(flag & FMODELS)) { 22274 case DDI_MODEL_ILP32: 22275 if (ddi_copyout(&(un->un_vtoc), (void *)arg, 22276 sizeof (un->un_vtoc), flag)) { 22277 return (EFAULT); 22278 } 22279 break; 22280 22281 case DDI_MODEL_NONE: { 22282 struct vtoc user_vtoc; 22283 22284 vtoc32tovtoc(un->un_vtoc, user_vtoc); 22285 if (ddi_copyout(&user_vtoc, (void *)arg, 22286 sizeof (struct vtoc), flag)) { 22287 return (EFAULT); 22288 } 22289 break; 22290 } 22291 } 22292 #else /* ! _MULTI_DATAMODEL */ 22293 if (ddi_copyout(&(un->un_vtoc), (void *)arg, sizeof (un->un_vtoc), 22294 flag)) { 22295 return (EFAULT); 22296 } 22297 #endif /* _MULTI_DATAMODEL */ 22298 #else 22299 #error "No VTOC format defined." 
22300 #endif 22301 22302 return (rval); 22303 } 22304 22305 static int 22306 sd_dkio_get_efi(dev_t dev, caddr_t arg, int flag) 22307 { 22308 struct sd_lun *un = NULL; 22309 dk_efi_t user_efi; 22310 int rval = 0; 22311 void *buffer; 22312 22313 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) 22314 return (ENXIO); 22315 22316 if (ddi_copyin(arg, &user_efi, sizeof (dk_efi_t), flag)) 22317 return (EFAULT); 22318 22319 user_efi.dki_data = (void *)(uintptr_t)user_efi.dki_data_64; 22320 22321 if ((user_efi.dki_length % un->un_tgt_blocksize) || 22322 (user_efi.dki_length > un->un_max_xfer_size)) 22323 return (EINVAL); 22324 22325 buffer = kmem_alloc(user_efi.dki_length, KM_SLEEP); 22326 rval = sd_send_scsi_READ(un, buffer, user_efi.dki_length, 22327 user_efi.dki_lba, SD_PATH_DIRECT); 22328 if (rval == 0 && ddi_copyout(buffer, user_efi.dki_data, 22329 user_efi.dki_length, flag) != 0) 22330 rval = EFAULT; 22331 22332 kmem_free(buffer, user_efi.dki_length); 22333 return (rval); 22334 } 22335 22336 /* 22337 * Function: sd_build_user_vtoc 22338 * 22339 * Description: This routine populates a pass by reference variable with the 22340 * current volume table of contents. 22341 * 22342 * Arguments: un - driver soft state (unit) structure 22343 * user_vtoc - pointer to vtoc structure to be populated 22344 */ 22345 22346 static void 22347 sd_build_user_vtoc(struct sd_lun *un, struct vtoc *user_vtoc) 22348 { 22349 struct dk_map2 *lpart; 22350 struct dk_map *lmap; 22351 struct partition *vpart; 22352 int nblks; 22353 int i; 22354 22355 ASSERT(mutex_owned(SD_MUTEX(un))); 22356 22357 /* 22358 * Return vtoc structure fields in the provided VTOC area, addressed 22359 * by *vtoc. 
22360 */ 22361 bzero(user_vtoc, sizeof (struct vtoc)); 22362 user_vtoc->v_bootinfo[0] = un->un_vtoc.v_bootinfo[0]; 22363 user_vtoc->v_bootinfo[1] = un->un_vtoc.v_bootinfo[1]; 22364 user_vtoc->v_bootinfo[2] = un->un_vtoc.v_bootinfo[2]; 22365 user_vtoc->v_sanity = VTOC_SANE; 22366 user_vtoc->v_version = un->un_vtoc.v_version; 22367 bcopy(un->un_vtoc.v_volume, user_vtoc->v_volume, LEN_DKL_VVOL); 22368 user_vtoc->v_sectorsz = un->un_sys_blocksize; 22369 user_vtoc->v_nparts = un->un_vtoc.v_nparts; 22370 bcopy(un->un_vtoc.v_reserved, user_vtoc->v_reserved, 22371 sizeof (un->un_vtoc.v_reserved)); 22372 /* 22373 * Convert partitioning information. 22374 * 22375 * Note the conversion from starting cylinder number 22376 * to starting sector number. 22377 */ 22378 lmap = un->un_map; 22379 lpart = (struct dk_map2 *)un->un_vtoc.v_part; 22380 vpart = user_vtoc->v_part; 22381 22382 nblks = un->un_g.dkg_nsect * un->un_g.dkg_nhead; 22383 22384 for (i = 0; i < V_NUMPAR; i++) { 22385 vpart->p_tag = lpart->p_tag; 22386 vpart->p_flag = lpart->p_flag; 22387 vpart->p_start = lmap->dkl_cylno * nblks; 22388 vpart->p_size = lmap->dkl_nblk; 22389 lmap++; 22390 lpart++; 22391 vpart++; 22392 22393 /* (4364927) */ 22394 user_vtoc->timestamp[i] = (time_t)un->un_vtoc.v_timestamp[i]; 22395 } 22396 22397 bcopy(un->un_asciilabel, user_vtoc->v_asciilabel, LEN_DKL_ASCII); 22398 } 22399 22400 static int 22401 sd_dkio_partition(dev_t dev, caddr_t arg, int flag) 22402 { 22403 struct sd_lun *un = NULL; 22404 struct partition64 p64; 22405 int rval = 0; 22406 uint_t nparts; 22407 efi_gpe_t *partitions; 22408 efi_gpt_t *buffer; 22409 diskaddr_t gpe_lba; 22410 22411 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22412 return (ENXIO); 22413 } 22414 22415 if (ddi_copyin((const void *)arg, &p64, 22416 sizeof (struct partition64), flag)) { 22417 return (EFAULT); 22418 } 22419 22420 buffer = kmem_alloc(EFI_MIN_ARRAY_SIZE, KM_SLEEP); 22421 rval = sd_send_scsi_READ(un, buffer, DEV_BSIZE, 22422 1, 
SD_PATH_DIRECT); 22423 if (rval != 0) 22424 goto done_error; 22425 22426 sd_swap_efi_gpt(buffer); 22427 22428 if ((rval = sd_validate_efi(buffer)) != 0) 22429 goto done_error; 22430 22431 nparts = buffer->efi_gpt_NumberOfPartitionEntries; 22432 gpe_lba = buffer->efi_gpt_PartitionEntryLBA; 22433 if (p64.p_partno > nparts) { 22434 /* couldn't find it */ 22435 rval = ESRCH; 22436 goto done_error; 22437 } 22438 /* 22439 * if we're dealing with a partition that's out of the normal 22440 * 16K block, adjust accordingly 22441 */ 22442 gpe_lba += p64.p_partno / sizeof (efi_gpe_t); 22443 rval = sd_send_scsi_READ(un, buffer, EFI_MIN_ARRAY_SIZE, 22444 gpe_lba, SD_PATH_DIRECT); 22445 if (rval) { 22446 goto done_error; 22447 } 22448 partitions = (efi_gpe_t *)buffer; 22449 22450 sd_swap_efi_gpe(nparts, partitions); 22451 22452 partitions += p64.p_partno; 22453 bcopy(&partitions->efi_gpe_PartitionTypeGUID, &p64.p_type, 22454 sizeof (struct uuid)); 22455 p64.p_start = partitions->efi_gpe_StartingLBA; 22456 p64.p_size = partitions->efi_gpe_EndingLBA - 22457 p64.p_start + 1; 22458 22459 if (ddi_copyout(&p64, (void *)arg, sizeof (struct partition64), flag)) 22460 rval = EFAULT; 22461 22462 done_error: 22463 kmem_free(buffer, EFI_MIN_ARRAY_SIZE); 22464 return (rval); 22465 } 22466 22467 22468 /* 22469 * Function: sd_dkio_set_vtoc 22470 * 22471 * Description: This routine is the driver entry point for handling user 22472 * requests to set the current volume table of contents 22473 * (DKIOCSVTOC). 22474 * 22475 * Arguments: dev - the device number 22476 * arg - pointer to user provided vtoc structure used to set the 22477 * current vtoc. 22478 * flag - this argument is a pass through to ddi_copyxxx() 22479 * directly from the mode argument of ioctl(). 
22480 * 22481 * Return Code: 0 22482 * EFAULT 22483 * ENXIO 22484 * EINVAL 22485 * ENOTSUP 22486 */ 22487 22488 static int 22489 sd_dkio_set_vtoc(dev_t dev, caddr_t arg, int flag) 22490 { 22491 struct sd_lun *un = NULL; 22492 struct vtoc user_vtoc; 22493 int rval = 0; 22494 22495 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22496 return (ENXIO); 22497 } 22498 22499 #if defined(__i386) || defined(__amd64) 22500 if (un->un_tgt_blocksize != un->un_sys_blocksize) { 22501 return (EINVAL); 22502 } 22503 #endif 22504 22505 #ifdef _MULTI_DATAMODEL 22506 switch (ddi_model_convert_from(flag & FMODELS)) { 22507 case DDI_MODEL_ILP32: { 22508 struct vtoc32 user_vtoc32; 22509 22510 if (ddi_copyin((const void *)arg, &user_vtoc32, 22511 sizeof (struct vtoc32), flag)) { 22512 return (EFAULT); 22513 } 22514 vtoc32tovtoc(user_vtoc32, user_vtoc); 22515 break; 22516 } 22517 22518 case DDI_MODEL_NONE: 22519 if (ddi_copyin((const void *)arg, &user_vtoc, 22520 sizeof (struct vtoc), flag)) { 22521 return (EFAULT); 22522 } 22523 break; 22524 } 22525 #else /* ! 
_MULTI_DATAMODEL */ 22526 if (ddi_copyin((const void *)arg, &user_vtoc, 22527 sizeof (struct vtoc), flag)) { 22528 return (EFAULT); 22529 } 22530 #endif /* _MULTI_DATAMODEL */ 22531 22532 mutex_enter(SD_MUTEX(un)); 22533 if (un->un_blockcount > DK_MAX_BLOCKS) { 22534 mutex_exit(SD_MUTEX(un)); 22535 return (ENOTSUP); 22536 } 22537 if (un->un_g.dkg_ncyl == 0) { 22538 mutex_exit(SD_MUTEX(un)); 22539 return (EINVAL); 22540 } 22541 22542 mutex_exit(SD_MUTEX(un)); 22543 sd_clear_efi(un); 22544 ddi_remove_minor_node(SD_DEVINFO(un), "wd"); 22545 ddi_remove_minor_node(SD_DEVINFO(un), "wd,raw"); 22546 (void) ddi_create_minor_node(SD_DEVINFO(un), "h", 22547 S_IFBLK, (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE, 22548 un->un_node_type, NULL); 22549 (void) ddi_create_minor_node(SD_DEVINFO(un), "h,raw", 22550 S_IFCHR, (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE, 22551 un->un_node_type, NULL); 22552 mutex_enter(SD_MUTEX(un)); 22553 22554 if ((rval = sd_build_label_vtoc(un, &user_vtoc)) == 0) { 22555 if ((rval = sd_write_label(dev)) == 0) { 22556 if ((rval = sd_validate_geometry(un, SD_PATH_DIRECT)) 22557 != 0) { 22558 SD_ERROR(SD_LOG_IOCTL_DKIO, un, 22559 "sd_dkio_set_vtoc: " 22560 "Failed validate geometry\n"); 22561 } 22562 } 22563 } 22564 22565 /* 22566 * If sd_build_label_vtoc, or sd_write_label failed above write the 22567 * devid anyway, what can it hurt? Also preserve the device id by 22568 * writing to the disk acyl for the case where a devid has been 22569 * fabricated. 22570 */ 22571 if (!ISREMOVABLE(un) && !ISCD(un) && 22572 (un->un_f_opt_fab_devid == TRUE)) { 22573 if (un->un_devid == NULL) { 22574 sd_register_devid(un, SD_DEVINFO(un), 22575 SD_TARGET_IS_UNRESERVED); 22576 } else { 22577 /* 22578 * The device id for this disk has been 22579 * fabricated. Fabricated device id's are 22580 * managed by storing them in the last 2 22581 * available sectors on the drive. The device 22582 * id must be preserved by writing it back out 22583 * to this location. 
22584 */ 22585 if (sd_write_deviceid(un) != 0) { 22586 ddi_devid_free(un->un_devid); 22587 un->un_devid = NULL; 22588 } 22589 } 22590 } 22591 mutex_exit(SD_MUTEX(un)); 22592 return (rval); 22593 } 22594 22595 22596 /* 22597 * Function: sd_build_label_vtoc 22598 * 22599 * Description: This routine updates the driver soft state current volume table 22600 * of contents based on a user specified vtoc. 22601 * 22602 * Arguments: un - driver soft state (unit) structure 22603 * user_vtoc - pointer to vtoc structure specifying vtoc to be used 22604 * to update the driver soft state. 22605 * 22606 * Return Code: 0 22607 * EINVAL 22608 */ 22609 22610 static int 22611 sd_build_label_vtoc(struct sd_lun *un, struct vtoc *user_vtoc) 22612 { 22613 struct dk_map *lmap; 22614 struct partition *vpart; 22615 int nblks; 22616 #if defined(_SUNOS_VTOC_8) 22617 int ncyl; 22618 struct dk_map2 *lpart; 22619 #endif /* defined(_SUNOS_VTOC_8) */ 22620 int i; 22621 22622 ASSERT(mutex_owned(SD_MUTEX(un))); 22623 22624 /* Sanity-check the vtoc */ 22625 if (user_vtoc->v_sanity != VTOC_SANE || 22626 user_vtoc->v_sectorsz != un->un_sys_blocksize || 22627 user_vtoc->v_nparts != V_NUMPAR) { 22628 return (EINVAL); 22629 } 22630 22631 nblks = un->un_g.dkg_nsect * un->un_g.dkg_nhead; 22632 if (nblks == 0) { 22633 return (EINVAL); 22634 } 22635 22636 #if defined(_SUNOS_VTOC_8) 22637 vpart = user_vtoc->v_part; 22638 for (i = 0; i < V_NUMPAR; i++) { 22639 if ((vpart->p_start % nblks) != 0) { 22640 return (EINVAL); 22641 } 22642 ncyl = vpart->p_start / nblks; 22643 ncyl += vpart->p_size / nblks; 22644 if ((vpart->p_size % nblks) != 0) { 22645 ncyl++; 22646 } 22647 if (ncyl > (int)un->un_g.dkg_ncyl) { 22648 return (EINVAL); 22649 } 22650 vpart++; 22651 } 22652 #endif /* defined(_SUNOS_VTOC_8) */ 22653 22654 /* Put appropriate vtoc structure fields into the disk label */ 22655 #if defined(_SUNOS_VTOC_16) 22656 /* 22657 * The vtoc is always a 32bit data structure to maintain the 22658 * on-disk format. 
Convert "in place" instead of bcopying it.
	 */
	vtoctovtoc32((*user_vtoc), (*((struct vtoc32 *)&(un->un_vtoc))));

	/*
	 * in the 16-slice vtoc, starting sectors are expressed in
	 * numbers *relative* to the start of the Solaris fdisk partition.
	 */
	lmap = un->un_map;
	vpart = user_vtoc->v_part;

	for (i = 0; i < (int)user_vtoc->v_nparts; i++, lmap++, vpart++) {
		lmap->dkl_cylno = vpart->p_start / nblks;
		lmap->dkl_nblk = vpart->p_size;
	}

#elif defined(_SUNOS_VTOC_8)

	/* Copy the bookkeeping fields from the user vtoc into the label. */
	un->un_vtoc.v_bootinfo[0] = (uint32_t)user_vtoc->v_bootinfo[0];
	un->un_vtoc.v_bootinfo[1] = (uint32_t)user_vtoc->v_bootinfo[1];
	un->un_vtoc.v_bootinfo[2] = (uint32_t)user_vtoc->v_bootinfo[2];

	un->un_vtoc.v_sanity = (uint32_t)user_vtoc->v_sanity;
	un->un_vtoc.v_version = (uint32_t)user_vtoc->v_version;

	bcopy(user_vtoc->v_volume, un->un_vtoc.v_volume, LEN_DKL_VVOL);

	un->un_vtoc.v_nparts = user_vtoc->v_nparts;

	bcopy(user_vtoc->v_reserved, un->un_vtoc.v_reserved,
	    sizeof (un->un_vtoc.v_reserved));

	/*
	 * Note the conversion from starting sector number
	 * to starting cylinder number.
	 * Return error if division results in a remainder.
	 */
	lmap = un->un_map;
	lpart = un->un_vtoc.v_part;
	vpart = user_vtoc->v_part;

	for (i = 0; i < (int)user_vtoc->v_nparts; i++) {
		lpart->p_tag = vpart->p_tag;
		lpart->p_flag = vpart->p_flag;
		lmap->dkl_cylno = vpart->p_start / nblks;
		lmap->dkl_nblk = vpart->p_size;

		lmap++;
		lpart++;
		vpart++;

		/*
		 * Clamp the timestamp to the maximum a 32-bit label
		 * field can hold on LP64 kernels (bug 4387723).
		 */
#ifdef _LP64
		if (user_vtoc->timestamp[i] > TIME32_MAX) {
			un->un_vtoc.v_timestamp[i] = TIME32_MAX;
		} else {
			un->un_vtoc.v_timestamp[i] = user_vtoc->timestamp[i];
		}
#else
		un->un_vtoc.v_timestamp[i] = user_vtoc->timestamp[i];
#endif
	}

	bcopy(user_vtoc->v_asciilabel, un->un_asciilabel, LEN_DKL_ASCII);
#else
#error "No VTOC format defined."
#endif
	return (0);
}

/*
 * Function: sd_clear_efi
 *
 * Description: This routine clears all EFI labels.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: void
 *
 * Context: Must be called without SD_MUTEX(un) held (asserted below);
 *	    issues blocking SCSI commands.
 */

static void
sd_clear_efi(struct sd_lun *un)
{
	efi_gpt_t	*gpt;
	uint_t		lbasize;
	uint64_t	cap;
	int		rval;

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	gpt = kmem_alloc(sizeof (efi_gpt_t), KM_SLEEP);

	/* The primary GPT header is read from block 1. */
	if (sd_send_scsi_READ(un, gpt, DEV_BSIZE, 1, SD_PATH_DIRECT) != 0) {
		goto done;
	}

	sd_swap_efi_gpt(gpt);
	rval = sd_validate_efi(gpt);
	if (rval == 0) {
		/* clear primary */
		bzero(gpt, sizeof (efi_gpt_t));
		if ((rval = sd_send_scsi_WRITE(un, gpt, EFI_LABEL_SIZE, 1,
		    SD_PATH_DIRECT))) {
			SD_INFO(SD_LOG_IO_PARTITION, un,
			    "sd_clear_efi: clear primary label failed\n");
		}
	}
	/* the backup */
	rval = sd_send_scsi_READ_CAPACITY(un, &cap, &lbasize,
	    SD_PATH_DIRECT);
	if (rval) {
		goto done;
	}
	/* The backup GPT header is read from the last block (cap - 1). */
	if ((rval = sd_send_scsi_READ(un, gpt,
lbasize, 22772 cap - 1, SD_PATH_DIRECT)) != 0) { 22773 goto done; 22774 } 22775 sd_swap_efi_gpt(gpt); 22776 rval = sd_validate_efi(gpt); 22777 if (rval == 0) { 22778 /* clear backup */ 22779 SD_TRACE(SD_LOG_IOCTL, un, "sd_clear_efi clear backup@%lu\n", 22780 cap-1); 22781 bzero(gpt, sizeof (efi_gpt_t)); 22782 if ((rval = sd_send_scsi_WRITE(un, gpt, EFI_LABEL_SIZE, 22783 cap-1, SD_PATH_DIRECT))) { 22784 SD_INFO(SD_LOG_IO_PARTITION, un, 22785 "sd_clear_efi: clear backup label failed\n"); 22786 } 22787 } 22788 22789 done: 22790 kmem_free(gpt, sizeof (efi_gpt_t)); 22791 } 22792 22793 /* 22794 * Function: sd_set_vtoc 22795 * 22796 * Description: This routine writes data to the appropriate positions 22797 * 22798 * Arguments: un - driver soft state (unit) structure 22799 * dkl - the data to be written 22800 * 22801 * Return: void 22802 */ 22803 22804 static int 22805 sd_set_vtoc(struct sd_lun *un, struct dk_label *dkl) 22806 { 22807 void *shadow_buf; 22808 uint_t label_addr; 22809 int sec; 22810 int blk; 22811 int head; 22812 int cyl; 22813 int rval; 22814 22815 #if defined(__i386) || defined(__amd64) 22816 label_addr = un->un_solaris_offset + DK_LABEL_LOC; 22817 #else 22818 /* Write the primary label at block 0 of the solaris partition. */ 22819 label_addr = 0; 22820 #endif 22821 22822 if (NOT_DEVBSIZE(un)) { 22823 shadow_buf = kmem_zalloc(un->un_tgt_blocksize, KM_SLEEP); 22824 /* 22825 * Read the target's first block. 22826 */ 22827 if ((rval = sd_send_scsi_READ(un, shadow_buf, 22828 un->un_tgt_blocksize, label_addr, 22829 SD_PATH_STANDARD)) != 0) { 22830 goto exit; 22831 } 22832 /* 22833 * Copy the contents of the label into the shadow buffer 22834 * which is of the size of target block size. 
22835 */ 22836 bcopy(dkl, shadow_buf, sizeof (struct dk_label)); 22837 } 22838 22839 /* Write the primary label */ 22840 if (NOT_DEVBSIZE(un)) { 22841 rval = sd_send_scsi_WRITE(un, shadow_buf, un->un_tgt_blocksize, 22842 label_addr, SD_PATH_STANDARD); 22843 } else { 22844 rval = sd_send_scsi_WRITE(un, dkl, un->un_sys_blocksize, 22845 label_addr, SD_PATH_STANDARD); 22846 } 22847 if (rval != 0) { 22848 return (rval); 22849 } 22850 22851 /* 22852 * Calculate where the backup labels go. They are always on 22853 * the last alternate cylinder, but some older drives put them 22854 * on head 2 instead of the last head. They are always on the 22855 * first 5 odd sectors of the appropriate track. 22856 * 22857 * We have no choice at this point, but to believe that the 22858 * disk label is valid. Use the geometry of the disk 22859 * as described in the label. 22860 */ 22861 cyl = dkl->dkl_ncyl + dkl->dkl_acyl - 1; 22862 head = dkl->dkl_nhead - 1; 22863 22864 /* 22865 * Write and verify the backup labels. Make sure we don't try to 22866 * write past the last cylinder. 22867 */ 22868 for (sec = 1; ((sec < 5 * 2 + 1) && (sec < dkl->dkl_nsect)); sec += 2) { 22869 blk = (daddr_t)( 22870 (cyl * ((dkl->dkl_nhead * dkl->dkl_nsect) - dkl->dkl_apc)) + 22871 (head * dkl->dkl_nsect) + sec); 22872 #if defined(__i386) || defined(__amd64) 22873 blk += un->un_solaris_offset; 22874 #endif 22875 if (NOT_DEVBSIZE(un)) { 22876 uint64_t tblk; 22877 /* 22878 * Need to read the block first for read modify write. 22879 */ 22880 tblk = (uint64_t)blk; 22881 blk = (int)((tblk * un->un_sys_blocksize) / 22882 un->un_tgt_blocksize); 22883 if ((rval = sd_send_scsi_READ(un, shadow_buf, 22884 un->un_tgt_blocksize, blk, 22885 SD_PATH_STANDARD)) != 0) { 22886 goto exit; 22887 } 22888 /* 22889 * Modify the shadow buffer with the label. 
*/
			bcopy(dkl, shadow_buf, sizeof (struct dk_label));
			rval = sd_send_scsi_WRITE(un, shadow_buf,
			    un->un_tgt_blocksize, blk, SD_PATH_STANDARD);
		} else {
			rval = sd_send_scsi_WRITE(un, dkl,
			    un->un_sys_blocksize,
			    blk, SD_PATH_STANDARD);
			SD_INFO(SD_LOG_IO_PARTITION, un,
			    "sd_set_vtoc: wrote backup label %d\n", blk);
		}
		if (rval != 0) {
			goto exit;
		}
	}
exit:
	/* Free the shadow buffer if one was allocated above. */
	if (NOT_DEVBSIZE(un)) {
		kmem_free(shadow_buf, un->un_tgt_blocksize);
	}
	return (rval);
}

/*
 * Function: sd_clear_vtoc
 *
 * Description: This routine clears out the VTOC labels.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return: void
 *
 * Context: Called with SD_MUTEX(un) held; the mutex is dropped around the
 *	    blocking allocation and the label write, and re-acquired before
 *	    returning.
 */

static void
sd_clear_vtoc(struct sd_lun *un)
{
	struct dk_label *dkl;

	mutex_exit(SD_MUTEX(un));
	dkl = kmem_zalloc(sizeof (struct dk_label), KM_SLEEP);
	mutex_enter(SD_MUTEX(un));
	/*
	 * sd_set_vtoc uses these fields in order to figure out
	 * where to overwrite the backup labels
	 */
	dkl->dkl_apc = un->un_g.dkg_apc;
	dkl->dkl_ncyl = un->un_g.dkg_ncyl;
	dkl->dkl_acyl = un->un_g.dkg_acyl;
	dkl->dkl_nhead = un->un_g.dkg_nhead;
	dkl->dkl_nsect = un->un_g.dkg_nsect;
	mutex_exit(SD_MUTEX(un));
	/* Write the all-zero label to the primary and backup locations. */
	(void) sd_set_vtoc(un, dkl);
	kmem_free(dkl, sizeof (struct dk_label));

	mutex_enter(SD_MUTEX(un));
}

/*
 * Function: sd_write_label
 *
 * Description: This routine will validate and write the driver soft state vtoc
 *		contents to the device.
22950 * 22951 * Arguments: dev - the device number 22952 * 22953 * Return Code: the code returned by sd_send_scsi_cmd() 22954 * 0 22955 * EINVAL 22956 * ENXIO 22957 * ENOMEM 22958 */ 22959 22960 static int 22961 sd_write_label(dev_t dev) 22962 { 22963 struct sd_lun *un; 22964 struct dk_label *dkl; 22965 short sum; 22966 short *sp; 22967 int i; 22968 int rval; 22969 22970 if (((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) || 22971 (un->un_state == SD_STATE_OFFLINE)) { 22972 return (ENXIO); 22973 } 22974 ASSERT(mutex_owned(SD_MUTEX(un))); 22975 mutex_exit(SD_MUTEX(un)); 22976 dkl = kmem_zalloc(sizeof (struct dk_label), KM_SLEEP); 22977 mutex_enter(SD_MUTEX(un)); 22978 22979 bcopy(&un->un_vtoc, &dkl->dkl_vtoc, sizeof (struct dk_vtoc)); 22980 dkl->dkl_rpm = un->un_g.dkg_rpm; 22981 dkl->dkl_pcyl = un->un_g.dkg_pcyl; 22982 dkl->dkl_apc = un->un_g.dkg_apc; 22983 dkl->dkl_intrlv = un->un_g.dkg_intrlv; 22984 dkl->dkl_ncyl = un->un_g.dkg_ncyl; 22985 dkl->dkl_acyl = un->un_g.dkg_acyl; 22986 dkl->dkl_nhead = un->un_g.dkg_nhead; 22987 dkl->dkl_nsect = un->un_g.dkg_nsect; 22988 22989 #if defined(_SUNOS_VTOC_8) 22990 dkl->dkl_obs1 = un->un_g.dkg_obs1; 22991 dkl->dkl_obs2 = un->un_g.dkg_obs2; 22992 dkl->dkl_obs3 = un->un_g.dkg_obs3; 22993 for (i = 0; i < NDKMAP; i++) { 22994 dkl->dkl_map[i].dkl_cylno = un->un_map[i].dkl_cylno; 22995 dkl->dkl_map[i].dkl_nblk = un->un_map[i].dkl_nblk; 22996 } 22997 bcopy(un->un_asciilabel, dkl->dkl_asciilabel, LEN_DKL_ASCII); 22998 #elif defined(_SUNOS_VTOC_16) 22999 dkl->dkl_skew = un->un_dkg_skew; 23000 #else 23001 #error "No VTOC format defined." 
23002 #endif 23003 23004 dkl->dkl_magic = DKL_MAGIC; 23005 dkl->dkl_write_reinstruct = un->un_g.dkg_write_reinstruct; 23006 dkl->dkl_read_reinstruct = un->un_g.dkg_read_reinstruct; 23007 23008 /* Construct checksum for the new disk label */ 23009 sum = 0; 23010 sp = (short *)dkl; 23011 i = sizeof (struct dk_label) / sizeof (short); 23012 while (i--) { 23013 sum ^= *sp++; 23014 } 23015 dkl->dkl_cksum = sum; 23016 23017 mutex_exit(SD_MUTEX(un)); 23018 23019 rval = sd_set_vtoc(un, dkl); 23020 exit: 23021 kmem_free(dkl, sizeof (struct dk_label)); 23022 mutex_enter(SD_MUTEX(un)); 23023 return (rval); 23024 } 23025 23026 static int 23027 sd_dkio_set_efi(dev_t dev, caddr_t arg, int flag) 23028 { 23029 struct sd_lun *un = NULL; 23030 dk_efi_t user_efi; 23031 int rval = 0; 23032 void *buffer; 23033 23034 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) 23035 return (ENXIO); 23036 23037 if (ddi_copyin(arg, &user_efi, sizeof (dk_efi_t), flag)) 23038 return (EFAULT); 23039 23040 user_efi.dki_data = (void *)(uintptr_t)user_efi.dki_data_64; 23041 23042 if ((user_efi.dki_length % un->un_tgt_blocksize) || 23043 (user_efi.dki_length > un->un_max_xfer_size)) 23044 return (EINVAL); 23045 23046 buffer = kmem_alloc(user_efi.dki_length, KM_SLEEP); 23047 if (ddi_copyin(user_efi.dki_data, buffer, user_efi.dki_length, flag)) { 23048 rval = EFAULT; 23049 } else { 23050 /* 23051 * let's clear the vtoc labels and clear the softstate 23052 * vtoc. 
*/
		mutex_enter(SD_MUTEX(un));
		if (un->un_vtoc.v_sanity == VTOC_SANE) {
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_dkio_set_efi: CLEAR VTOC\n");
			sd_clear_vtoc(un);
			bzero(&un->un_vtoc, sizeof (struct dk_vtoc));
			mutex_exit(SD_MUTEX(un));
			/*
			 * Replace the "h"/"h,raw" minor nodes with
			 * "wd"/"wd,raw" nodes now that the VTOC label
			 * has been cleared.
			 */
			ddi_remove_minor_node(SD_DEVINFO(un), "h");
			ddi_remove_minor_node(SD_DEVINFO(un), "h,raw");
			(void) ddi_create_minor_node(SD_DEVINFO(un), "wd",
			    S_IFBLK,
			    (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE,
			    un->un_node_type, NULL);
			(void) ddi_create_minor_node(SD_DEVINFO(un), "wd,raw",
			    S_IFCHR,
			    (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE,
			    un->un_node_type, NULL);
		} else
			mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_WRITE(un, buffer, user_efi.dki_length,
		    user_efi.dki_lba, SD_PATH_DIRECT);
		if (rval == 0) {
			/*
			 * Invalidate the cached geometry so it is
			 * revalidated on next use.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_f_geometry_is_valid = FALSE;
			mutex_exit(SD_MUTEX(un));
		}
	}
	kmem_free(buffer, user_efi.dki_length);
	return (rval);
}

/*
 * Function: sd_dkio_get_mboot
 *
 * Description: This routine is the driver entry point for handling user
 *		requests to get the current device mboot (DKIOCGMBOOT)
 *
 * Arguments: dev - the device number
 *	      arg - pointer to user provided mboot structure specifying
 *		    the current mboot.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
*
 * Return Code: 0
 *		EINVAL
 *		EFAULT
 *		ENXIO
 */

static int
sd_dkio_get_mboot(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un;
	struct mboot	*mboot;
	int		rval;
	size_t		buffer_size;

	if (((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

#if defined(_SUNOS_VTOC_8)
	if ((!ISREMOVABLE(un)) || (arg == NULL)) {
#elif defined(_SUNOS_VTOC_16)
	if (arg == NULL) {
#endif
		return (EINVAL);
	}

	/*
	 * Read the mboot block, located at absolute block 0 on the target.
	 */
	buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct mboot));

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_dkio_get_mboot: allocation size: 0x%x\n", buffer_size);

	mboot = kmem_zalloc(buffer_size, KM_SLEEP);
	if ((rval = sd_send_scsi_READ(un, mboot, buffer_size, 0,
	    SD_PATH_STANDARD)) == 0) {
		/* Only the mboot structure itself is copied out. */
		if (ddi_copyout(mboot, (void *)arg,
		    sizeof (struct mboot), flag) != 0) {
			rval = EFAULT;
		}
	}
	kmem_free(mboot, buffer_size);
	return (rval);
}


/*
 * Function: sd_dkio_set_mboot
 *
 * Description: This routine is the driver entry point for handling user
 *		requests to validate and set the device master boot
 *		(DKIOCSMBOOT).
 *
 * Arguments: dev - the device number
 *	      arg - pointer to user provided mboot structure used to set the
 *		    master boot.
 *	      flag - this argument is a pass through to ddi_copyxxx()
 *		     directly from the mode argument of ioctl().
*
 * Return Code: 0
 *		EINVAL
 *		EFAULT
 *		ENXIO
 */

static int
sd_dkio_set_mboot(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un = NULL;
	struct mboot	*mboot = NULL;
	int		rval;
	ushort_t	magic;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

#if defined(_SUNOS_VTOC_8)
	/* On VTOC_8 platforms this ioctl is only accepted for removables. */
	if (!ISREMOVABLE(un)) {
		return (EINVAL);
	}
#endif

	if (arg == NULL) {
		return (EINVAL);
	}

	mboot = kmem_zalloc(sizeof (struct mboot), KM_SLEEP);

	if (ddi_copyin((const void *)arg, mboot,
	    sizeof (struct mboot), flag) != 0) {
		kmem_free(mboot, (size_t)(sizeof (struct mboot)));
		return (EFAULT);
	}

	/* Is this really a master boot record? */
	magic = LE_16(mboot->signature);
	if (magic != MBB_MAGIC) {
		kmem_free(mboot, (size_t)(sizeof (struct mboot)));
		return (EINVAL);
	}

	rval = sd_send_scsi_WRITE(un, mboot, un->un_sys_blocksize, 0,
	    SD_PATH_STANDARD);

	mutex_enter(SD_MUTEX(un));
#if defined(__i386) || defined(__amd64)
	if (rval == 0) {
		/*
		 * mboot has been written successfully.
		 * update the fdisk and vtoc tables in memory
		 */
		rval = sd_update_fdisk_and_vtoc(un);
		if ((un->un_f_geometry_is_valid == FALSE) || (rval != 0)) {
			mutex_exit(SD_MUTEX(un));
			kmem_free(mboot, (size_t)(sizeof (struct mboot)));
			return (rval);
		}
	}

	/*
	 * If the mboot write fails, write the devid anyway, what can it hurt?
	 * Also preserve the device id by writing to the disk acyl for the case
	 * where a devid has been fabricated.
	 */
	if (!ISREMOVABLE(un) && !ISCD(un) &&
	    (un->un_f_opt_fab_devid == TRUE)) {
		if (un->un_devid == NULL) {
			sd_register_devid(un, SD_DEVINFO(un),
			    SD_TARGET_IS_UNRESERVED);
		} else {
			/*
			 * The device id for this disk has been
			 * fabricated. Fabricated device id's are
			 * managed by storing them in the last 2
			 * available sectors on the drive. The device
			 * id must be preserved by writing it back out
			 * to this location.
			 */
			if (sd_write_deviceid(un) != 0) {
				ddi_devid_free(un->un_devid);
				un->un_devid = NULL;
			}
		}
	}
#else
	if (rval == 0) {
		/*
		 * mboot has been written successfully.
		 * set up the default geometry and VTOC
		 */
		if (un->un_blockcount <= DK_MAX_BLOCKS)
			sd_setup_default_geometry(un);
	}
#endif
	mutex_exit(SD_MUTEX(un));
	kmem_free(mboot, (size_t)(sizeof (struct mboot)));
	return (rval);
}


/*
 * Function: sd_setup_default_geometry
 *
 * Description: This local utility routine sets the default geometry as part of
 *		setting the device mboot.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Note: This may be redundant with sd_build_default_label.
 */

static void
sd_setup_default_geometry(struct sd_lun *un)
{
	/* zero out the soft state geometry and partition table. */
	bzero(&un->un_g, sizeof (struct dk_geom));
	bzero(&un->un_vtoc, sizeof (struct dk_vtoc));
	bzero(un->un_map, NDKMAP * (sizeof (struct dk_map)));
	un->un_asciilabel[0] = '\0';

	/*
	 * For the rpm, we use the minimum for the disk.
	 * For the head, cyl and number of sector per track,
	 * if the capacity <= 1GB, head = 64, sect = 32.
	 * else head = 255, sect 63
	 * Note: the capacity should be equal to C*H*S values.
* This will cause some truncation of size due to
	 * round off errors. For CD-ROMs, this truncation can
	 * have adverse side effects, so returning ncyl and
	 * nhead as 1. The nsect will overflow for most of
	 * CD-ROMs as nsect is of type ushort.
	 */
	if (ISCD(un)) {
		un->un_g.dkg_ncyl = 1;
		un->un_g.dkg_nhead = 1;
		un->un_g.dkg_nsect = un->un_blockcount;
	} else {
		if (un->un_blockcount <= 0x1000) {
			/* Needed for unlabeled SCSI floppies. */
			un->un_g.dkg_nhead = 2;
			un->un_g.dkg_ncyl = 80;
			un->un_g.dkg_pcyl = 80;
			un->un_g.dkg_nsect = un->un_blockcount / (2 * 80);
		} else if (un->un_blockcount <= 0x200000) {
			un->un_g.dkg_nhead = 64;
			un->un_g.dkg_nsect = 32;
			un->un_g.dkg_ncyl = un->un_blockcount / (64 * 32);
		} else {
			un->un_g.dkg_nhead = 255;
			un->un_g.dkg_nsect = 63;
			un->un_g.dkg_ncyl = un->un_blockcount / (255 * 63);
		}
		/* Truncate the capacity to an exact C*H*S multiple. */
		un->un_blockcount = un->un_g.dkg_ncyl *
		    un->un_g.dkg_nhead * un->un_g.dkg_nsect;
	}
	un->un_g.dkg_acyl = 0;
	un->un_g.dkg_bcyl = 0;
	un->un_g.dkg_intrlv = 1;
	un->un_g.dkg_rpm = 200;
	un->un_g.dkg_read_reinstruct = 0;
	un->un_g.dkg_write_reinstruct = 0;
	if (un->un_g.dkg_pcyl == 0) {
		un->un_g.dkg_pcyl = un->un_g.dkg_ncyl + un->un_g.dkg_acyl;
	}

	/* Slices 'a' and 'c' both cover the whole (truncated) disk. */
	un->un_map['a'-'a'].dkl_cylno = 0;
	un->un_map['a'-'a'].dkl_nblk = un->un_blockcount;
	un->un_map['c'-'a'].dkl_cylno = 0;
	un->un_map['c'-'a'].dkl_nblk = un->un_blockcount;
	un->un_f_geometry_is_valid = FALSE;
}


#if defined(__i386) || defined(__amd64)
/*
 * Function: sd_update_fdisk_and_vtoc
 *
 * Description: This local utility routine updates the device fdisk and vtoc
 *		as part of setting the device mboot.
*
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: 0 for success or errno-type return code.
 *
 * Note:x86: This looks like a duplicate of sd_validate_geometry(), but
 *		these did exist separately in x86 sd.c!!!
 */

static int
sd_update_fdisk_and_vtoc(struct sd_lun *un)
{
	static char	labelstring[128];
	static char	buf[256];
	char		*label = 0;
	int		count;
	int		label_rc = 0;
	int		gvalid = un->un_f_geometry_is_valid;
	int		fdisk_rval;
	int		lbasize;
	int		capacity;

	ASSERT(mutex_owned(SD_MUTEX(un)));

	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		return (EINVAL);
	}

	if (un->un_f_blockcount_is_valid == FALSE) {
		return (EINVAL);
	}

#if defined(_SUNOS_VTOC_16)
	/*
	 * Set up the "whole disk" fdisk partition; this should always
	 * exist, regardless of whether the disk contains an fdisk table
	 * or vtoc.
	 */
	un->un_map[P0_RAW_DISK].dkl_cylno = 0;
	un->un_map[P0_RAW_DISK].dkl_nblk = un->un_blockcount;
#endif	/* defined(_SUNOS_VTOC_16) */

	/*
	 * copy the lbasize and capacity so that if they're
	 * reset while we're not holding the SD_MUTEX(un), we will
	 * continue to use valid values after the SD_MUTEX(un) is
	 * reacquired.
	 */
	lbasize = un->un_tgt_blocksize;
	capacity = un->un_blockcount;

	/*
	 * refresh the logical and physical geometry caches.
	 * (data from mode sense format/rigid disk geometry pages,
	 * and scsi_ifgetcap("geometry").
	 */
	sd_resync_geom_caches(un, capacity, lbasize, SD_PATH_DIRECT);

	/*
	 * Only DIRECT ACCESS devices will have Sun labels.
	 * CD's supposedly have a Sun label, too
	 */
	if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT || ISREMOVABLE(un)) {
		fdisk_rval = sd_read_fdisk(un, capacity, lbasize,
		    SD_PATH_DIRECT);
		if (fdisk_rval == SD_CMD_FAILURE) {
			ASSERT(mutex_owned(SD_MUTEX(un)));
			return (EIO);
		}

		if (fdisk_rval == SD_CMD_RESERVATION_CONFLICT) {
			ASSERT(mutex_owned(SD_MUTEX(un)));
			return (EACCES);
		}

		if (un->un_solaris_size <= DK_LABEL_LOC) {
			/*
			 * Found fdisk table but no Solaris partition entry,
			 * so don't call sd_uselabel() and don't create
			 * a default label.
			 */
			label_rc = 0;
			un->un_f_geometry_is_valid = TRUE;
			goto no_solaris_partition;
		}

#if defined(_SUNOS_VTOC_8)
		label = (char *)un->un_asciilabel;
#elif defined(_SUNOS_VTOC_16)
		label = (char *)un->un_vtoc.v_asciilabel;
#else
#error "No VTOC format defined."
#endif
	} else if (capacity < 0) {
		ASSERT(mutex_owned(SD_MUTEX(un)));
		return (EINVAL);
	}

	/*
	 * For Removable media We reach here if we have found a
	 * SOLARIS PARTITION.
	 * If un_f_geometry_is_valid is FALSE it indicates that the SOLARIS
	 * PARTITION has changed from the previous one, hence we will setup a
	 * default VTOC in this case.
	 */
	if (un->un_f_geometry_is_valid == FALSE) {
		sd_build_default_label(un);
		label_rc = 0;
	}

no_solaris_partition:
	if ((!ISREMOVABLE(un) ||
	    (ISREMOVABLE(un) && un->un_mediastate == DKIO_EJECTED)) &&
	    (un->un_state == SD_STATE_NORMAL && gvalid == FALSE)) {
		/*
		 * Print out a message indicating who and what we are.
		 * We do this only when we happen to really validate the
		 * geometry. We may call sd_validate_geometry() at other
		 * times, ioctl()'s like Get VTOC in which case we
		 * don't want to print the label.
		 * If the geometry is valid, print the label string,
		 * else print vendor and product info, if available
		 */
		if ((un->un_f_geometry_is_valid == TRUE) && (label != NULL)) {
			SD_INFO(SD_LOG_IOCTL_DKIO, un, "?<%s>\n", label);
		} else {
			mutex_enter(&sd_label_mutex);
			sd_inq_fill(SD_INQUIRY(un)->inq_vid, VIDMAX,
			    labelstring);
			sd_inq_fill(SD_INQUIRY(un)->inq_pid, PIDMAX,
			    &labelstring[64]);
			(void) sprintf(buf, "?Vendor '%s', product '%s'",
			    labelstring, &labelstring[64]);
			if (un->un_f_blockcount_is_valid == TRUE) {
				(void) sprintf(&buf[strlen(buf)],
				    ", %" PRIu64 " %u byte blocks\n",
				    un->un_blockcount,
				    un->un_tgt_blocksize);
			} else {
				(void) sprintf(&buf[strlen(buf)],
				    ", (unknown capacity)\n");
			}
			SD_INFO(SD_LOG_IOCTL_DKIO, un, buf);
			mutex_exit(&sd_label_mutex);
		}
	}

#if defined(_SUNOS_VTOC_16)
	/*
	 * If we have valid geometry, set up the remaining fdisk partitions.
	 * Note that dkl_cylno is not used for the fdisk map entries, so
	 * we set it to an entirely bogus value.
	 */
	for (count = 0; count < FD_NUMPART; count++) {
		un->un_map[FDISK_P1 + count].dkl_cylno = -1;
		un->un_map[FDISK_P1 + count].dkl_nblk =
		    un->un_fmap[count].fmap_nblk;
		un->un_offset[FDISK_P1 + count] =
		    un->un_fmap[count].fmap_start;
	}
#endif

	/* Compute the starting offset of each Solaris slice. */
	for (count = 0; count < NDKMAP; count++) {
#if defined(_SUNOS_VTOC_8)
		struct dk_map *lp = &un->un_map[count];
		un->un_offset[count] =
		    un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno;
#elif defined(_SUNOS_VTOC_16)
		struct dkl_partition *vp = &un->un_vtoc.v_part[count];
		un->un_offset[count] = vp->p_start + un->un_solaris_offset;
#else
#error "No VTOC format defined."
#endif
	}

	ASSERT(mutex_owned(SD_MUTEX(un)));
	return (label_rc);
}
#endif


/*
 * Function: sd_check_media
 *
 * Description: This utility routine implements the functionality for the
 *		DKIOCSTATE ioctl. This ioctl blocks the user thread until the
 *		driver state changes from that specified by the user
 *		(inserted or ejected). For example, if the user specifies
 *		DKIO_EJECTED and the current media state is inserted this
 *		routine will immediately return DKIO_INSERTED. However, if the
 *		current media state is not inserted the user thread will be
 *		blocked until the drive state changes. If DKIO_NONE is specified
 *		the user thread will block until a drive state change occurs.
 *
 * Arguments: dev - the device number
 *	      state - user pointer to a dkio_state, updated with the current
 *		      drive state at return.
 *
 * Return Code: ENXIO
 *		EIO
 *		EAGAIN
 *		EINTR
 */

static int
sd_check_media(dev_t dev, enum dkio_state state)
{
	struct sd_lun		*un = NULL;
	enum dkio_state		prev_state;
	opaque_t		token = NULL;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n");

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: "
	    "state=%x, mediastate=%x\n", state, un->un_mediastate);

	prev_state = un->un_mediastate;

	/* is there anything to do?
*/
	if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) {
		/*
		 * submit the request to the scsi_watch service;
		 * scsi_media_watch_cb() does the real work
		 */
		mutex_exit(SD_MUTEX(un));

		/*
		 * This change handles the case where a scsi watch request is
		 * added to a device that is powered down. To accomplish this
		 * we power up the device before adding the scsi watch request,
		 * since the scsi watch sends a TUR directly to the device
		 * which the device cannot handle if it is powered down.
		 */
		if (sd_pm_entry(un) != DDI_SUCCESS) {
			mutex_enter(SD_MUTEX(un));
			goto done;
		}

		token = scsi_watch_request_submit(SD_SCSI_DEVP(un),
		    sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
		    (caddr_t)dev);

		sd_pm_exit(un);

		mutex_enter(SD_MUTEX(un));
		if (token == NULL) {
			rval = EAGAIN;
			goto done;
		}

		/*
		 * This is a special case IOCTL that doesn't return
		 * until the media state changes. Routine sdpower
		 * knows about and handles this so don't count it
		 * as an active cmd in the driver, which would
		 * keep the device busy to the pm framework.
		 * If the count isn't decremented the device can't
		 * be powered down.
		 */
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);

		/*
		 * if a prior request had been made, this will be the same
		 * token, as scsi_watch was designed that way.
		 */
		un->un_swr_token = token;
		un->un_specified_mediastate = state;

		/*
		 * now wait for media change
		 * we will not be signalled unless mediastate == state but it is
		 * still better to test for this condition, since there is a
		 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED
		 */
		SD_TRACE(SD_LOG_COMMON, un,
		    "sd_check_media: waiting for media state change\n");
		while (un->un_mediastate == state) {
			if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
				SD_TRACE(SD_LOG_COMMON, un,
				    "sd_check_media: waiting for media state "
				    "was interrupted\n");
				un->un_ncmds_in_driver++;
				rval = EINTR;
				goto done;
			}
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_check_media: received signal, state=%x\n",
			    un->un_mediastate);
		}
		/*
		 * Inc the counter to indicate the device once again
		 * has an active outstanding cmd.
		 */
		un->un_ncmds_in_driver++;
	}

	/* invalidate geometry */
	if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
		sr_ejected(un);
	}

	if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
		uint64_t	capacity;
		uint_t		lbasize;

		SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
		mutex_exit(SD_MUTEX(un));
		/*
		 * Since the following routines use SD_PATH_DIRECT, we must
		 * call PM directly before the upcoming disk accesses. This
		 * may cause the disk to be power/spin up.
		 */

		if (sd_pm_entry(un) == DDI_SUCCESS) {
			rval = sd_send_scsi_READ_CAPACITY(un,
			    &capacity,
			    &lbasize, SD_PATH_DIRECT);
			if (rval != 0) {
				sd_pm_exit(un);
				mutex_enter(SD_MUTEX(un));
				goto done;
			}
		} else {
			rval = EIO;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}
		mutex_enter(SD_MUTEX(un));

		sd_update_block_info(un, lbasize, capacity);

		un->un_f_geometry_is_valid = FALSE;
		(void) sd_validate_geometry(un, SD_PATH_DIRECT);

		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
		    SD_PATH_DIRECT);
		sd_pm_exit(un);

		mutex_enter(SD_MUTEX(un));
	}
done:
	un->un_f_watcht_stopped = FALSE;
	if (un->un_swr_token) {
		/*
		 * Use of this local token and the mutex ensures that we avoid
		 * some race conditions associated with terminating the
		 * scsi watch.
		 */
		token = un->un_swr_token;
		un->un_swr_token = (opaque_t)NULL;
		mutex_exit(SD_MUTEX(un));
		(void) scsi_watch_request_terminate(token,
		    SCSI_WATCH_TERMINATE_WAIT);
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * Update the capacity kstat value, if no media previously
	 * (capacity kstat is 0) and a media has been inserted
	 * (un_f_blockcount_is_valid == TRUE)
	 * This is a more generic way than checking for ISREMOVABLE.
	 */
	if (un->un_errstats) {
		struct sd_errstats	*stp = NULL;

		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		if ((stp->sd_capacity.value.ui64 == 0) &&
		    (un->un_f_blockcount_is_valid == TRUE)) {
			stp->sd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_blockcount *
			    un->un_sys_blocksize);
		}
	}
	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n");
	return (rval);
}


/*
 * Function: sd_delayed_cv_broadcast
 *
 * Description: Delayed cv_broadcast to allow for target to recover from media
 *		insertion.
 *
 * Arguments: arg - driver soft state (unit) structure
 */

static void
sd_delayed_cv_broadcast(void *arg)
{
	struct sd_lun *un = arg;

	SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n");

	mutex_enter(SD_MUTEX(un));
	/* The timeout has fired; clear the pending timeout id. */
	un->un_dcvb_timeid = NULL;
	cv_broadcast(&un->un_state_cv);
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_media_watch_cb
 *
 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This
 *		routine processes the TUR sense data and updates the driver
 *		state if a transition has occurred. The user thread
 *		(sd_check_media) is then signalled.
 *
 *   Arguments: arg - the device 'dev_t' is used for context to discriminate
 *			among multiple watches that share this callback function
 *		resultp - scsi watch facility result packet containing scsi
 *			packet, status byte and sense data
 *
 * Return Code: 0 for success, -1 for failure
 */

static int
sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
	struct sd_lun		*un;
	struct scsi_status	*statusp = resultp->statusp;
	struct scsi_extended_sense *sensep = resultp->sensep;
	enum dkio_state		state = DKIO_NONE;
	dev_t			dev = (dev_t)arg;
	uchar_t			actual_sense_length;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		/* -1 terminates this watch request */
		return (-1);
	}
	actual_sense_length = resultp->actual_sense_length;

	mutex_enter(SD_MUTEX(un));
	SD_TRACE(SD_LOG_COMMON, un,
	    "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
	    *((char *)statusp), (void *)sensep, actual_sense_length);

	if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
		un->un_mediastate = DKIO_DEV_GONE;
		printf("sd_media_watch_cb: dev gone\n");
		cv_broadcast(&un->un_state_cv);
		mutex_exit(SD_MUTEX(un));

		return (0);
	}

	/*
	 * If there was a check condition then sensep points to valid sense data
	 * If status was not a check condition but a reservation or busy status
	 * then the new state is DKIO_NONE
	 */
	if (sensep != NULL) {
		SD_INFO(SD_LOG_COMMON, un,
		    "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
		    sensep->es_key, sensep->es_add_code, sensep->es_qual_code);
		/* This routine only uses up to 13 bytes of sense data. */
		if (actual_sense_length >= 13) {
			if (sensep->es_key == KEY_UNIT_ATTENTION) {
				/*
				 * ASC 0x28: not-ready-to-ready transition
				 * (medium may have changed) per SPC.
				 */
				if (sensep->es_add_code == 0x28) {
					state = DKIO_INSERTED;
				}
			} else {
				/*
				 * if 02/04/02  means that the host
				 * should send start command. Explicitly
				 * leave the media state as is
				 * (inserted) as the media is inserted
				 * and host has stopped device for PM
				 * reasons. Upon next true read/write
				 * to this media will bring the
				 * device to the right state good for
				 * media access.
				 */
				/* NOT READY / ASC 0x3a: medium not present */
				if ((sensep->es_key == KEY_NOT_READY) &&
				    (sensep->es_add_code == 0x3a)) {
					state = DKIO_EJECTED;
				}

				/*
				 * If the drive is busy with an operation
				 * or long write, keep the media in an
				 * inserted state.
				 */

				if ((sensep->es_key == KEY_NOT_READY) &&
				    (sensep->es_add_code == 0x04) &&
				    ((sensep->es_qual_code == 0x02) ||
				    (sensep->es_qual_code == 0x07) ||
				    (sensep->es_qual_code == 0x08))) {
					state = DKIO_INSERTED;
				}
			}
		}
	} else if ((*((char *)statusp) == STATUS_GOOD) &&
	    (resultp->pkt->pkt_reason == CMD_CMPLT)) {
		/* TUR completed cleanly: media is present */
		state = DKIO_INSERTED;
	}

	SD_TRACE(SD_LOG_COMMON, un,
	    "sd_media_watch_cb: state=%x, specified=%x\n",
	    state, un->un_specified_mediastate);

	/*
	 * now signal the waiting thread if this is *not* the specified state;
	 * delay the signal if the state is DKIO_INSERTED to allow the target
	 * to recover
	 */
	if (state != un->un_specified_mediastate) {
		un->un_mediastate = state;
		if (state == DKIO_INSERTED) {
			/*
			 * delay the signal to give the drive a chance
			 * to do what it apparently needs to do
			 */
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_media_watch_cb: delayed cv_broadcast\n");
			if (un->un_dcvb_timeid == NULL) {
				un->un_dcvb_timeid =
				    timeout(sd_delayed_cv_broadcast, un,
				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
			}
		} else {
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_media_watch_cb: immediate cv_broadcast\n");
			cv_broadcast(&un->un_state_cv);
		}
	}
	mutex_exit(SD_MUTEX(un));
	return (0);
}


/*
 * Function: sd_dkio_get_temp
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to get the disk temperature.
 *
 *   Arguments: dev  - the device number
 *		arg  - pointer to user provided dk_temperature structure.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 *		EAGAIN
 */

static int
sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct dk_temperature	*dktemp = NULL;
	uchar_t			*temperature_page;
	int			rval = 0;
	int			path_flag = SD_PATH_STANDARD;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP);

	/* copyin the disk temp argument to get the user flags */
	if (ddi_copyin((void *)arg, dktemp,
	    sizeof (struct dk_temperature), flag) != 0) {
		rval = EFAULT;
		goto done;
	}

	/* Initialize the temperature to invalid. */
	dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
	dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;

	/*
	 * Note: Investigate removing the "bypass pm" semantic.
	 * Can we just bypass PM always?
	 */
	if (dktemp->dkt_flags & DKT_BYPASS_PM) {
		path_flag = SD_PATH_DIRECT;
		ASSERT(!mutex_owned(&un->un_pm_mutex));
		mutex_enter(&un->un_pm_mutex);
		if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
			/*
			 * If DKT_BYPASS_PM is set, and the drive happens to be
			 * in low power mode, we can not wake it up, Need to
			 * return EAGAIN.
			 */
			mutex_exit(&un->un_pm_mutex);
			rval = EAGAIN;
			goto done;
		} else {
			/*
			 * Indicate to PM the device is busy. This is required
			 * to avoid a race - i.e. the ioctl is issuing a
			 * command and the pm framework brings down the device
			 * to low power mode (possible power cut-off on some
			 * platforms).
			 */
			mutex_exit(&un->un_pm_mutex);
			if (sd_pm_entry(un) != DDI_SUCCESS) {
				rval = EAGAIN;
				goto done;
			}
		}
	}

	temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);

	/* Fetch the temperature log page (presumably SPC page 0x0D). */
	if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page,
	    TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) {
		goto done2;
	}

	/*
	 * For the current temperature verify that the parameter length is 0x02
	 * and the parameter code is 0x00
	 */
	if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
	    (temperature_page[5] == 0x00)) {
		if (temperature_page[9] == 0xFF) {
			/* 0xFF means the drive reports no valid reading */
			dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
		} else {
			dktemp->dkt_cur_temp = (short)(temperature_page[9]);
		}
	}

	/*
	 * For the reference temperature verify that the parameter
	 * length is 0x02 and the parameter code is 0x01
	 */
	if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
	    (temperature_page[11] == 0x01)) {
		if (temperature_page[15] == 0xFF) {
			dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
		} else {
			dktemp->dkt_ref_temp = (short)(temperature_page[15]);
		}
	}

	/* Do the copyout regardless of the temperature commands status. */
	if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
	    flag) != 0) {
		rval = EFAULT;
	}

done2:
	/* path_flag == SD_PATH_DIRECT implies sd_pm_entry() succeeded above */
	if (path_flag == SD_PATH_DIRECT) {
		sd_pm_exit(un);
	}

	kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
done:
	if (dktemp != NULL) {
		kmem_free(dktemp, sizeof (struct dk_temperature));
	}

	return (rval);
}


/*
 * Function: sd_log_page_supported
 *
 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of
 *		supported log pages.
 *
 *   Arguments: un - driver soft state (unit) structure
 *		log_page - the log page code to look for
 *
 * Return Code: -1 - on error (log sense is optional and may not be supported).
 *		0  - log page not found.
 *		1  - log page found.
 */

static int
sd_log_page_supported(struct sd_lun *un, int log_page)
{
	uchar_t *log_page_data;
	int	i;
	int	match = 0;
	int	log_size;

	log_page_data = kmem_zalloc(0xFF, KM_SLEEP);

	/* Page 0x00 returns the list of supported log page codes. */
	if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0,
	    SD_PATH_DIRECT) != 0) {
		SD_ERROR(SD_LOG_COMMON, un,
		    "sd_log_page_supported: failed log page retrieval\n");
		kmem_free(log_page_data, 0xFF);
		return (-1);
	}
	/* byte 3 of the log page header holds the page length */
	log_size = log_page_data[3];

	/*
	 * The list of supported log pages start from the fourth byte. Check
	 * until we run out of log pages or a match is found.
	 */
	for (i = 4; (i < (log_size + 4)) && !match; i++) {
		if (log_page_data[i] == log_page) {
			match++;
		}
	}
	kmem_free(log_page_data, 0xFF);
	return (match);
}


/*
 * Function: sd_mhdioc_failfast
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to enable/disable the multihost failfast option.
 *		(MHIOCENFAILFAST)
 *
 *   Arguments: dev - the device number
 *		arg - user specified probing interval.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 */

static int
sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un = NULL;
	int		mh_time;
	int		rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag))
		return (EFAULT);

	if (mh_time) {
		/* non-zero interval: enable failfast and start the watch */
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status |= SD_FAILFAST;
		mutex_exit(SD_MUTEX(un));
		/*
		 * If mh_time is INT_MAX, then this ioctl is being used for
		 * SCSI-3 PGR purposes, and we don't need to spawn watch thread.
		 */
		if (mh_time != INT_MAX) {
			rval = sd_check_mhd(dev, mh_time);
		}
	} else {
		/* zero interval: terminate the watch and disable failfast */
		(void) sd_check_mhd(dev, 0);
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status &= ~SD_FAILFAST;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 * Function: sd_mhdioc_takeown
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to forcefully acquire exclusive access rights to the
 *		multihost disk (MHIOCTKOWN).
 *
 *   Arguments: dev - the device number
 *		arg - user provided structure specifying the delay
 *			parameters in milliseconds
 *		flag - this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 */

static int
sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct mhioctkown	*tkown = NULL;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* arg may be NULL: take ownership with the default delays */
	if (arg != NULL) {
		tkown = (struct mhioctkown *)
		    kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP);
		rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag);
		if (rval != 0) {
			rval = EFAULT;
			goto error;
		}
	}

	rval = sd_take_ownership(dev, tkown);
	mutex_enter(SD_MUTEX(un));
	if (rval == 0) {
		un->un_resvd_status |= SD_RESERVE;
		/* reinstate_resv_delay is in msec; convert to usec */
		if (tkown != NULL && tkown->reinstate_resv_delay != 0) {
			sd_reinstate_resv_delay =
			    tkown->reinstate_resv_delay * 1000;
		} else {
			sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
		}
		/*
		 * Give the scsi_watch routine interval set by
		 * the MHIOCENFAILFAST ioctl precedence here.
		 */
		if ((un->un_resvd_status & SD_FAILFAST) == 0) {
			mutex_exit(SD_MUTEX(un));
			(void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000);
			SD_TRACE(SD_LOG_IOCTL_MHD, un,
			    "sd_mhdioc_takeown : %d\n",
			    sd_reinstate_resv_delay);
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
		    sd_mhd_reset_notify_cb, (caddr_t)un);
	} else {
		un->un_resvd_status &= ~SD_RESERVE;
		mutex_exit(SD_MUTEX(un));
	}

error:
	if (tkown != NULL) {
		kmem_free(tkown, sizeof (struct mhioctkown));
	}
	return (rval);
}


/*
 * Function: sd_mhdioc_release
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to release exclusive access rights to the multihost
 *		disk (MHIOCRELEASE).
 *
 *   Arguments: dev - the device number
 *
 * Return Code: 0
 *		ENXIO
 */

static int
sd_mhdioc_release(dev_t dev)
{
	struct sd_lun		*un = NULL;
	timeout_id_t		resvd_timeid_save;
	int			resvd_status_save;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * Save the current reservation state so it can be restored if the
	 * release fails, then cancel any pending reservation-reclaim timeout.
	 */
	mutex_enter(SD_MUTEX(un));
	resvd_status_save = un->un_resvd_status;
	un->un_resvd_status &=
	    ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE);
	if (un->un_resvd_timeid) {
		resvd_timeid_save = un->un_resvd_timeid;
		un->un_resvd_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(resvd_timeid_save);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * destroy any pending timeout thread that may be attempting to
	 * reinstate reservation on this device.
	 */
	sd_rmv_resv_reclaim_req(dev);

	if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) {
		/* Release succeeded: stop the watch unless failfast needs it */
		mutex_enter(SD_MUTEX(un));
		if ((un->un_mhd_token) &&
		    ((un->un_resvd_status & SD_FAILFAST) == 0)) {
			mutex_exit(SD_MUTEX(un));
			(void) sd_check_mhd(dev, 0);
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
		    sd_mhd_reset_notify_cb, (caddr_t)un);
	} else {
		/*
		 * sd_mhd_watch_cb will restart the resvd recover timeout thread
		 */
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status = resvd_status_save;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 * Function: sd_mhdioc_register_devid
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to register the device id (MHIOCREREGISTERDEVID).
 *
 *		Note: The implementation for this ioctl has been updated to
 *		be consistent with the original PSARC case (1999/357)
 *		(4375899, 4241671, 4220005)
 *
 *   Arguments: dev - the device number
 *
 * Return Code: 0
 *		ENXIO
 */

static int
sd_mhdioc_register_devid(dev_t dev)
{
	struct sd_lun	*un = NULL;
	int		rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	/* If a devid already exists, de-register it */
	if (un->un_devid != NULL) {
		ddi_devid_unregister(SD_DEVINFO(un));
		/*
		 * After unregistering the devid, the devid memory
		 * must also be freed.
		 */
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	/* Check for reservation conflict */
	mutex_exit(SD_MUTEX(un));
	rval = sd_send_scsi_TEST_UNIT_READY(un, 0);
	mutex_enter(SD_MUTEX(un));

	switch (rval) {
	case 0:
		/* TUR succeeded: target is accessible, register the devid */
		sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED);
		break;
	case EACCES:
		/* reservation conflict: leave rval as EACCES for the caller */
		break;
	default:
		rval = EIO;
	}

	mutex_exit(SD_MUTEX(un));
	return (rval);
}


/*
 * Function: sd_mhdioc_inkeys
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to issue the SCSI-3 Persistent In Read Keys command
 *		to the device (MHIOCGRP_INKEYS).
 *
 *   Arguments: dev - the device number
 *		arg - user provided in_keys structure
 *		flag - this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
 *
 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
 *		ENXIO
 *		EFAULT
 */

static int
sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un;
	mhioc_inkeys_t	inkeys;
	int		rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct mhioc_inkeys32	inkeys32;

		if (ddi_copyin(arg, &inkeys32,
		    sizeof (struct mhioc_inkeys32), flag) != 0) {
			return (EFAULT);
		}
		/* widen the 32-bit user pointer to the native list pointer */
		inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		inkeys32.generation = inkeys.generation;
		if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}
	rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
	if (rval != 0) {
		return (rval);
	}
	if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}

#endif /* _MULTI_DATAMODEL */

	return (rval);
}


/*
 * Function: sd_mhdioc_inresv
 *
 * Description: This routine is the driver entry point for handling ioctl
 *		requests to issue the SCSI-3 Persistent In Read Reservations
 *		command to the device (MHIOCGRP_INRESV).
 *
 *   Arguments: dev - the device number
 *		arg - user provided in_resv structure
 *		flag - this argument is a pass through to ddi_copyxxx()
 *			directly from the mode argument of ioctl().
24411 * 24412 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 24413 * ENXIO 24414 * EFAULT 24415 */ 24416 24417 static int 24418 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 24419 { 24420 struct sd_lun *un; 24421 mhioc_inresvs_t inresvs; 24422 int rval = 0; 24423 24424 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24425 return (ENXIO); 24426 } 24427 24428 #ifdef _MULTI_DATAMODEL 24429 24430 switch (ddi_model_convert_from(flag & FMODELS)) { 24431 case DDI_MODEL_ILP32: { 24432 struct mhioc_inresvs32 inresvs32; 24433 24434 if (ddi_copyin(arg, &inresvs32, 24435 sizeof (struct mhioc_inresvs32), flag) != 0) { 24436 return (EFAULT); 24437 } 24438 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 24439 if ((rval = sd_persistent_reservation_in_read_resv(un, 24440 &inresvs, flag)) != 0) { 24441 return (rval); 24442 } 24443 inresvs32.generation = inresvs.generation; 24444 if (ddi_copyout(&inresvs32, arg, 24445 sizeof (struct mhioc_inresvs32), flag) != 0) { 24446 return (EFAULT); 24447 } 24448 break; 24449 } 24450 case DDI_MODEL_NONE: 24451 if (ddi_copyin(arg, &inresvs, 24452 sizeof (mhioc_inresvs_t), flag) != 0) { 24453 return (EFAULT); 24454 } 24455 if ((rval = sd_persistent_reservation_in_read_resv(un, 24456 &inresvs, flag)) != 0) { 24457 return (rval); 24458 } 24459 if (ddi_copyout(&inresvs, arg, 24460 sizeof (mhioc_inresvs_t), flag) != 0) { 24461 return (EFAULT); 24462 } 24463 break; 24464 } 24465 24466 #else /* ! _MULTI_DATAMODEL */ 24467 24468 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 24469 return (EFAULT); 24470 } 24471 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 24472 if (rval != 0) { 24473 return (rval); 24474 } 24475 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 24476 return (EFAULT); 24477 } 24478 24479 #endif /* ! 
_MULTI_DATAMODEL */ 24480 24481 return (rval); 24482 } 24483 24484 24485 /* 24486 * The following routines support the clustering functionality described below 24487 * and implement lost reservation reclaim functionality. 24488 * 24489 * Clustering 24490 * ---------- 24491 * The clustering code uses two different, independent forms of SCSI 24492 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 24493 * Persistent Group Reservations. For any particular disk, it will use either 24494 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 24495 * 24496 * SCSI-2 24497 * The cluster software takes ownership of a multi-hosted disk by issuing the 24498 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the 24499 * MHIOCRELEASE ioctl.Closely related is the MHIOCENFAILFAST ioctl -- a cluster, 24500 * just after taking ownership of the disk with the MHIOCTKOWN ioctl then issues 24501 * the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the driver. The 24502 * meaning of failfast is that if the driver (on this host) ever encounters the 24503 * scsi error return code RESERVATION_CONFLICT from the device, it should 24504 * immediately panic the host. The motivation for this ioctl is that if this 24505 * host does encounter reservation conflict, the underlying cause is that some 24506 * other host of the cluster has decided that this host is no longer in the 24507 * cluster and has seized control of the disks for itself. Since this host is no 24508 * longer in the cluster, it ought to panic itself. The MHIOCENFAILFAST ioctl 24509 * does two things: 24510 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 24511 * error to panic the host 24512 * (b) it sets up a periodic timer to test whether this host still has 24513 * "access" (in that no other host has reserved the device): if the 24514 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. 
 * purpose of that periodic timer is to handle scenarios where the host is
 * otherwise temporarily quiescent, temporarily doing no real i/o.
 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host,
 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for
 * the device itself.
 *
 * SCSI-3 PGR
 * A direct semantic implementation of the SCSI-3 Persistent Reservation
 * facility is supported through the shared multihost disk ioctls
 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
 * MHIOCGRP_PREEMPTANDABORT)
 *
 * Reservation Reclaim:
 * --------------------
 * To support the lost reservation reclaim operations this driver creates a
 * single thread to handle reinstating reservations on all devices that have
 * lost reservations. sd_resv_reclaim_requests are logged for all devices that
 * have LOST RESERVATIONS when the scsi watch facility calls back
 * sd_mhd_watch_cb and the reservation reclaim thread loops through the
 * requests to regain the lost reservations.
 */

/*
 * Function: sd_check_mhd()
 *
 * Description: This function sets up and submits a scsi watch request or
 *		terminates an existing watch request. This routine is used in
 *		support of reservation reclaim.
 *
 *   Arguments: dev - the device 'dev_t' is used for context to discriminate
 *			among multiple watches that share the callback function
 *		interval - the number of milliseconds specifying the watch
 *			interval for issuing TEST UNIT READY commands. If
 *			set to 0 the watch should be terminated. If the
 *			interval is set to 0 and if the device is required
 *			to hold reservation while disabling failfast, the
 *			watch is restarted with an interval of
 *			reinstate_resv_delay.
 *
 * Return Code: 0 - Successful submit/terminate of scsi watch request
 *		ENXIO - Indicates an invalid device was specified
 *		EAGAIN - Unable to submit the scsi watch request
 */

static int
sd_check_mhd(dev_t dev, int interval)
{
	struct sd_lun	*un;
	opaque_t	token;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* is this a watch termination request? */
	if (interval == 0) {
		mutex_enter(SD_MUTEX(un));
		/* if there is an existing watch task then terminate it */
		if (un->un_mhd_token) {
			token = un->un_mhd_token;
			un->un_mhd_token = NULL;
			mutex_exit(SD_MUTEX(un));
			(void) scsi_watch_request_terminate(token,
			    SCSI_WATCH_TERMINATE_WAIT);
			mutex_enter(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Note: If we return here we don't check for the
			 * failfast case. This is the original legacy
			 * implementation but perhaps we should be checking
			 * the failfast case.
			 */
			return (0);
		}
		/*
		 * If the device is required to hold reservation while
		 * disabling failfast, we need to restart the scsi_watch
		 * routine with an interval of reinstate_resv_delay.
		 */
		if (un->un_resvd_status & SD_RESERVE) {
			/* sd_reinstate_resv_delay is in usec; watch in msec */
			interval = sd_reinstate_resv_delay/1000;
		} else {
			/* no failfast so bail */
			mutex_exit(SD_MUTEX(un));
			return (0);
		}
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * adjust minimum time interval to 1 second,
	 * and convert from msecs to usecs
	 */
	if (interval > 0 && interval < 1000) {
		interval = 1000;
	}
	interval *= 1000;

	/*
	 * submit the request to the scsi_watch service
	 */
	token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval,
	    SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev);
	if (token == NULL) {
		return (EAGAIN);
	}

	/*
	 * save token for termination later on
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_mhd_token = token;
	mutex_exit(SD_MUTEX(un));
	return (0);
}


/*
 * Function: sd_mhd_watch_cb()
 *
 * Description: This function is the call back function used by the scsi watch
 *		facility. The scsi watch facility sends the "Test Unit Ready"
 *		and processes the status. If applicable (i.e. a "Unit Attention"
 *		status and automatic "Request Sense" not used) the scsi watch
 *		facility will send a "Request Sense" and retrieve the sense data
 *		to be passed to this callback function. In either case the
 *		automatic "Request Sense" or the facility submitting one, this
 *		callback is passed the status and sense data.
 *
 *   Arguments: arg - the device 'dev_t' is used for context to discriminate
 *			among multiple watches that share this callback function
 *		resultp - scsi watch facility result packet containing scsi
 *			packet, status byte and sense data
 *
 * Return Code: 0 - continue the watch task
 *		non-zero - terminate the watch task
 */

static int
sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
	struct sd_lun			*un;
	struct scsi_status		*statusp;
	struct scsi_extended_sense	*sensep;
	struct scsi_pkt			*pkt;
	uchar_t				actual_sense_length;
	dev_t				dev = (dev_t)arg;

	ASSERT(resultp != NULL);
	statusp			= resultp->statusp;
	sensep			= resultp->sensep;
	pkt			= resultp->pkt;
	actual_sense_length	= resultp->actual_sense_length;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		/* non-zero return terminates this watch */
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_IOCTL_MHD, un,
	    "sd_mhd_watch_cb: reason '%s', status '%s'\n",
	    scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp)));

	/* Begin processing of the status and/or sense data */
	if (pkt->pkt_reason != CMD_CMPLT) {
		/* Handle the incomplete packet */
		sd_mhd_watch_incomplete(un, pkt);
		return (0);
	} else if (*((unsigned char *)statusp) != STATUS_GOOD) {
		if (*((unsigned char *)statusp)
		    == STATUS_RESERVATION_CONFLICT) {
			/*
			 * Handle a reservation conflict by panicking if
			 * configured for failfast or by logging the conflict
			 * and updating the reservation status
			 */
			mutex_enter(SD_MUTEX(un));
			if ((un->un_resvd_status & SD_FAILFAST) &&
			    (sd_failfast_enable)) {
				panic("Reservation Conflict");
				/*NOTREACHED*/
			}
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_cb: Reservation Conflict\n");
			un->un_resvd_status |= SD_RESERVATION_CONFLICT;
			mutex_exit(SD_MUTEX(un));
		}
	}

	if (sensep != NULL) {
		if (actual_sense_length >= (SENSE_LENGTH - 2)) {
			mutex_enter(SD_MUTEX(un));
			if ((sensep->es_add_code == SD_SCSI_RESET_SENSE_CODE) &&
			    (un->un_resvd_status & SD_RESERVE)) {
				/*
				 * The additional sense code indicates a power
				 * on or bus device reset has occurred; update
				 * the reservation status.
				 */
				un->un_resvd_status |=
				    (SD_LOST_RESERVE | SD_WANT_RESERVE);
				SD_INFO(SD_LOG_IOCTL_MHD, un,
				    "sd_mhd_watch_cb: Lost Reservation\n");
			}
		} else {
			/* truncated sense data: nothing more to do this pass */
			return (0);
		}
	} else {
		mutex_enter(SD_MUTEX(un));
	}
	/* SD_MUTEX is held from here to the final mutex_exit below */

	if ((un->un_resvd_status & SD_RESERVE) &&
	    (un->un_resvd_status & SD_LOST_RESERVE)) {
		if (un->un_resvd_status & SD_WANT_RESERVE) {
			/*
			 * A reset occurred in between the last probe and this
			 * one so if a timeout is pending cancel it.
			 */
			if (un->un_resvd_timeid) {
				timeout_id_t temp_id = un->un_resvd_timeid;
				un->un_resvd_timeid = NULL;
				mutex_exit(SD_MUTEX(un));
				(void) untimeout(temp_id);
				mutex_enter(SD_MUTEX(un));
			}
			un->un_resvd_status &= ~SD_WANT_RESERVE;
		}
		if (un->un_resvd_timeid == 0) {
			/* Schedule a timeout to handle the lost reservation */
			un->un_resvd_timeid = timeout(sd_mhd_resvd_recover,
			    (void *)dev,
			    drv_usectohz(sd_reinstate_resv_delay));
		}
	}
	mutex_exit(SD_MUTEX(un));
	return (0);
}


/*
 * Function: sd_mhd_watch_incomplete()
 *
 * Description: This function is used to find out why a scsi pkt sent by the
 *		scsi watch facility was not completed. Under some scenarios this
 *		routine will return. Otherwise it will send a bus reset to see
 *		if the drive is still online.
 *
 *   Arguments: un - driver soft state (unit) structure
 *		pkt - incomplete scsi pkt
 */

static void
sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt)
{
	int	be_chatty;	/* nonzero: log transport failures below */
	int	perr;		/* nonzero: pkt saw a parity error */

	ASSERT(pkt != NULL);
	ASSERT(un != NULL);
	be_chatty = (!(pkt->pkt_flags & FLAG_SILENT));
	perr = (pkt->pkt_statistics & STAT_PERR);

	mutex_enter(SD_MUTEX(un));
	/* While dumping, leave the device state strictly alone. */
	if (un->un_state == SD_STATE_DUMPING) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	switch (pkt->pkt_reason) {
	case CMD_UNX_BUS_FREE:
		/*
		 * If we had a parity error that caused the target to drop BSY*,
		 * don't be chatty about it.
		 */
		if (perr && be_chatty) {
			be_chatty = 0;
		}
		break;
	case CMD_TAG_REJECT:
		/*
		 * The SCSI-2 spec states that a tag reject will be sent by the
		 * target if tagged queuing is not supported. A tag reject may
		 * also be sent during certain initialization periods or to
		 * control internal resources. For the latter case the target
		 * may also return Queue Full.
		 *
		 * If this driver receives a tag reject from a target that is
		 * going through an init period or controlling internal
		 * resources tagged queuing will be disabled. This is a less
		 * than optimal behavior but the driver is unable to determine
		 * the target state and assumes tagged queueing is not supported
		 */
		pkt->pkt_flags = 0;
		un->un_tagflags = 0;

		/* Clamp the throttle now that tagged queueing is off. */
		if (un->un_f_opt_queueing == TRUE) {
			un->un_throttle = min(un->un_throttle, 3);
		} else {
			un->un_throttle = 1;
		}
		mutex_exit(SD_MUTEX(un));
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
		mutex_enter(SD_MUTEX(un));
		break;
	case CMD_INCOMPLETE:
		/*
		 * The transport stopped with an abnormal state, fallthrough and
		 * reset the target and/or bus unless selection did not complete
		 * (indicated by STATE_GOT_BUS) in which case we don't want to
		 * go through a target/bus reset
		 */
		if (pkt->pkt_state == STATE_GOT_BUS) {
			break;
		}
		/*FALLTHROUGH*/

	case CMD_TIMEOUT:
	default:
		/*
		 * The lun may still be running the command, so a lun reset
		 * should be attempted. If the lun reset fails or cannot be
		 * issued, than try a target reset. Lastly try a bus reset.
		 */
		if ((pkt->pkt_statistics &
		    (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
			int reset_retval = 0;
			/*
			 * NOTE(review): un_f_allow_bus_device_reset and
			 * un_f_lun_reset_enabled are read after SD_MUTEX is
			 * dropped -- presumably these flags are fixed at
			 * attach time; confirm.
			 */
			mutex_exit(SD_MUTEX(un));
			if (un->un_f_allow_bus_device_reset == TRUE) {
				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			}
			mutex_enter(SD_MUTEX(un));
		}
		break;
	}

	/* A device/bus reset has occurred; update the reservation status. */
	if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET))) {
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: Lost Reservation\n");
		}
	}

	/*
	 * The disk has been turned off; Update the device state.
	 *
	 * Note: Should we be offlining the disk here?
	 */
	if (pkt->pkt_state == STATE_GOT_BUS) {
		SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
		    "Disk not responding to selection\n");
		if (un->un_state != SD_STATE_OFFLINE) {
			New_state(un, SD_STATE_OFFLINE);
		}
	} else if (be_chatty) {
		/*
		 * suppress messages if they are all the same pkt reason;
		 * with TQ, many (up to 256) are returned with the same
		 * pkt_reason
		 */
		if (pkt->pkt_reason != un->un_last_pkt_reason) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: "
			    "SCSI transport failed: reason '%s'\n",
			    scsi_rname(pkt->pkt_reason));
		}
	}
	un->un_last_pkt_reason = pkt->pkt_reason;
	mutex_exit(SD_MUTEX(un));
}


/*
 *    Function: sd_sname()
 *
 * Description: This is a simple little routine to return a string containing
 *		a printable description of command status byte for use in
 *		logging.
 *
 *   Arguments: status - pointer to a status byte
 *
 * Return Code: char * - string containing status description.
24912 */ 24913 24914 static char * 24915 sd_sname(uchar_t status) 24916 { 24917 switch (status & STATUS_MASK) { 24918 case STATUS_GOOD: 24919 return ("good status"); 24920 case STATUS_CHECK: 24921 return ("check condition"); 24922 case STATUS_MET: 24923 return ("condition met"); 24924 case STATUS_BUSY: 24925 return ("busy"); 24926 case STATUS_INTERMEDIATE: 24927 return ("intermediate"); 24928 case STATUS_INTERMEDIATE_MET: 24929 return ("intermediate - condition met"); 24930 case STATUS_RESERVATION_CONFLICT: 24931 return ("reservation_conflict"); 24932 case STATUS_TERMINATED: 24933 return ("command terminated"); 24934 case STATUS_QFULL: 24935 return ("queue full"); 24936 default: 24937 return ("<unknown status>"); 24938 } 24939 } 24940 24941 24942 /* 24943 * Function: sd_mhd_resvd_recover() 24944 * 24945 * Description: This function adds a reservation entry to the 24946 * sd_resv_reclaim_request list and signals the reservation 24947 * reclaim thread that there is work pending. If the reservation 24948 * reclaim thread has not been previously created this function 24949 * will kick it off. 24950 * 24951 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24952 * among multiple watches that share this callback function 24953 * 24954 * Context: This routine is called by timeout() and is run in interrupt 24955 * context. It must not sleep or call other functions which may 24956 * sleep. 
 */

static void
sd_mhd_resvd_recover(void *arg)
{
	dev_t			dev = (dev_t)arg;
	struct sd_lun		*un;
	struct sd_thr_request	*sd_treq = NULL;
	struct sd_thr_request	*sd_cur = NULL;
	struct sd_thr_request	*sd_prev = NULL;
	int			already_there = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return;
	}

	mutex_enter(SD_MUTEX(un));
	/* This timeout has fired; forget its id so it is not untimeout'ed. */
	un->un_resvd_timeid = NULL;
	if (un->un_resvd_status & SD_WANT_RESERVE) {
		/*
		 * There was a reset so don't issue the reserve, allow the
		 * sd_mhd_watch_cb callback function to notice this and
		 * reschedule the timeout for reservation.
		 */
		mutex_exit(SD_MUTEX(un));
		return;
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * Add this device to the sd_resv_reclaim_request list and the
	 * sd_resv_reclaim_thread should take care of the rest.
	 *
	 * Note: We can't sleep in this context so if the memory allocation
	 * fails allow the sd_mhd_watch_cb callback function to notice this and
	 * reschedule the timeout for reservation.  (4378460)
	 */
	sd_treq = (struct sd_thr_request *)
	    kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
	if (sd_treq == NULL) {
		return;
	}

	sd_treq->sd_thr_req_next = NULL;
	sd_treq->dev = dev;
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		sd_tr.srq_thr_req_head = sd_treq;
	} else {
		/* Scan the queue; enqueue only if dev is not already on it. */
		sd_cur = sd_prev = sd_tr.srq_thr_req_head;
		for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
			if (sd_cur->dev == dev) {
				/*
				 * already in Queue so don't log
				 * another request for the device
				 */
				already_there = 1;
				break;
			}
			sd_prev = sd_cur;
		}
		if (!already_there) {
			SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
			    "logging request for %lx\n", dev);
			sd_prev->sd_thr_req_next = sd_treq;
		} else {
			kmem_free(sd_treq, sizeof (struct sd_thr_request));
		}
	}

	/*
	 * Create a kernel thread to do the reservation reclaim and free up
	 * this thread. We cannot block this thread while we go away to do
	 * the reservation reclaim.
	 *
	 * NOTE(review): thread_create() is invoked while holding
	 * srq_resv_reclaim_mutex and from timeout() context -- presumably
	 * safe on this platform; confirm against thread_create(9F)
	 * context requirements.
	 */
	if (sd_tr.srq_resv_reclaim_thread == NULL)
		sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
		    sd_resv_reclaim_thread, NULL,
		    0, &p0, TS_RUN, v.v_maxsyspri - 2);

	/* Tell the reservation reclaim thread that it has work to do */
	cv_signal(&sd_tr.srq_resv_reclaim_cv);
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
}

/*
 *    Function: sd_resv_reclaim_thread()
 *
 * Description: This function implements the reservation reclaim operations
 *
 *   Arguments: arg - the device 'dev_t' is used for context to discriminate
 *		among multiple watches that share this callback function
 */

static void
sd_resv_reclaim_thread()
{
	struct sd_lun		*un;
	struct sd_thr_request	*sd_mhreq;

	/* Wait for work */
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		/*
		 * NOTE(review): cv_wait in an 'if' rather than a 'while';
		 * if the queue is still empty on wakeup, the while loop
		 * below falls through and the thread exits (it is
		 * re-created on demand by sd_mhd_resvd_recover).
		 */
		cv_wait(&sd_tr.srq_resv_reclaim_cv,
		    &sd_tr.srq_resv_reclaim_mutex);
	}

	/* Loop while we have work */
	while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
		un = ddi_get_soft_state(sd_state,
		    SDUNIT(sd_tr.srq_thr_cur_req->dev));
		if (un == NULL) {
			/*
			 * softstate structure is NULL so just
			 * dequeue the request and continue
			 */
			sd_tr.srq_thr_req_head =
			    sd_tr.srq_thr_cur_req->sd_thr_req_next;
			kmem_free(sd_tr.srq_thr_cur_req,
			    sizeof (struct sd_thr_request));
			continue;
		}

		/* dequeue the request */
		sd_mhreq = sd_tr.srq_thr_cur_req;
		sd_tr.srq_thr_req_head =
		    sd_tr.srq_thr_cur_req->sd_thr_req_next;
		mutex_exit(&sd_tr.srq_resv_reclaim_mutex);

		/*
		 * Reclaim reservation only if SD_RESERVE is still set. There
		 * may have been a call to MHIOCRELEASE before we got here.
		 */
		mutex_enter(SD_MUTEX(un));
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			/*
			 * Note: The SD_LOST_RESERVE flag is cleared before
			 * reclaiming the reservation. If this is done after the
			 * call to sd_reserve_release a reservation loss in the
			 * window between pkt completion of reserve cmd and
			 * mutex_enter below may not be recognized
			 */
			un->un_resvd_status &= ~SD_LOST_RESERVE;
			mutex_exit(SD_MUTEX(un));

			/* Reissue the RESERVE; 0 means it was reclaimed. */
			if (sd_reserve_release(sd_mhreq->dev,
			    SD_RESERVE) == 0) {
				mutex_enter(SD_MUTEX(un));
				un->un_resvd_status |= SD_RESERVE;
				mutex_exit(SD_MUTEX(un));
				SD_INFO(SD_LOG_IOCTL_MHD, un,
				    "sd_resv_reclaim_thread: "
				    "Reservation Recovered\n");
			} else {
				/* Still lost; a later watch cb may retry. */
				mutex_enter(SD_MUTEX(un));
				un->un_resvd_status |= SD_LOST_RESERVE;
				mutex_exit(SD_MUTEX(un));
				SD_INFO(SD_LOG_IOCTL_MHD, un,
				    "sd_resv_reclaim_thread: Failed "
				    "Reservation Recovery\n");
			}
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
		ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req);
		kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
		sd_mhreq = sd_tr.srq_thr_cur_req = NULL;
		/*
		 * wakeup the destroy thread if anyone is waiting on
		 * us to complete.
		 */
		cv_signal(&sd_tr.srq_inprocess_cv);
		SD_TRACE(SD_LOG_IOCTL_MHD, un,
		    "sd_resv_reclaim_thread: cv_signalling current request \n");
	}

	/*
	 * cleanup the sd_tr structure now that this thread will not exist
	 */
	ASSERT(sd_tr.srq_thr_req_head == NULL);
	ASSERT(sd_tr.srq_thr_cur_req == NULL);
	sd_tr.srq_resv_reclaim_thread = NULL;
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
	thread_exit();
}


/*
 *    Function: sd_rmv_resv_reclaim_req()
 *
 * Description: This function removes any pending reservation reclaim requests
 *		for the specified device.
 *
 *   Arguments: dev - the device 'dev_t'
 */

static void
sd_rmv_resv_reclaim_req(dev_t dev)
{
	struct sd_thr_request *sd_mhreq;
	struct sd_thr_request *sd_prev;

	/* Remove a reservation reclaim request from the list */
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) {
		/*
		 * We are attempting to reinstate reservation for
		 * this device. We wait for sd_reserve_release()
		 * to return before we return.
		 */
		cv_wait(&sd_tr.srq_inprocess_cv,
		    &sd_tr.srq_resv_reclaim_mutex);
	} else {
		/* Handle removal of the head of the queue specially. */
		sd_prev = sd_mhreq = sd_tr.srq_thr_req_head;
		if (sd_mhreq && sd_mhreq->dev == dev) {
			sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next;
			kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
			mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
			return;
		}
		/* Otherwise scan for dev, tracking the predecessor node. */
		for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) {
			if (sd_mhreq && sd_mhreq->dev == dev) {
				break;
			}
			sd_prev = sd_mhreq;
		}
		if (sd_mhreq != NULL) {
			sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next;
			kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
		}
	}
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
}


/*
 *    Function: sd_mhd_reset_notify_cb()
 *
 * Description: This is a call back function for scsi_reset_notify. This
 *		function updates the softstate reserved status and logs the
 *		reset. The driver scsi watch facility callback function
 *		(sd_mhd_watch_cb) and reservation reclaim thread functionality
 *		will reclaim the reservation.
 *
 *   Arguments: arg - driver soft state (unit) structure
 */

static void
sd_mhd_reset_notify_cb(caddr_t arg)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	/* Mark the reservation lost; the watch/reclaim path recovers it. */
	mutex_enter(SD_MUTEX(un));
	if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
		un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
		SD_INFO(SD_LOG_IOCTL_MHD, un,
		    "sd_mhd_reset_notify_cb: Lost Reservation\n");
	}
	mutex_exit(SD_MUTEX(un));
}


/*
 *    Function: sd_take_ownership()
 *
 * Description: This routine implements an algorithm to achieve a stable
 *		reservation on disks which don't implement priority reserve,
 *		and makes sure that other host lose re-reservation attempts.
 *		This algorithm contains of a loop that keeps issuing the RESERVE
 *		for some period of time (min_ownership_delay, default 6 seconds)
 *		During that loop, it looks to see if there has been a bus device
 *		reset or bus reset (both of which cause an existing reservation
 *		to be lost). If the reservation is lost issue RESERVE until a
 *		period of min_ownership_delay with no resets has gone by, or
 *		until max_ownership_delay has expired. This loop ensures that
 *		the host really did manage to reserve the device, in spite of
 *		resets. The looping for min_ownership_delay (default six
 *		seconds) is important to early generation clustering products,
 *		Solstice HA 1.x and Sun Cluster 2.x. Those products use an
 *		MHIOCENFAILFAST periodic timer of two seconds. By having
 *		MHIOCTKOWN issue Reserves in a loop for six seconds, and having
 *		MHIOCENFAILFAST poll every two seconds, the idea is that by the
 *		time the MHIOCTKOWN ioctl returns, the other host (if any) will
 *		have already noticed, via the MHIOCENFAILFAST polling, that it
 *		no longer "owns" the disk and will have panicked itself. Thus,
 *		the host issuing the MHIOCTKOWN is assured (with timing
 *		dependencies) that by the time it actually starts to use the
 *		disk for real work, the old owner is no longer accessing it.
 *
 *		min_ownership_delay is the minimum amount of time for which the
 *		disk must be reserved continuously devoid of resets before the
 *		MHIOCTKOWN ioctl will return success.
 *
 *		max_ownership_delay indicates the amount of time by which the
 *		take ownership should succeed or timeout with an error.
 *
 *   Arguments: dev - the device 'dev_t'
 *		*p  - struct containing timing info.
 *
 * Return Code: 0 for success or error code
 */

static int
sd_take_ownership(dev_t dev, struct mhioctkown *p)
{
	struct sd_lun	*un;
	int		rval;
	int		err;
	int		reservation_count   = 0;
	int		min_ownership_delay =  6000000; /* in usec */
	int		max_ownership_delay = 30000000; /* in usec */
	clock_t		start_time;	/* starting time of this algorithm */
	clock_t		end_time;	/* time limit for giving up */
	clock_t		ownership_time;	/* time limit for stable ownership */
	clock_t		current_time;
	clock_t		previous_current_time;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * Attempt a device reservation. A priority reservation is requested.
	 */
	if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE))
	    != SD_SUCCESS) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_take_ownership: return(1)=%d\n", rval);
		return (rval);
	}

	/* Update the softstate reserved status to indicate the reservation */
	mutex_enter(SD_MUTEX(un));
	un->un_resvd_status |= SD_RESERVE;
	un->un_resvd_status &=
	    ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT);
	mutex_exit(SD_MUTEX(un));

	/* Caller-supplied delays (in msec) override the defaults. */
	if (p != NULL) {
		if (p->min_ownership_delay != 0) {
			min_ownership_delay = p->min_ownership_delay * 1000;
		}
		if (p->max_ownership_delay != 0) {
			max_ownership_delay = p->max_ownership_delay * 1000;
		}
	}
	SD_INFO(SD_LOG_IOCTL_MHD, un,
	    "sd_take_ownership: min, max delays: %d, %d\n",
	    min_ownership_delay, max_ownership_delay);

	start_time = ddi_get_lbolt();
	current_time	= start_time;
	ownership_time	= current_time + drv_usectohz(min_ownership_delay);
	end_time = start_time + drv_usectohz(max_ownership_delay);

	while (current_time - end_time < 0) {
		delay(drv_usectohz(500000));

		/*
		 * If the reserve fails, retry it once immediately before
		 * declaring the attempt failed. A nonzero err (even with a
		 * successful retry) resets the stable-ownership window below.
		 */
		if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) {
			if ((sd_reserve_release(dev, SD_RESERVE)) != 0) {
				mutex_enter(SD_MUTEX(un));
				rval = (un->un_resvd_status &
				    SD_RESERVATION_CONFLICT) ? EACCES : EIO;
				mutex_exit(SD_MUTEX(un));
				break;
			}
		}
		previous_current_time = current_time;
		current_time = ddi_get_lbolt();
		mutex_enter(SD_MUTEX(un));
		if (err || (un->un_resvd_status & SD_LOST_RESERVE)) {
			/* Reservation disturbed: restart the stable window. */
			ownership_time = ddi_get_lbolt() +
			    drv_usectohz(min_ownership_delay);
			reservation_count = 0;
		} else {
			reservation_count++;
		}
		un->un_resvd_status |= SD_RESERVE;
		un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE);
		mutex_exit(SD_MUTEX(un));

		SD_INFO(SD_LOG_IOCTL_MHD, un,
		    "sd_take_ownership: ticks for loop iteration=%ld, "
		    "reservation=%s\n", (current_time - previous_current_time),
		    reservation_count ? "ok" : "reclaimed");

		if (current_time - ownership_time >= 0 &&
		    reservation_count >= 4) {
			rval = 0; /* Achieved a stable ownership */
			break;
		}
		if (current_time - end_time >= 0) {
			rval = EACCES; /* No ownership in max possible time */
			break;
		}
	}
	SD_TRACE(SD_LOG_IOCTL_MHD, un,
	    "sd_take_ownership: return(2)=%d\n", rval);
	return (rval);
}


/*
 *    Function: sd_reserve_release()
 *
 * Description: This function builds and sends scsi RESERVE, RELEASE, and
 *		PRIORITY RESERVE commands based on a user specified command type
 *
 *   Arguments: dev - the device 'dev_t'
 *		cmd - user specified command type; one of SD_PRIORITY_RESERVE,
 *		      SD_RESERVE, SD_RELEASE
 *
 * Return Code: 0 or Error Code
 */

static int
sd_reserve_release(dev_t dev, int cmd)
{
	struct uscsi_cmd	*com = NULL;
	struct sd_lun		*un = NULL;
	char			cdb[CDB_GROUP0];
	int
rval;

	ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) ||
	    (cmd == SD_PRIORITY_RESERVE));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* instantiate and initialize the command and cdb */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP0);
	com->uscsi_flags   = USCSI_SILENT;
	com->uscsi_timeout = un->un_reserve_release_time;
	com->uscsi_cdblen  = CDB_GROUP0;
	com->uscsi_cdb	   = cdb;
	/* RELEASE or (priority) RESERVE share the same Group 0 CDB shape. */
	if (cmd == SD_RELEASE) {
		cdb[0] = SCMD_RELEASE;
	} else {
		cdb[0] = SCMD_RESERVE;
	}

	/* Send the command. */
	rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	/*
	 * "break" a reservation that is held by another host, by issuing a
	 * reset if priority reserve is desired, and we could not get the
	 * device.
	 */
	if ((cmd == SD_PRIORITY_RESERVE) &&
	    (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
		/*
		 * First try to reset the LUN. If we cannot, then try a target
		 * reset, followed by a bus reset if the target reset fails.
		 */
		int reset_retval = 0;
		if (un->un_f_lun_reset_enabled == TRUE) {
			reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
		}
		if (reset_retval == 0) {
			/* The LUN reset either failed or was not issued */
			reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
		}
		/* All three reset levels failed: give up with EIO. */
		if ((reset_retval == 0) &&
		    (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) {
			rval = EIO;
			kmem_free(com, sizeof (*com));
			return (rval);
		}

		/* Reuse the uscsi_cmd (zeroed) for the follow-up reserve. */
		bzero(com, sizeof (struct uscsi_cmd));
		com->uscsi_flags   = USCSI_SILENT;
		com->uscsi_cdb	   = cdb;
		com->uscsi_cdblen  = CDB_GROUP0;
		com->uscsi_timeout = 5;

		/*
		 * Reissue the last reserve command, this time without request
		 * sense.  Assume that it is just a regular reserve command.
		 */
		rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE,
		    UIO_SYSSPACE, SD_PATH_STANDARD);
	}

	/* Return an error if still getting a reservation conflict. */
	if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
		rval = EACCES;
	}

	kmem_free(com, sizeof (*com));
	return (rval);
}


#define	SD_NDUMP_RETRIES	12
/*
 *	System Crash Dump routine
 */

static int
sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	int		partition;
	int		i;
	int		err;
	struct sd_lun	*un;
	struct dk_map	*lp;
	struct scsi_pkt *wr_pktp;
	struct buf	*wr_bp;
	struct buf	wr_buf;
	daddr_t		tgt_byte_offset; /* rmw - byte offset for target */
	daddr_t		tgt_blkno;	/* rmw - blkno for target */
	size_t		tgt_byte_count;	/* rmw - # of bytes to xfer */
	size_t		tgt_nblk;	/* rmw - # of tgt blks to xfer */
	size_t		io_start_offset;
	int		doing_rmw = FALSE;
	int		rval;
#if defined(__i386) || defined(__amd64)
	ssize_t dma_resid;
	daddr_t oblkno;
#endif

	instance = SDUNIT(dev);
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    (!un->un_f_geometry_is_valid) || ISCD(un)) {
		return (ENXIO);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))

	SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");

	partition = SDPART(dev);
	SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);

	/* Validate blocks to dump at against partition size.
	 */
	lp = &un->un_map[partition];
	if ((blkno + nblk) > lp->dkl_nblk) {
		SD_TRACE(SD_LOG_DUMP, un,
		    "sddump: dump range larger than partition: "
		    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
		    blkno, nblk, lp->dkl_nblk);
		return (EINVAL);
	}

	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		struct scsi_pkt *start_pktp;

		mutex_exit(&un->un_pm_mutex);

		/*
		 * use pm framework to power on HBA 1st
		 */
		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);

		/*
		 * Dump no long uses sdpower to power on a device, it's
		 * in-line here so it can be done in polled mode.
		 */

		SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");

		start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
		    CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);

		if (start_pktp == NULL) {
			/* We were not given a SCSI packet, fail. */
			return (EIO);
		}
		/* Build a polled START STOP UNIT (start) command. */
		bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
		start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
		start_pktp->pkt_cdbp[4] = SD_TARGET_START;
		start_pktp->pkt_flags = FLAG_NOINTR;

		mutex_enter(SD_MUTEX(un));
		SD_FILL_SCSI1_LUN(un, start_pktp);
		mutex_exit(SD_MUTEX(un));
		/*
		 * Scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD.
		 */
		if (sd_scsi_poll(un, start_pktp) != 0) {
			scsi_destroy_pkt(start_pktp);
			return (EIO);
		}
		scsi_destroy_pkt(start_pktp);
		(void) sd_ddi_pm_resume(un);
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	mutex_enter(SD_MUTEX(un));
	/* Disable throttling while dumping. */
	un->un_throttle = 0;

	/*
	 * The first time through, reset the specific target device.
	 * However, when cpr calls sddump we know that sd is in a
	 * a good state so no bus reset is required.
	 * Clear sense data via Request Sense cmd.
	 * In sddump we don't care about allow_bus_device_reset anymore
	 */

	if ((un->un_state != SD_STATE_SUSPENDED) &&
	    (un->un_state != SD_STATE_DUMPING)) {

		New_state(un, SD_STATE_DUMPING);

		if (un->un_f_is_fibre == FALSE) {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Attempt a bus reset for parallel scsi.
			 *
			 * Note: A bus reset is required because on some host
			 * systems (i.e. E420R) a bus device reset is
			 * insufficient to reset the state of the target.
			 *
			 * Note: Don't issue the reset for fibre-channel,
			 * because this tends to hang the bus (loop) for
			 * too long while everyone is logging out and in
			 * and the deadman timer for dumping will fire
			 * before the dump is complete.
			 */
			if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
				mutex_enter(SD_MUTEX(un));
				Restore_state(un);
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}

			/* Delay to give the device some recovery time. */
			drv_usecwait(10000);

			if (sd_send_polled_RQS(un) == SD_FAILURE) {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: sd_send_polled_RQS failed\n");
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * Convert the partition-relative block number to a
	 * disk physical block number.
	 */
	blkno += un->un_offset[partition];
	SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);


	/*
	 * Check if the device has a non-512 block size.
	 */
	wr_bp = NULL;
	if (NOT_DEVBSIZE(un)) {
		tgt_byte_offset = blkno * un->un_sys_blocksize;
		tgt_byte_count = nblk * un->un_sys_blocksize;
		/*
		 * If the request is not aligned to the target block size,
		 * a read-modify-write cycle is needed.
		 */
		if ((tgt_byte_offset % un->un_tgt_blocksize) ||
		    (tgt_byte_count % un->un_tgt_blocksize)) {
			doing_rmw = TRUE;
			/*
			 * Calculate the block number and number of block
			 * in terms of the media block size.
			 */
			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
			tgt_nblk =
			    ((tgt_byte_offset + tgt_byte_count +
			    (un->un_tgt_blocksize - 1)) /
			    un->un_tgt_blocksize) - tgt_blkno;

			/*
			 * Invoke the routine which is going to do read part
			 * of read-modify-write.
			 * Note that this routine returns a pointer to
			 * a valid bp in wr_bp.
			 */
			err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk,
			    &wr_bp);
			if (err) {
				mutex_exit(SD_MUTEX(un));
				return (err);
			}
			/*
			 * Offset is being calculated as -
			 * (original block # * system block size) -
			 * (new block # * target block size)
			 */
			io_start_offset =
			    ((uint64_t)(blkno * un->un_sys_blocksize)) -
			    ((uint64_t)(tgt_blkno * un->un_tgt_blocksize));

			ASSERT((io_start_offset >= 0) &&
			    (io_start_offset < un->un_tgt_blocksize));
			/*
			 * Do the modify portion of read modify write.
			 */
			bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset],
			    (size_t)nblk * un->un_sys_blocksize);
		} else {
			doing_rmw = FALSE;
			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
			tgt_nblk = tgt_byte_count / un->un_tgt_blocksize;
		}

		/* Convert blkno and nblk to target blocks */
		blkno = tgt_blkno;
		nblk = tgt_nblk;
	} else {
		/* Native 512-byte device: build a stack buf for the write. */
		wr_bp = &wr_buf;
		bzero(wr_bp, sizeof (struct buf));
		wr_bp->b_flags		= B_BUSY;
		wr_bp->b_un.b_addr	= addr;
		wr_bp->b_bcount		= nblk << DEV_BSHIFT;
		wr_bp->b_resid		= 0;
	}

	mutex_exit(SD_MUTEX(un));

	/*
	 * Obtain a SCSI packet for the write command.
	 * It should be safe to call the allocator here without
	 * worrying about being locked for DVMA mapping because
	 * the address we're passed is already a DVMA mapping
	 *
	 * We are also not going to worry about semaphore ownership
	 * in the dump buffer. Dumping is single threaded at present.
	 */

	wr_pktp = NULL;

#if defined(__i386) || defined(__amd64)
	/* x86 may need multiple passes for partial DMA transfers. */
	dma_resid = wr_bp->b_bcount;
	oblkno = blkno;
	while (dma_resid != 0) {
#endif

	for (i = 0; i < SD_NDUMP_RETRIES; i++) {
		wr_bp->b_flags &= ~B_ERROR;

#if defined(__i386) || defined(__amd64)
		blkno = oblkno +
		    ((wr_bp->b_bcount - dma_resid) /
		    un->un_tgt_blocksize);
		nblk = dma_resid / un->un_tgt_blocksize;

		if (wr_pktp) {
			/* Partial DMA transfers after initial transfer */
			rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp,
			    blkno, nblk);
		} else {
			/* Initial transfer */
			rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp,
			    un->un_pkt_flags, NULL_FUNC, NULL,
			    blkno, nblk);
		}
#else
		rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp,
		    0, NULL_FUNC, NULL, blkno, nblk);
#endif

		if (rval == 0) {
			/* We were given a SCSI packet, continue. */
			break;
		}

		if (i == 0) {
			if (wr_bp->b_flags & B_ERROR) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "no resources for dumping; "
				    "error code: 0x%x, retrying",
				    geterror(wr_bp));
			} else {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "no resources for dumping; retrying");
			}
		} else if (i != (SD_NDUMP_RETRIES - 1)) {
			if (wr_bp->b_flags & B_ERROR) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
				    "no resources for dumping; error code: "
				    "0x%x, retrying\n", geterror(wr_bp));
			}
		} else {
			/* Last retry exhausted: log, restore state, fail. */
			if (wr_bp->b_flags & B_ERROR) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
				    "no resources for dumping; "
				    "error code: 0x%x, retries failed, "
				    "giving up.\n", geterror(wr_bp));
			} else {
				scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
				    "no resources for dumping; "
				    "retries failed, giving up.\n");
			}
			mutex_enter(SD_MUTEX(un));
			Restore_state(un);
			if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) {
				mutex_exit(SD_MUTEX(un));
				/* The rmw read allocated wr_bp; free it. */
				scsi_free_consistent_buf(wr_bp);
			} else {
				mutex_exit(SD_MUTEX(un));
			}
			return (EIO);
		}
		drv_usecwait(10000);
	}

#if defined(__i386) || defined(__amd64)
	/*
	 * save the resid from PARTIAL_DMA
	 */
	dma_resid = wr_pktp->pkt_resid;
	if (dma_resid != 0)
		nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid);
	wr_pktp->pkt_resid = 0;
#endif

	/* SunBug 1222170 */
	wr_pktp->pkt_flags = FLAG_NOINTR;

	err = EIO;
	for (i = 0; i < SD_NDUMP_RETRIES; i++) {

		/*
		 * Scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD.  We should only check
		 * errors if this condition is not true.  Even then we should
		 * send our own request sense packet only if we have a check
		 * condition and auto request sense has not been performed by
		 * the hba.
		 */
		SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n");

		if ((sd_scsi_poll(un, wr_pktp) == 0) &&
		    (wr_pktp->pkt_resid == 0)) {
			err = SD_SUCCESS;
			break;
		}

		/*
		 * Check CMD_DEV_GONE 1st, give up if device is gone.
		 */
		if (wr_pktp->pkt_reason == CMD_DEV_GONE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
			    "Device is gone\n");
			break;
		}

		if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: write failed with CHECK, try # %d\n", i);
			if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) {
				(void) sd_send_polled_RQS(un);
			}

			continue;
		}

		if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) {
			int reset_retval = 0;

			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: write failed with BUSY, try # %d\n", i);

			/* Escalate: LUN reset first, then target reset. */
			if (un->un_f_lun_reset_enabled == TRUE) {
				reset_retval = scsi_reset(SD_ADDRESS(un),
				    RESET_LUN);
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
			}
			(void) sd_send_polled_RQS(un);

		} else {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: write failed with 0x%x, try # %d\n",
			    SD_GET_PKT_STATUS(wr_pktp), i);
			mutex_enter(SD_MUTEX(un));
			sd_reset_target(un, wr_pktp);
			mutex_exit(SD_MUTEX(un));
		}

		/*
		 * If we are not getting anywhere with lun/target resets,
		 * let's reset the bus.
		 */
		if (i == SD_NDUMP_RETRIES/2) {
			(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			(void) sd_send_polled_RQS(un);
		}

	}
#if defined(__i386) || defined(__amd64)
	}	/* dma_resid */
#endif

	scsi_destroy_pkt(wr_pktp);
	mutex_enter(SD_MUTEX(un));
	if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) {
		mutex_exit(SD_MUTEX(un));
		scsi_free_consistent_buf(wr_bp);
	} else {
		mutex_exit(SD_MUTEX(un));
	}
	SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err);
	return (err);
}

/*
 *    Function: sd_scsi_poll()
 *
 * Description: This is a wrapper for the scsi_poll call.
25871 * 25872 * Arguments: sd_lun - The unit structure 25873 * scsi_pkt - The scsi packet being sent to the device. 25874 * 25875 * Return Code: 0 - Command completed successfully with good status 25876 * -1 - Command failed. This could indicate a check condition 25877 * or other status value requiring recovery action. 25878 * 25879 */ 25880 25881 static int 25882 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 25883 { 25884 int status; 25885 25886 ASSERT(un != NULL); 25887 ASSERT(!mutex_owned(SD_MUTEX(un))); 25888 ASSERT(pktp != NULL); 25889 25890 status = SD_SUCCESS; 25891 25892 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 25893 pktp->pkt_flags |= un->un_tagflags; 25894 pktp->pkt_flags &= ~FLAG_NODISCON; 25895 } 25896 25897 status = sd_ddi_scsi_poll(pktp); 25898 /* 25899 * Scsi_poll returns 0 (success) if the command completes and the 25900 * status block is STATUS_GOOD. We should only check errors if this 25901 * condition is not true. Even then we should send our own request 25902 * sense packet only if we have a check condition and auto 25903 * request sense has not been performed by the hba. 25904 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 25905 */ 25906 if ((status != SD_SUCCESS) && 25907 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 25908 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 25909 (pktp->pkt_reason != CMD_DEV_GONE)) 25910 (void) sd_send_polled_RQS(un); 25911 25912 return (status); 25913 } 25914 25915 /* 25916 * Function: sd_send_polled_RQS() 25917 * 25918 * Description: This sends the request sense command to a device. 25919 * 25920 * Arguments: sd_lun - The unit structure 25921 * 25922 * Return Code: 0 - Command completed successfully with good status 25923 * -1 - Command failed. 
25924 * 25925 */ 25926 25927 static int 25928 sd_send_polled_RQS(struct sd_lun *un) 25929 { 25930 int ret_val; 25931 struct scsi_pkt *rqs_pktp; 25932 struct buf *rqs_bp; 25933 25934 ASSERT(un != NULL); 25935 ASSERT(!mutex_owned(SD_MUTEX(un))); 25936 25937 ret_val = SD_SUCCESS; 25938 25939 rqs_pktp = un->un_rqs_pktp; 25940 rqs_bp = un->un_rqs_bp; 25941 25942 mutex_enter(SD_MUTEX(un)); 25943 25944 if (un->un_sense_isbusy) { 25945 ret_val = SD_FAILURE; 25946 mutex_exit(SD_MUTEX(un)); 25947 return (ret_val); 25948 } 25949 25950 /* 25951 * If the request sense buffer (and packet) is not in use, 25952 * let's set the un_sense_isbusy and send our packet 25953 */ 25954 un->un_sense_isbusy = 1; 25955 rqs_pktp->pkt_resid = 0; 25956 rqs_pktp->pkt_reason = 0; 25957 rqs_pktp->pkt_flags |= FLAG_NOINTR; 25958 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 25959 25960 mutex_exit(SD_MUTEX(un)); 25961 25962 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 25963 " 0x%p\n", rqs_bp->b_un.b_addr); 25964 25965 /* 25966 * Can't send this to sd_scsi_poll, we wrap ourselves around the 25967 * axle - it has a call into us! 25968 */ 25969 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 25970 SD_INFO(SD_LOG_COMMON, un, 25971 "sd_send_polled_RQS: RQS failed\n"); 25972 } 25973 25974 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 25975 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 25976 25977 mutex_enter(SD_MUTEX(un)); 25978 un->un_sense_isbusy = 0; 25979 mutex_exit(SD_MUTEX(un)); 25980 25981 return (ret_val); 25982 } 25983 25984 /* 25985 * Defines needed for localized version of the scsi_poll routine. 25986 */ 25987 #define SD_CSEC 10000 /* usecs */ 25988 #define SD_SEC_TO_CSEC (1000000/SD_CSEC) 25989 25990 25991 /* 25992 * Function: sd_ddi_scsi_poll() 25993 * 25994 * Description: Localized version of the scsi_poll routine. The purpose is to 25995 * send a scsi_pkt to a device as a polled command. 
/*
 *    Function: sd_ddi_scsi_poll()
 *
 * Description: Localized version of the scsi_poll routine.  The purpose is to
 *		send a scsi_pkt to a device as a polled command.  This version
 *		is to ensure more robust handling of transport errors.
 *		Specifically this routine cures not ready, coming ready
 *		transition for power up and reset of sonoma's.  This can take
 *		up to 45 seconds for power-on and 20 seconds for reset of a
 *		sonoma lun.
 *
 *   Arguments: scsi_pkt - The scsi_pkt being sent to a device
 *
 * Return Code: 0 - Command completed successfully with good status
 *             -1 - Command failed.
 *
 */

static int
sd_ddi_scsi_poll(struct scsi_pkt *pkt)
{
	int busy_count;
	int timeout;
	int rval = SD_FAILURE;
	int savef;
	struct scsi_extended_sense *sensep;
	long savet;
	void (*savec)();
	/*
	 * The following is defined in machdep.c and is used in determining if
	 * the scsi transport system will do polled I/O instead of interrupt
	 * I/O when called from xx_dump().
	 */
	extern int do_polled_io;

	/*
	 * save old flags in pkt, to restore at end
	 */
	savef = pkt->pkt_flags;
	savec = pkt->pkt_comp;
	savet = pkt->pkt_time;

	pkt->pkt_flags |= FLAG_NOINTR;

	/*
	 * XXX there is nothing in the SCSA spec that states that we should not
	 * do a callback for polled cmds; however, removing this will break sd
	 * and probably other target drivers
	 */
	pkt->pkt_comp = NULL;

	/*
	 * we don't like a polled command without timeout.
	 * 60 seconds seems long enough.
	 */
	if (pkt->pkt_time == 0) {
		pkt->pkt_time = SCSI_POLL_TIMEOUT;
	}

	/*
	 * Send polled cmd.
	 *
	 * We do some error recovery for various errors.  Tran_busy,
	 * queue full, and non-dispatched commands are retried every 10 msec.
	 * as they are typically transient failures.  Busy status and Not
	 * Ready are retried every second as this status takes a while to
	 * change.  Unit attention is retried for pkt_time (60) times
	 * with no delay.
	 */
	/* busy_count is in centisecond (10 msec) units; so is the budget. */
	timeout = pkt->pkt_time * SD_SEC_TO_CSEC;

	for (busy_count = 0; busy_count < timeout; busy_count++) {
		int rc;
		int poll_delay;

		/*
		 * Initialize pkt status variables.
		 */
		*pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0;

		if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) {
			if (rc != TRAN_BUSY) {
				/* Transport failed - give up. */
				break;
			} else {
				/* Transport busy - try again. */
				poll_delay = 1 * SD_CSEC;		/* 10 msec */
			}
		} else {
			/*
			 * Transport accepted - check pkt status.
			 */
			rc = (*pkt->pkt_scbp) & STATUS_MASK;
			/*
			 * Pick up auto-request-sense data, if the HBA
			 * performed ARQ for a check condition.
			 */
			if (pkt->pkt_reason == CMD_CMPLT &&
			    rc == STATUS_CHECK &&
			    pkt->pkt_state & STATE_ARQ_DONE) {
				struct scsi_arq_status *arqstat =
				    (struct scsi_arq_status *)(pkt->pkt_scbp);

				sensep = &arqstat->sts_sensedata;
			} else {
				sensep = NULL;
			}

			if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_GOOD)) {
				/* No error - we're done */
				rval = SD_SUCCESS;
				break;

			} else if (pkt->pkt_reason == CMD_DEV_GONE) {
				/* Lost connection - give up */
				break;

			} else if ((pkt->pkt_reason == CMD_INCOMPLETE) &&
			    (pkt->pkt_state == 0)) {
				/* Pkt not dispatched - try again. */
				poll_delay = 1 * SD_CSEC;		/* 10 msec. */

			} else if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_QFULL)) {
				/* Queue full - try again. */
				poll_delay = 1 * SD_CSEC;		/* 10 msec. */

			} else if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_BUSY)) {
				/* Busy - try again. */
				poll_delay = 100 * SD_CSEC;		/* 1 sec. */
				/* charge a full second against the budget */
				busy_count += (SD_SEC_TO_CSEC - 1);

			} else if ((sensep != NULL) &&
			    (sensep->es_key == KEY_UNIT_ATTENTION)) {
				/*
				 * Unit Attention - try again, immediately
				 * (no delay); a full second is still charged
				 * against the retry budget, limiting UA
				 * retries to pkt_time attempts.
				 */
				busy_count += (SD_SEC_TO_CSEC - 1);	/* 1 */
				continue;

			} else if ((sensep != NULL) &&
			    (sensep->es_key == KEY_NOT_READY) &&
			    (sensep->es_add_code == 0x04) &&
			    (sensep->es_qual_code == 0x01)) {
				/* Not ready -> ready - try again. */
				poll_delay = 100 * SD_CSEC;		/* 1 sec. */
				busy_count += (SD_SEC_TO_CSEC - 1);

			} else {
				/* BAD status - give up. */
				break;
			}
		}

		if ((curthread->t_flag & T_INTR_THREAD) == 0 &&
		    !do_polled_io) {
			delay(drv_usectohz(poll_delay));
		} else {
			/* we busy wait during cpr_dump or interrupt threads */
			drv_usecwait(poll_delay);
		}
	}

	/* restore the caller's packet settings before returning */
	pkt->pkt_flags = savef;
	pkt->pkt_comp = savec;
	pkt->pkt_time = savet;
	return (rval);
}
26179 */ 26180 26181 static int 26182 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 26183 mhioc_inkeys_t *usrp, int flag) 26184 { 26185 #ifdef _MULTI_DATAMODEL 26186 struct mhioc_key_list32 li32; 26187 #endif 26188 sd_prin_readkeys_t *in; 26189 mhioc_inkeys_t *ptr; 26190 mhioc_key_list_t li; 26191 uchar_t *data_bufp; 26192 int data_len; 26193 int rval; 26194 size_t copysz; 26195 26196 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 26197 return (EINVAL); 26198 } 26199 bzero(&li, sizeof (mhioc_key_list_t)); 26200 26201 /* 26202 * Get the listsize from user 26203 */ 26204 #ifdef _MULTI_DATAMODEL 26205 26206 switch (ddi_model_convert_from(flag & FMODELS)) { 26207 case DDI_MODEL_ILP32: 26208 copysz = sizeof (struct mhioc_key_list32); 26209 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 26210 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26211 "sd_persistent_reservation_in_read_keys: " 26212 "failed ddi_copyin: mhioc_key_list32_t\n"); 26213 rval = EFAULT; 26214 goto done; 26215 } 26216 li.listsize = li32.listsize; 26217 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 26218 break; 26219 26220 case DDI_MODEL_NONE: 26221 copysz = sizeof (mhioc_key_list_t); 26222 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26223 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26224 "sd_persistent_reservation_in_read_keys: " 26225 "failed ddi_copyin: mhioc_key_list_t\n"); 26226 rval = EFAULT; 26227 goto done; 26228 } 26229 break; 26230 } 26231 26232 #else /* ! 
_MULTI_DATAMODEL */ 26233 copysz = sizeof (mhioc_key_list_t); 26234 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26235 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26236 "sd_persistent_reservation_in_read_keys: " 26237 "failed ddi_copyin: mhioc_key_list_t\n"); 26238 rval = EFAULT; 26239 goto done; 26240 } 26241 #endif 26242 26243 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 26244 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 26245 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26246 26247 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 26248 data_len, data_bufp)) != 0) { 26249 goto done; 26250 } 26251 in = (sd_prin_readkeys_t *)data_bufp; 26252 ptr->generation = BE_32(in->generation); 26253 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 26254 26255 /* 26256 * Return the min(listsize, listlen) keys 26257 */ 26258 #ifdef _MULTI_DATAMODEL 26259 26260 switch (ddi_model_convert_from(flag & FMODELS)) { 26261 case DDI_MODEL_ILP32: 26262 li32.listlen = li.listlen; 26263 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 26264 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26265 "sd_persistent_reservation_in_read_keys: " 26266 "failed ddi_copyout: mhioc_key_list32_t\n"); 26267 rval = EFAULT; 26268 goto done; 26269 } 26270 break; 26271 26272 case DDI_MODEL_NONE: 26273 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26274 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26275 "sd_persistent_reservation_in_read_keys: " 26276 "failed ddi_copyout: mhioc_key_list_t\n"); 26277 rval = EFAULT; 26278 goto done; 26279 } 26280 break; 26281 } 26282 26283 #else /* ! 
_MULTI_DATAMODEL */ 26284 26285 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26286 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26287 "sd_persistent_reservation_in_read_keys: " 26288 "failed ddi_copyout: mhioc_key_list_t\n"); 26289 rval = EFAULT; 26290 goto done; 26291 } 26292 26293 #endif /* _MULTI_DATAMODEL */ 26294 26295 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 26296 li.listsize * MHIOC_RESV_KEY_SIZE); 26297 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 26298 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26299 "sd_persistent_reservation_in_read_keys: " 26300 "failed ddi_copyout: keylist\n"); 26301 rval = EFAULT; 26302 } 26303 done: 26304 kmem_free(data_bufp, data_len); 26305 return (rval); 26306 } 26307 26308 26309 /* 26310 * Function: sd_persistent_reservation_in_read_resv 26311 * 26312 * Description: This routine is the driver entry point for handling CD-ROM 26313 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 26314 * by sending the SCSI-3 PRIN commands to the device. 26315 * Process the read persistent reservations command response by 26316 * copying the reservation information into the user provided 26317 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 26318 * 26319 * Arguments: un - Pointer to soft state struct for the target. 26320 * usrp - user provided pointer to multihost Persistent In Read 26321 * Keys structure (mhioc_inkeys_t) 26322 * flag - this argument is a pass through to ddi_copyxxx() 26323 * directly from the mode argument of ioctl(). 26324 * 26325 * Return Code: 0 - Success 26326 * EACCES 26327 * ENOTSUP 26328 * errno return code from sd_send_scsi_cmd() 26329 * 26330 * Context: Can sleep. Does not return until command is completed. 
26331 */ 26332 26333 static int 26334 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 26335 mhioc_inresvs_t *usrp, int flag) 26336 { 26337 #ifdef _MULTI_DATAMODEL 26338 struct mhioc_resv_desc_list32 resvlist32; 26339 #endif 26340 sd_prin_readresv_t *in; 26341 mhioc_inresvs_t *ptr; 26342 sd_readresv_desc_t *readresv_ptr; 26343 mhioc_resv_desc_list_t resvlist; 26344 mhioc_resv_desc_t resvdesc; 26345 uchar_t *data_bufp; 26346 int data_len; 26347 int rval; 26348 int i; 26349 size_t copysz; 26350 mhioc_resv_desc_t *bufp; 26351 26352 if ((ptr = usrp) == NULL) { 26353 return (EINVAL); 26354 } 26355 26356 /* 26357 * Get the listsize from user 26358 */ 26359 #ifdef _MULTI_DATAMODEL 26360 switch (ddi_model_convert_from(flag & FMODELS)) { 26361 case DDI_MODEL_ILP32: 26362 copysz = sizeof (struct mhioc_resv_desc_list32); 26363 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 26364 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26365 "sd_persistent_reservation_in_read_resv: " 26366 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26367 rval = EFAULT; 26368 goto done; 26369 } 26370 resvlist.listsize = resvlist32.listsize; 26371 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 26372 break; 26373 26374 case DDI_MODEL_NONE: 26375 copysz = sizeof (mhioc_resv_desc_list_t); 26376 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26377 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26378 "sd_persistent_reservation_in_read_resv: " 26379 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26380 rval = EFAULT; 26381 goto done; 26382 } 26383 break; 26384 } 26385 #else /* ! _MULTI_DATAMODEL */ 26386 copysz = sizeof (mhioc_resv_desc_list_t); 26387 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26388 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26389 "sd_persistent_reservation_in_read_resv: " 26390 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26391 rval = EFAULT; 26392 goto done; 26393 } 26394 #endif /* ! 
_MULTI_DATAMODEL */ 26395 26396 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 26397 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 26398 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26399 26400 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 26401 data_len, data_bufp)) != 0) { 26402 goto done; 26403 } 26404 in = (sd_prin_readresv_t *)data_bufp; 26405 ptr->generation = BE_32(in->generation); 26406 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 26407 26408 /* 26409 * Return the min(listsize, listlen( keys 26410 */ 26411 #ifdef _MULTI_DATAMODEL 26412 26413 switch (ddi_model_convert_from(flag & FMODELS)) { 26414 case DDI_MODEL_ILP32: 26415 resvlist32.listlen = resvlist.listlen; 26416 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 26417 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26418 "sd_persistent_reservation_in_read_resv: " 26419 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26420 rval = EFAULT; 26421 goto done; 26422 } 26423 break; 26424 26425 case DDI_MODEL_NONE: 26426 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26427 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26428 "sd_persistent_reservation_in_read_resv: " 26429 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26430 rval = EFAULT; 26431 goto done; 26432 } 26433 break; 26434 } 26435 26436 #else /* ! _MULTI_DATAMODEL */ 26437 26438 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26439 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26440 "sd_persistent_reservation_in_read_resv: " 26441 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26442 rval = EFAULT; 26443 goto done; 26444 } 26445 26446 #endif /* ! 
_MULTI_DATAMODEL */ 26447 26448 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 26449 bufp = resvlist.list; 26450 copysz = sizeof (mhioc_resv_desc_t); 26451 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 26452 i++, readresv_ptr++, bufp++) { 26453 26454 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 26455 MHIOC_RESV_KEY_SIZE); 26456 resvdesc.type = readresv_ptr->type; 26457 resvdesc.scope = readresv_ptr->scope; 26458 resvdesc.scope_specific_addr = 26459 BE_32(readresv_ptr->scope_specific_addr); 26460 26461 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 26462 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26463 "sd_persistent_reservation_in_read_resv: " 26464 "failed ddi_copyout: resvlist\n"); 26465 rval = EFAULT; 26466 goto done; 26467 } 26468 } 26469 done: 26470 kmem_free(data_bufp, data_len); 26471 return (rval); 26472 } 26473 26474 26475 /* 26476 * Function: sr_change_blkmode() 26477 * 26478 * Description: This routine is the driver entry point for handling CD-ROM 26479 * block mode ioctl requests. Support for returning and changing 26480 * the current block size in use by the device is implemented. The 26481 * LBA size is changed via a MODE SELECT Block Descriptor. 26482 * 26483 * This routine issues a mode sense with an allocation length of 26484 * 12 bytes for the mode page header and a single block descriptor. 26485 * 26486 * Arguments: dev - the device 'dev_t' 26487 * cmd - the request type; one of CDROMGBLKMODE (get) or 26488 * CDROMSBLKMODE (set) 26489 * data - current block size or requested block size 26490 * flag - this argument is a pass through to ddi_copyxxx() directly 26491 * from the mode argument of ioctl(). 
/*
 *    Function: sr_change_blkmode()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		block mode ioctl requests. Support for returning and changing
 *		the current block size in use by the device is implemented. The
 *		LBA size is changed via a MODE SELECT Block Descriptor.
 *
 *		This routine issues a mode sense with an allocation length of
 *		12 bytes for the mode page header and a single block descriptor.
 *
 *   Arguments: dev - the device 'dev_t'
 *		cmd - the request type; one of CDROMGBLKMODE (get) or
 *		      CDROMSBLKMODE (set)
 *		data - current block size or requested block size
 *		flag - this argument is a pass through to ddi_copyxxx() directly
 *		       from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EINVAL if invalid arguments are provided
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EIO if invalid mode sense block descriptor length
 *
 */

static int
sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun			*un = NULL;
	struct mode_header		*sense_mhp, *select_mhp;
	struct block_descriptor		*sense_desc, *select_desc;
	int				current_bsize;
	int				rval = EINVAL;
	uchar_t				*sense = NULL;
	uchar_t				*select = NULL;

	ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * The block length is changed via the Mode Select block descriptor, the
	 * "Read/Write Error Recovery" mode page (0x1) contents are not actually
	 * required as part of this routine. Therefore the mode sense allocation
	 * length is specified to be the length of a mode page header and a
	 * block descriptor.
	 */
	sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);

	if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense,
	    BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (rval);
	}

	/* Check the block descriptor len to handle only 1 block descriptor */
	sense_mhp = (struct mode_header *)sense;
	if ((sense_mhp->bdesc_length == 0) ||
	    (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense returned invalid block"
		    " descriptor length\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (EIO);
	}
	/* Extract the current 24-bit LBA size from the block descriptor. */
	sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
	current_bsize = ((sense_desc->blksize_hi << 16) |
	    (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);

	/* Process command */
	switch (cmd) {
	case CDROMGBLKMODE:
		/* Return the block size obtained during the mode sense */
		if (ddi_copyout(&current_bsize, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;
	case CDROMSBLKMODE:
		/* Validate the requested block size */
		switch (data) {
		case CDROM_BLK_512:
		case CDROM_BLK_1024:
		case CDROM_BLK_2048:
		case CDROM_BLK_2056:
		case CDROM_BLK_2336:
		case CDROM_BLK_2340:
		case CDROM_BLK_2352:
		case CDROM_BLK_2368:
		case CDROM_BLK_2448:
		case CDROM_BLK_2646:
		case CDROM_BLK_2647:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: "
			    "Block Size '%ld' Not Supported\n", data);
			kmem_free(sense, BUFLEN_CHG_BLK_MODE);
			return (EINVAL);
		}

		/*
		 * The current block size matches the requested block size so
		 * there is no need to send the mode select to change the size
		 */
		if (current_bsize == data) {
			break;
		}

		/* Build the select data for the requested block size */
		select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_desc =
		    (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
		/*
		 * The LBA size is changed via the block descriptor, so the
		 * descriptor is built according to the user data
		 */
		select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
		select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16);
		select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
		select_desc->blksize_lo = (char)((data) & 0x000000ff);

		/* Send the mode select for the requested block size */
		if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0,
		    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
		    SD_PATH_STANDARD)) != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: Mode Select Failed\n");
			/*
			 * The mode select failed for the requested block size,
			 * so reset the data for the original block size and
			 * send it to the target. The error is indicated by the
			 * return value for the failed mode select.
			 */
			select_desc->blksize_hi = sense_desc->blksize_hi;
			select_desc->blksize_mid = sense_desc->blksize_mid;
			select_desc->blksize_lo = sense_desc->blksize_lo;
			(void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0,
			    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
			    SD_PATH_STANDARD);
		} else {
			/* Success: record the new block size in soft state. */
			ASSERT(!mutex_owned(SD_MUTEX(un)));
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, (uint32_t)data, 0);

			mutex_exit(SD_MUTEX(un));
		}
		break;
	default:
		/* should not reach here, but check anyway */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		break;
	}

	/* select is only allocated on the CDROMSBLKMODE change path */
	if (select) {
		kmem_free(select, BUFLEN_CHG_BLK_MODE);
	}
	if (sense) {
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
	}
	return (rval);
}
26664 * 26665 * Arguments: dev - the device 'dev_t' 26666 * cmd - the request type; one of CDROMGDRVSPEED (get) or 26667 * CDROMSDRVSPEED (set) 26668 * data - current drive speed or requested drive speed 26669 * flag - this argument is a pass through to ddi_copyxxx() directly 26670 * from the mode argument of ioctl(). 26671 * 26672 * Return Code: the code returned by sd_send_scsi_cmd() 26673 * EINVAL if invalid arguments are provided 26674 * EFAULT if ddi_copyxxx() fails 26675 * ENXIO if fail ddi_get_soft_state 26676 * EIO if invalid mode sense block descriptor length 26677 */ 26678 26679 static int 26680 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26681 { 26682 struct sd_lun *un = NULL; 26683 struct mode_header *sense_mhp, *select_mhp; 26684 struct mode_speed *sense_page, *select_page; 26685 int current_speed; 26686 int rval = EINVAL; 26687 int bd_len; 26688 uchar_t *sense = NULL; 26689 uchar_t *select = NULL; 26690 26691 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26692 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26693 return (ENXIO); 26694 } 26695 26696 /* 26697 * Note: The drive speed is being modified here according to a Toshiba 26698 * vendor specific mode page (0x31). 
26699 */ 26700 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26701 26702 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26703 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 26704 SD_PATH_STANDARD)) != 0) { 26705 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26706 "sr_change_speed: Mode Sense Failed\n"); 26707 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26708 return (rval); 26709 } 26710 sense_mhp = (struct mode_header *)sense; 26711 26712 /* Check the block descriptor len to handle only 1 block descriptor */ 26713 bd_len = sense_mhp->bdesc_length; 26714 if (bd_len > MODE_BLK_DESC_LENGTH) { 26715 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26716 "sr_change_speed: Mode Sense returned invalid block " 26717 "descriptor length\n"); 26718 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26719 return (EIO); 26720 } 26721 26722 sense_page = (struct mode_speed *) 26723 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 26724 current_speed = sense_page->speed; 26725 26726 /* Process command */ 26727 switch (cmd) { 26728 case CDROMGDRVSPEED: 26729 /* Return the drive speed obtained during the mode sense */ 26730 if (current_speed == 0x2) { 26731 current_speed = CDROM_TWELVE_SPEED; 26732 } 26733 if (ddi_copyout(¤t_speed, (void *)data, 26734 sizeof (int), flag) != 0) { 26735 rval = EFAULT; 26736 } 26737 break; 26738 case CDROMSDRVSPEED: 26739 /* Validate the requested drive speed */ 26740 switch ((uchar_t)data) { 26741 case CDROM_TWELVE_SPEED: 26742 data = 0x2; 26743 /*FALLTHROUGH*/ 26744 case CDROM_NORMAL_SPEED: 26745 case CDROM_DOUBLE_SPEED: 26746 case CDROM_QUAD_SPEED: 26747 case CDROM_MAXIMUM_SPEED: 26748 break; 26749 default: 26750 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26751 "sr_change_speed: " 26752 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 26753 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26754 return (EINVAL); 26755 } 26756 26757 /* 26758 * The current drive speed matches the requested drive speed so 26759 * there is no need to send the mode 
select to change the speed 26760 */ 26761 if (current_speed == data) { 26762 break; 26763 } 26764 26765 /* Build the select data for the requested drive speed */ 26766 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26767 select_mhp = (struct mode_header *)select; 26768 select_mhp->bdesc_length = 0; 26769 select_page = 26770 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 26771 select_page = 26772 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 26773 select_page->mode_page.code = CDROM_MODE_SPEED; 26774 select_page->mode_page.length = 2; 26775 select_page->speed = (uchar_t)data; 26776 26777 /* Send the mode select for the requested block size */ 26778 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26779 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26780 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 26781 /* 26782 * The mode select failed for the requested drive speed, 26783 * so reset the data for the original drive speed and 26784 * send it to the target. The error is indicated by the 26785 * return value for the failed mode select. 
26786 */ 26787 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26788 "sr_drive_speed: Mode Select Failed\n"); 26789 select_page->speed = sense_page->speed; 26790 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26791 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26792 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26793 } 26794 break; 26795 default: 26796 /* should not reach here, but check anyway */ 26797 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26798 "sr_change_speed: Command '%x' Not Supported\n", cmd); 26799 rval = EINVAL; 26800 break; 26801 } 26802 26803 if (select) { 26804 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 26805 } 26806 if (sense) { 26807 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26808 } 26809 26810 return (rval); 26811 } 26812 26813 26814 /* 26815 * Function: sr_atapi_change_speed() 26816 * 26817 * Description: This routine is the driver entry point for handling CD-ROM 26818 * drive speed ioctl requests for MMC devices that do not support 26819 * the Real Time Streaming feature (0x107). 26820 * 26821 * Note: This routine will use the SET SPEED command which may not 26822 * be supported by all devices. 26823 * 26824 * Arguments: dev- the device 'dev_t' 26825 * cmd- the request type; one of CDROMGDRVSPEED (get) or 26826 * CDROMSDRVSPEED (set) 26827 * data- current drive speed or requested drive speed 26828 * flag- this argument is a pass through to ddi_copyxxx() directly 26829 * from the mode argument of ioctl(). 
26830 * 26831 * Return Code: the code returned by sd_send_scsi_cmd() 26832 * EINVAL if invalid arguments are provided 26833 * EFAULT if ddi_copyxxx() fails 26834 * ENXIO if fail ddi_get_soft_state 26835 * EIO if invalid mode sense block descriptor length 26836 */ 26837 26838 static int 26839 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26840 { 26841 struct sd_lun *un; 26842 struct uscsi_cmd *com = NULL; 26843 struct mode_header_grp2 *sense_mhp; 26844 uchar_t *sense_page; 26845 uchar_t *sense = NULL; 26846 char cdb[CDB_GROUP5]; 26847 int bd_len; 26848 int current_speed = 0; 26849 int max_speed = 0; 26850 int rval; 26851 26852 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26853 26854 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26855 return (ENXIO); 26856 } 26857 26858 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 26859 26860 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 26861 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 26862 SD_PATH_STANDARD)) != 0) { 26863 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26864 "sr_atapi_change_speed: Mode Sense Failed\n"); 26865 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26866 return (rval); 26867 } 26868 26869 /* Check the block descriptor len to handle only 1 block descriptor */ 26870 sense_mhp = (struct mode_header_grp2 *)sense; 26871 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 26872 if (bd_len > MODE_BLK_DESC_LENGTH) { 26873 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26874 "sr_atapi_change_speed: Mode Sense returned invalid " 26875 "block descriptor length\n"); 26876 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26877 return (EIO); 26878 } 26879 26880 /* Calculate the current and maximum drive speeds */ 26881 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26882 current_speed = (sense_page[14] << 8) | sense_page[15]; 26883 max_speed = (sense_page[8] << 8) | sense_page[9]; 26884 26885 /* Process the command */ 26886 
switch (cmd) { 26887 case CDROMGDRVSPEED: 26888 current_speed /= SD_SPEED_1X; 26889 if (ddi_copyout(¤t_speed, (void *)data, 26890 sizeof (int), flag) != 0) 26891 rval = EFAULT; 26892 break; 26893 case CDROMSDRVSPEED: 26894 /* Convert the speed code to KB/sec */ 26895 switch ((uchar_t)data) { 26896 case CDROM_NORMAL_SPEED: 26897 current_speed = SD_SPEED_1X; 26898 break; 26899 case CDROM_DOUBLE_SPEED: 26900 current_speed = 2 * SD_SPEED_1X; 26901 break; 26902 case CDROM_QUAD_SPEED: 26903 current_speed = 4 * SD_SPEED_1X; 26904 break; 26905 case CDROM_TWELVE_SPEED: 26906 current_speed = 12 * SD_SPEED_1X; 26907 break; 26908 case CDROM_MAXIMUM_SPEED: 26909 current_speed = 0xffff; 26910 break; 26911 default: 26912 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26913 "sr_atapi_change_speed: invalid drive speed %d\n", 26914 (uchar_t)data); 26915 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26916 return (EINVAL); 26917 } 26918 26919 /* Check the request against the drive's max speed. */ 26920 if (current_speed != 0xffff) { 26921 if (current_speed > max_speed) { 26922 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26923 return (EINVAL); 26924 } 26925 } 26926 26927 /* 26928 * Build and send the SET SPEED command 26929 * 26930 * Note: The SET SPEED (0xBB) command used in this routine is 26931 * obsolete per the SCSI MMC spec but still supported in the 26932 * MT FUJI vendor spec. Most equipment is adhereing to MT FUJI 26933 * therefore the command is still implemented in this routine. 
26934 */ 26935 bzero(cdb, sizeof (cdb)); 26936 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 26937 cdb[2] = (uchar_t)(current_speed >> 8); 26938 cdb[3] = (uchar_t)current_speed; 26939 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26940 com->uscsi_cdb = (caddr_t)cdb; 26941 com->uscsi_cdblen = CDB_GROUP5; 26942 com->uscsi_bufaddr = NULL; 26943 com->uscsi_buflen = 0; 26944 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26945 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, 0, 26946 UIO_SYSSPACE, SD_PATH_STANDARD); 26947 break; 26948 default: 26949 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26950 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 26951 rval = EINVAL; 26952 } 26953 26954 if (sense) { 26955 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26956 } 26957 if (com) { 26958 kmem_free(com, sizeof (*com)); 26959 } 26960 return (rval); 26961 } 26962 26963 26964 /* 26965 * Function: sr_pause_resume() 26966 * 26967 * Description: This routine is the driver entry point for handling CD-ROM 26968 * pause/resume ioctl requests. This only affects the audio play 26969 * operation. 26970 * 26971 * Arguments: dev - the device 'dev_t' 26972 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 26973 * for setting the resume bit of the cdb. 
26974 * 26975 * Return Code: the code returned by sd_send_scsi_cmd() 26976 * EINVAL if invalid mode specified 26977 * 26978 */ 26979 26980 static int 26981 sr_pause_resume(dev_t dev, int cmd) 26982 { 26983 struct sd_lun *un; 26984 struct uscsi_cmd *com; 26985 char cdb[CDB_GROUP1]; 26986 int rval; 26987 26988 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26989 return (ENXIO); 26990 } 26991 26992 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26993 bzero(cdb, CDB_GROUP1); 26994 cdb[0] = SCMD_PAUSE_RESUME; 26995 switch (cmd) { 26996 case CDROMRESUME: 26997 cdb[8] = 1; 26998 break; 26999 case CDROMPAUSE: 27000 cdb[8] = 0; 27001 break; 27002 default: 27003 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 27004 " Command '%x' Not Supported\n", cmd); 27005 rval = EINVAL; 27006 goto done; 27007 } 27008 27009 com->uscsi_cdb = cdb; 27010 com->uscsi_cdblen = CDB_GROUP1; 27011 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27012 27013 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27014 UIO_SYSSPACE, SD_PATH_STANDARD); 27015 27016 done: 27017 kmem_free(com, sizeof (*com)); 27018 return (rval); 27019 } 27020 27021 27022 /* 27023 * Function: sr_play_msf() 27024 * 27025 * Description: This routine is the driver entry point for handling CD-ROM 27026 * ioctl requests to output the audio signals at the specified 27027 * starting address and continue the audio play until the specified 27028 * ending address (CDROMPLAYMSF) The address is in Minute Second 27029 * Frame (MSF) format. 27030 * 27031 * Arguments: dev - the device 'dev_t' 27032 * data - pointer to user provided audio msf structure, 27033 * specifying start/end addresses. 27034 * flag - this argument is a pass through to ddi_copyxxx() 27035 * directly from the mode argument of ioctl(). 
27036 * 27037 * Return Code: the code returned by sd_send_scsi_cmd() 27038 * EFAULT if ddi_copyxxx() fails 27039 * ENXIO if fail ddi_get_soft_state 27040 * EINVAL if data pointer is NULL 27041 */ 27042 27043 static int 27044 sr_play_msf(dev_t dev, caddr_t data, int flag) 27045 { 27046 struct sd_lun *un; 27047 struct uscsi_cmd *com; 27048 struct cdrom_msf msf_struct; 27049 struct cdrom_msf *msf = &msf_struct; 27050 char cdb[CDB_GROUP1]; 27051 int rval; 27052 27053 if (data == NULL) { 27054 return (EINVAL); 27055 } 27056 27057 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27058 return (ENXIO); 27059 } 27060 27061 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 27062 return (EFAULT); 27063 } 27064 27065 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27066 bzero(cdb, CDB_GROUP1); 27067 cdb[0] = SCMD_PLAYAUDIO_MSF; 27068 if (un->un_f_cfg_playmsf_bcd == TRUE) { 27069 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 27070 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 27071 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 27072 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 27073 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 27074 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 27075 } else { 27076 cdb[3] = msf->cdmsf_min0; 27077 cdb[4] = msf->cdmsf_sec0; 27078 cdb[5] = msf->cdmsf_frame0; 27079 cdb[6] = msf->cdmsf_min1; 27080 cdb[7] = msf->cdmsf_sec1; 27081 cdb[8] = msf->cdmsf_frame1; 27082 } 27083 com->uscsi_cdb = cdb; 27084 com->uscsi_cdblen = CDB_GROUP1; 27085 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27086 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27087 UIO_SYSSPACE, SD_PATH_STANDARD); 27088 kmem_free(com, sizeof (*com)); 27089 return (rval); 27090 } 27091 27092 27093 /* 27094 * Function: sr_play_trkind() 27095 * 27096 * Description: This routine is the driver entry point for handling CD-ROM 27097 * ioctl requests to output the audio signals at the specified 27098 * starting address and continue the audio play until the specified 27099 * ending address 
(CDROMPLAYTRKIND). The address is in Track Index 27100 * format. 27101 * 27102 * Arguments: dev - the device 'dev_t' 27103 * data - pointer to user provided audio track/index structure, 27104 * specifying start/end addresses. 27105 * flag - this argument is a pass through to ddi_copyxxx() 27106 * directly from the mode argument of ioctl(). 27107 * 27108 * Return Code: the code returned by sd_send_scsi_cmd() 27109 * EFAULT if ddi_copyxxx() fails 27110 * ENXIO if fail ddi_get_soft_state 27111 * EINVAL if data pointer is NULL 27112 */ 27113 27114 static int 27115 sr_play_trkind(dev_t dev, caddr_t data, int flag) 27116 { 27117 struct cdrom_ti ti_struct; 27118 struct cdrom_ti *ti = &ti_struct; 27119 struct uscsi_cmd *com = NULL; 27120 char cdb[CDB_GROUP1]; 27121 int rval; 27122 27123 if (data == NULL) { 27124 return (EINVAL); 27125 } 27126 27127 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 27128 return (EFAULT); 27129 } 27130 27131 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27132 bzero(cdb, CDB_GROUP1); 27133 cdb[0] = SCMD_PLAYAUDIO_TI; 27134 cdb[4] = ti->cdti_trk0; 27135 cdb[5] = ti->cdti_ind0; 27136 cdb[7] = ti->cdti_trk1; 27137 cdb[8] = ti->cdti_ind1; 27138 com->uscsi_cdb = cdb; 27139 com->uscsi_cdblen = CDB_GROUP1; 27140 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27141 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27142 UIO_SYSSPACE, SD_PATH_STANDARD); 27143 kmem_free(com, sizeof (*com)); 27144 return (rval); 27145 } 27146 27147 27148 /* 27149 * Function: sr_read_all_subcodes() 27150 * 27151 * Description: This routine is the driver entry point for handling CD-ROM 27152 * ioctl requests to return raw subcode data while the target is 27153 * playing audio (CDROMSUBCODE). 27154 * 27155 * Arguments: dev - the device 'dev_t' 27156 * data - pointer to user provided cdrom subcode structure, 27157 * specifying the transfer length and address. 
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_all_subcodes(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	*com = NULL;
	struct cdrom_subcode	*subcode = NULL;
	int			rval;
	size_t			buflen;
	char			cdb[CDB_GROUP5];

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_subcode32		cdrom_subcode32;
	struct cdrom_subcode32		*cdsc32 = &cdrom_subcode32;
#endif
	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP);

#ifdef _MULTI_DATAMODEL
	/* Copy in the request, converting from an ILP32 caller if needed */
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_all_subcodes: ddi_copyin Failed\n");
			kmem_free(subcode, sizeof (struct cdrom_subcode));
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_subcode32tocdrom_subcode(cdsc32, subcode);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, subcode,
		    sizeof (struct cdrom_subcode), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_all_subcodes: ddi_copyin Failed\n");
			kmem_free(subcode, sizeof (struct cdrom_subcode));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: ddi_copyin Failed\n");
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((subcode->cdsc_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_all_subcodes: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    subcode->cdsc_length, 0xFFFFFF);
		kmem_free(subcode, sizeof (struct cdrom_subcode));
		return (EINVAL);
	}

	/* Transfer size in bytes: cdsc_length is a count of subcode blocks */
	buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_mmc_cap == TRUE) {
		/* MMC drive: use READ CD with a 24-bit transfer length */
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (char)0xff;
		cdb[3] = (char)0xff;
		cdb[4] = (char)0xff;
		cdb[5] = (char)0xff;
		cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
		cdb[10] = 1;
	} else {
		/*
		 * Note: A vendor specific command (0xDF) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
		cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
		cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
		cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
		cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	/* cdsc_addr is a user-space buffer; dataspace below is UIO_USERSPACE */
	com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE,
	    UIO_SYSSPACE, SD_PATH_STANDARD);
	kmem_free(subcode, sizeof (struct cdrom_subcode));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_subchannel()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return the Q sub-channel data of the CD
 *		current position block. (CDROMSUBCHNL) The data includes the
 *		track number, index number, absolute CD-ROM address (LBA or MSF
 *		format per the user) , track relative CD-ROM address (LBA or MSF
 *		format per the user), control data and audio status.
 *
 * Arguments: dev - the device 'dev_t'
 *		data - pointer to user provided cdrom sub-channel structure
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_subchannel(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_subchnl	subchanel;
	struct cdrom_subchnl	*subchnl = &subchanel;
	char			cdb[CDB_GROUP1];
	caddr_t			buffer;
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	/* The copied-in struct supplies cdsc_format (LBA vs. MSF) */
	if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) {
		return (EFAULT);
	}

	/* 16 bytes: Q sub-channel "CD current position" response data */
	buffer = kmem_zalloc((size_t)16, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_SUBCHANNEL;
	/* Set the MSF bit based on the user requested address format */
	cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02;
	/*
	 * Set the Q bit in byte 2 to indicate that Q sub-channel data be
	 * returned
	 */
	cdb[2] = 0x40;
	/*
	 * Set byte 3 to specify the return data format. A value of 0x01
	 * indicates that the CD-ROM current position should be returned.
	 */
	cdb[3] = 0x01;
	/* Allocation length: 16 bytes, matching uscsi_buflen below */
	cdb[8] = 0x10;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 16;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE,
	    UIO_SYSSPACE, SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, 16);
		kmem_free(com, sizeof (*com));
		return (rval);
	}

	/* Process the returned Q sub-channel data */
	subchnl->cdsc_audiostatus = buffer[1];
	/*
	 * NOTE(review): sr_read_tocentry() shifts the ADR nibble down by 4;
	 * here cdsc_adr keeps the high nibble unshifted - confirm intended.
	 */
	subchnl->cdsc_adr	= (buffer[5] & 0xF0);
	subchnl->cdsc_ctrl	= (buffer[5] & 0x0F);
	subchnl->cdsc_trk	= buffer[6];
	subchnl->cdsc_ind	= buffer[7];
	if (subchnl->cdsc_format & CDROM_LBA) {
		subchnl->cdsc_absaddr.lba =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		subchnl->cdsc_reladdr.lba =
		    ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) +
		    ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]);
	} else if (un->un_f_cfg_readsub_bcd == TRUE) {
		/* This target returns the MSF fields BCD-encoded */
		subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]);
		subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]);
		subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]);
		subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]);
		subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]);
		subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]);
	} else {
		subchnl->cdsc_absaddr.msf.minute = buffer[9];
		subchnl->cdsc_absaddr.msf.second = buffer[10];
		subchnl->cdsc_absaddr.msf.frame = buffer[11];
		subchnl->cdsc_reladdr.msf.minute = buffer[13];
		subchnl->cdsc_reladdr.msf.second = buffer[14];
		subchnl->cdsc_reladdr.msf.frame = buffer[15];
	}
	kmem_free(buffer, 16);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag)
	    != 0) {
		return (EFAULT);
	}
	return (rval);
}


/*
 * Function: sr_read_tocentry()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read from the Table of Contents (TOC)
 *		(CDROMREADTOCENTRY). This routine provides the ADR and CTRL
 *		fields, the starting address (LBA or MSF format per the user)
 *		and the data mode if the user specified track is a data track.
 *
 *		Note: The READ HEADER (0x44) command used in this routine is
 *		obsolete per the SCSI MMC spec but still supported in the
 *		MT FUJI vendor spec. Most equipment is adhering to MT FUJI
 *		therefore the command is still implemented in this routine.
 *
 * Arguments: dev - the device 'dev_t'
 *		data - pointer to user provided toc entry structure,
 *		specifying the track # and the address format
 *		(LBA or MSF).
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tocentry(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct uscsi_cmd	*com;
	struct cdrom_tocentry	toc_entry;
	struct cdrom_tocentry	*entry = &toc_entry;
	caddr_t			buffer;
	int			rval;
	char			cdb[CDB_GROUP1];

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
		return (EFAULT);
	}

	/* Validate the requested track and address format */
	if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
		return (EINVAL);
	}

	if (entry->cdte_track == 0) {
		return (EINVAL);
	}

	/* 12 bytes: 4 byte TOC response header + one 8 byte track descriptor */
	buffer = kmem_zalloc((size_t)12, KM_SLEEP);
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);

	cdb[0] = SCMD_READ_TOC;
	/* Set the MSF bit based on the user requested address format */
	cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
	/* Some targets expect the starting track number BCD-encoded */
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		cdb[6] = BYTE_TO_BCD(entry->cdte_track);
	} else {
		cdb[6] = entry->cdte_track;
	}

	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte track descriptor)
	 */
	cdb[8] = 12;
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x0C;
	com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
	rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE,
	    UIO_SYSSPACE, SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, 12);
		kmem_free(com, sizeof (*com));
		return (rval);
	}

	/* Process the toc entry */
	entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
	entry->cdte_ctrl = (buffer[5] & 0x0F);
	if (entry->cdte_format & CDROM_LBA) {
		entry->cdte_addr.lba =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
	} else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
		/* This target returns the MSF address fields BCD-encoded */
		entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
		entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
		entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE,
		    UIO_SYSSPACE, SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	} else {
		entry->cdte_addr.msf.minute = buffer[9];
		entry->cdte_addr.msf.second = buffer[10];
		entry->cdte_addr.msf.frame = buffer[11];
		/*
		 * Send a READ TOC command using the LBA address format to get
		 * the LBA for the track requested so it can be used in the
		 * READ HEADER request
		 *
		 * Note: The MSF bit of the READ HEADER command specifies the
		 * output format. The block address specified in that command
		 * must be in LBA format.
		 */
		cdb[1] = 0;
		rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE,
		    UIO_SYSSPACE, SD_PATH_STANDARD);
		if (rval != 0) {
			kmem_free(buffer, 12);
			kmem_free(com, sizeof (*com));
			return (rval);
		}
	}

	/*
	 * Build and send the READ HEADER command to determine the data mode of
	 * the user specified track.
	 */
	if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
	    (entry->cdte_track != CDROM_LEADOUT)) {
		bzero(cdb, CDB_GROUP1);
		cdb[0] = SCMD_READ_HEADER;
		/* buffer[8..11] now hold the track start address in LBA form */
		cdb[2] = buffer[8];
		cdb[3] = buffer[9];
		cdb[4] = buffer[10];
		cdb[5] = buffer[11];
		cdb[8] = 0x08;
		com->uscsi_buflen = 0x08;
		rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE,
		    UIO_SYSSPACE, SD_PATH_STANDARD);
		if (rval == 0) {
			entry->cdte_datamode = buffer[0];
		} else {
			/*
			 * READ HEADER command failed, since this is
			 * obsoleted in one spec, its better to return
			 * -1 for an invalid track so that we can still
			 * receive the rest of the TOC data.
			 */
			entry->cdte_datamode = (uchar_t)-1;
		}
	} else {
		entry->cdte_datamode = (uchar_t)-1;
	}

	kmem_free(buffer, 12);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
		return (EFAULT);

	return (rval);
}


/*
 * Function: sr_read_tochdr()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to read the Table of Contents (TOC) header
 *		(CDROMREADTOCHDR). The TOC header consists of the disk starting
 *		and ending track numbers
 *
 * Arguments: dev - the device 'dev_t'
 *		data - pointer to user provided toc header structure,
 *		specifying the starting and ending track numbers.
 *		flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_tochdr(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_tochdr	toc_header;
	struct cdrom_tochdr	*hdr = &toc_header;
	char			cdb[CDB_GROUP1];
	int			rval;
	caddr_t			buffer;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc(4, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Specifying a track number of 0x00 in the READ TOC command indicates
	 * that the TOC header should be returned
	 */
	cdb[6] = 0x00;
	/*
	 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
	 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
	 */
	cdb[8] = 0x04;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x04;
	com->uscsi_timeout = 300;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE,
	    UIO_SYSSPACE, SD_PATH_STANDARD);
	/*
	 * NOTE(review): rval is not checked before parsing; on command failure
	 * the zero-filled buffer yields track numbers of 0 while rval is
	 * still returned to the caller - confirm this is intended.
	 */
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
		hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
	} else {
		hdr->cdth_trk0 = buffer[2];
		hdr->cdth_trk1 = buffer[3];
	}
	kmem_free(buffer, 4);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
		return (EFAULT);
	}
	return (rval);
}


/*
 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
 * sr_read_cdda(), sr_read_cdxa(), routines implement driver support for
 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
 * digital audio and extended architecture digital audio. These modes are
 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
 * MMC specs.
 *
 * In addition to support for the various data formats these routines also
 * include support for devices that implement only the direct access READ
 * commands (0x08, 0x28), devices that implement the READ_CD commands
 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and
 * READ CDXA commands (0xD8, 0xDB)
 */

/*
 * Function: sr_read_mode1()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl read mode1 requests (CDROMREADMODE1).
27668 * 27669 * Arguments: dev - the device 'dev_t' 27670 * data - pointer to user provided cd read structure specifying 27671 * the lba buffer address and length. 27672 * flag - this argument is a pass through to ddi_copyxxx() 27673 * directly from the mode argument of ioctl(). 27674 * 27675 * Return Code: the code returned by sd_send_scsi_cmd() 27676 * EFAULT if ddi_copyxxx() fails 27677 * ENXIO if fail ddi_get_soft_state 27678 * EINVAL if data pointer is NULL 27679 */ 27680 27681 static int 27682 sr_read_mode1(dev_t dev, caddr_t data, int flag) 27683 { 27684 struct sd_lun *un; 27685 struct cdrom_read mode1_struct; 27686 struct cdrom_read *mode1 = &mode1_struct; 27687 int rval; 27688 #ifdef _MULTI_DATAMODEL 27689 /* To support ILP32 applications in an LP64 world */ 27690 struct cdrom_read32 cdrom_read32; 27691 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27692 #endif /* _MULTI_DATAMODEL */ 27693 27694 if (data == NULL) { 27695 return (EINVAL); 27696 } 27697 27698 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27699 (un->un_state == SD_STATE_OFFLINE)) { 27700 return (ENXIO); 27701 } 27702 27703 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27704 "sd_read_mode1: entry: un:0x%p\n", un); 27705 27706 #ifdef _MULTI_DATAMODEL 27707 switch (ddi_model_convert_from(flag & FMODELS)) { 27708 case DDI_MODEL_ILP32: 27709 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27710 return (EFAULT); 27711 } 27712 /* Convert the ILP32 uscsi data from the application to LP64 */ 27713 cdrom_read32tocdrom_read(cdrd32, mode1); 27714 break; 27715 case DDI_MODEL_NONE: 27716 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27717 return (EFAULT); 27718 } 27719 } 27720 #else /* ! 
_MULTI_DATAMODEL */ 27721 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27722 return (EFAULT); 27723 } 27724 #endif /* _MULTI_DATAMODEL */ 27725 27726 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 27727 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 27728 27729 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27730 "sd_read_mode1: exit: un:0x%p\n", un); 27731 27732 return (rval); 27733 } 27734 27735 27736 /* 27737 * Function: sr_read_cd_mode2() 27738 * 27739 * Description: This routine is the driver entry point for handling CD-ROM 27740 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27741 * support the READ CD (0xBE) command or the 1st generation 27742 * READ CD (0xD4) command. 27743 * 27744 * Arguments: dev - the device 'dev_t' 27745 * data - pointer to user provided cd read structure specifying 27746 * the lba buffer address and length. 27747 * flag - this argument is a pass through to ddi_copyxxx() 27748 * directly from the mode argument of ioctl(). 
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_read	mode2_struct;
	struct cdrom_read	*mode2 = &mode2_struct;
	uchar_t			cdb[CDB_GROUP5];
	int			nblocks;
	int			rval;
#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32	cdrom_read32;
	struct cdrom_read32	*cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	/* Copy in the request, converting from an ILP32 caller if needed */
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode2);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	bzero(cdb, sizeof (cdb));
	if (un->un_f_cfg_read_cd_xd4 == TRUE) {
		/* Read command supported by 1st generation atapi drives */
		cdb[0] = SCMD_READ_CDD4;
	} else {
		/* Universal CD Access Command */
		cdb[0] = SCMD_READ_CD;
	}

	/*
	 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book
	 */
	cdb[1] = CDROM_SECTOR_TYPE_MODE2;

	/* set the start address */
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF);
	cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF);
	cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length; 2336 is the mode 2 sector payload size */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[6] = (uchar_t)(nblocks >> 16);
	cdb[7] = (uchar_t)(nblocks >> 8);
	cdb[8] = (uchar_t)nblocks;

	/* set the filter bits */
	cdb[9] = CDROM_READ_CD_USERDATA;

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	/* cdread_bufaddr is a user buffer; dataspace below is UIO_USERSPACE */
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE,
	    UIO_SYSSPACE, SD_PATH_STANDARD);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_mode2()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl read mode2 requests (CDROMREADMODE2) for devices that
 *		do not support the READ CD (0xBE) command.
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- pointer to user provided cd read structure specifying
 *			  the lba buffer address and length.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 *		EIO if fail to reset block size
 *		EAGAIN if commands are in progress in the driver
 */

static int
sr_read_mode2(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct cdrom_read	mode2_struct;
	struct cdrom_read	*mode2 = &mode2_struct;
	int			rval;
	uint32_t		restore_blksize;
	struct uscsi_cmd	*com;
	uchar_t			cdb[CDB_GROUP0];
	int			nblocks;

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_read32	cdrom_read32;
	struct cdrom_read32	*cdrd32 = &cdrom_read32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	/*
	 * Because this routine will update the device and driver block size
	 * being used we want to make sure there are no commands in progress.
	 * If commands are in progress the user will have to try again.
	 *
	 * We check for 1 instead of 0 because we increment un_ncmds_in_driver
	 * in sdioctl to protect commands from sdioctl through to the top of
	 * sd_uscsi_strategy. See sdioctl for details.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_ncmds_in_driver != 1) {
		mutex_exit(SD_MUTEX(un));
		return (EAGAIN);
	}
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode2: entry: un:0x%p\n", un);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		/* NOTE(review): a copyin failure here returns without the */
		/* matching "exit" trace below.				   */
		if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_read32tocdrom_read(cdrd32, mode2);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/* Store the current target block size for restoration later */
	restore_blksize = un->un_tgt_blocksize;

	/* Change the device and soft state target block size to 2336 */
	if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
		rval = EIO;
		goto done;
	}


	bzero(cdb, sizeof (cdb));

	/* set READ operation */
	cdb[0] = SCMD_READ;

	/* adjust lba for 2kbyte blocks from 512 byte blocks */
	mode2->cdread_lba >>= 2;

	/* set the start address */
	cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F);
	cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
	cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);

	/* set the transfer length */
	nblocks = mode2->cdread_buflen / 2336;
	cdb[4] = (uchar_t)nblocks & 0xFF;

	/* build command */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = (caddr_t)cdb;
	com->uscsi_cdblen = sizeof (cdb);
	com->uscsi_bufaddr = mode2->cdread_bufaddr;
	com->uscsi_buflen = mode2->cdread_buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	/*
	 * Issue SCSI command with user space address for read buffer.
	 *
	 * This sends the command through main channel in the driver.
	 *
	 * Since this is accessed via an IOCTL call, we go through the
	 * standard path, so that if the device was powered down, then
	 * it would be 'awakened' to handle the command.
	 */
	rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	kmem_free(com, sizeof (*com));

	/* Restore the device and soft state target block size */
	if (sr_sector_mode(dev, restore_blksize) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "can't do switch back to mode 1\n");
		/*
		 * If sd_send_scsi_READ succeeded we still need to report
		 * an error because we failed to reset the block size
		 */
		if (rval == 0) {
			rval = EIO;
		}
	}

done:
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_read_mode2: exit: un:0x%p\n", un);

	return (rval);
}


/*
 * Function: sr_sector_mode()
 *
 * Description: This utility function is used by sr_read_mode2 to set the target
 *		block size based on the user specified size. This is a legacy
 *		implementation based upon a vendor specific mode page
 *
 * Arguments: dev	- the device 'dev_t'
 *		blksize	- flag indicating if block size is being set to 2336 or
 *			  512.
28014 * 28015 * Return Code: the code returned by sd_send_scsi_cmd() 28016 * EFAULT if ddi_copyxxx() fails 28017 * ENXIO if fail ddi_get_soft_state 28018 * EINVAL if data pointer is NULL 28019 */ 28020 28021 static int 28022 sr_sector_mode(dev_t dev, uint32_t blksize) 28023 { 28024 struct sd_lun *un; 28025 uchar_t *sense; 28026 uchar_t *select; 28027 int rval; 28028 28029 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28030 (un->un_state == SD_STATE_OFFLINE)) { 28031 return (ENXIO); 28032 } 28033 28034 sense = kmem_zalloc(20, KM_SLEEP); 28035 28036 /* Note: This is a vendor specific mode page (0x81) */ 28037 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 28038 SD_PATH_STANDARD)) != 0) { 28039 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28040 "sr_sector_mode: Mode Sense failed\n"); 28041 kmem_free(sense, 20); 28042 return (rval); 28043 } 28044 select = kmem_zalloc(20, KM_SLEEP); 28045 select[3] = 0x08; 28046 select[10] = ((blksize >> 8) & 0xff); 28047 select[11] = (blksize & 0xff); 28048 select[12] = 0x01; 28049 select[13] = 0x06; 28050 select[14] = sense[14]; 28051 select[15] = sense[15]; 28052 if (blksize == SD_MODE2_BLKSIZE) { 28053 select[14] |= 0x01; 28054 } 28055 28056 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 28057 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 28058 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28059 "sr_sector_mode: Mode Select failed\n"); 28060 } else { 28061 /* 28062 * Only update the softstate block size if we successfully 28063 * changed the device block mode. 28064 */ 28065 mutex_enter(SD_MUTEX(un)); 28066 sd_update_block_info(un, blksize, 0); 28067 mutex_exit(SD_MUTEX(un)); 28068 } 28069 kmem_free(sense, 20); 28070 kmem_free(select, 20); 28071 return (rval); 28072 } 28073 28074 28075 /* 28076 * Function: sr_read_cdda() 28077 * 28078 * Description: This routine is the driver entry point for handling CD-ROM 28079 * ioctl requests to return CD-DA or subcode data. 
(CDROMCDDA) If
 *		the target supports CDDA these requests are handled via a
 *		vendor specific command (0xD8) If the target does not support
 *		CDDA these requests are handled via the READ CD command (0xBE).
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- pointer to user provided CD-DA structure specifying
 *			  the track starting address, transfer length, and
 *			  subcode options.
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if invalid arguments are provided
 *		ENOTTY
 */

static int
sr_read_cdda(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_cdda	*cdda;
	int			rval;
	size_t			buflen;
	char			cdb[CDB_GROUP5];

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_cdda32	cdrom_cdda32;
	struct cdrom_cdda32	*cdda32 = &cdrom_cdda32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_cdda: ddi_copyin Failed\n");
			kmem_free(cdda, sizeof (struct cdrom_cdda));
			return (EFAULT);
		}
		/* Convert the ILP32 uscsi data from the application to LP64 */
		cdrom_cdda32tocdrom_cdda(cdda32, cdda);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_read_cdda: ddi_copyin Failed\n");
			kmem_free(cdda, sizeof (struct cdrom_cdda));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdda: ddi_copyin Failed\n");
		kmem_free(cdda, sizeof (struct cdrom_cdda));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((cdda->cdda_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    cdda->cdda_length, 0xFFFFFF);
		kmem_free(cdda, sizeof (struct cdrom_cdda));
		return (EINVAL);
	}

	/* Size the transfer buffer by the bytes-per-sector each subcode */
	/* option yields.						  */
	switch (cdda->cdda_subcode) {
	case CDROM_DA_NO_SUBCODE:
		buflen = CDROM_BLK_2352 * cdda->cdda_length;
		break;
	case CDROM_DA_SUBQ:
		buflen = CDROM_BLK_2368 * cdda->cdda_length;
		break;
	case CDROM_DA_ALL_SUBCODE:
		buflen = CDROM_BLK_2448 * cdda->cdda_length;
		break;
	case CDROM_DA_SUBCODE_ONLY:
		buflen = CDROM_BLK_SUBCODE * cdda->cdda_length;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdda: Subcode '0x%x' Not Supported\n",
		    cdda->cdda_subcode);
		kmem_free(cdda, sizeof (struct cdrom_cdda));
		return (EINVAL);
	}

	/* Build and send the command */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);

	if (un->un_f_cfg_cdda == TRUE) {
		/* Target supports READ CD (0xBE); expected sector type CDDA */
		cdb[0] = (char)SCMD_READ_CD;
		cdb[1] = 0x04;
		cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
		cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdda->cdda_length) & 0x000000ff);
		cdb[9] = 0x10;
		/* Map the ioctl subcode option to the READ CD subchannel */
		/* selection bits (byte 10).				   */
		switch (cdda->cdda_subcode) {
		case CDROM_DA_NO_SUBCODE :
			cdb[10] = 0x0;
			break;
		case CDROM_DA_SUBQ :
			cdb[10] = 0x2;
			break;
		case CDROM_DA_ALL_SUBCODE :
			cdb[10] = 0x1;
			break;
		case CDROM_DA_SUBCODE_ONLY :
			/* FALLTHROUGH */
		default :
			/* READ CD cannot return subcode-only data */
			kmem_free(cdda, sizeof (struct cdrom_cdda));
			kmem_free(com, sizeof (*com));
			return (ENOTTY);
		}
	} else {
		/* 1st generation vendor specific READ CDDA (0xD8) command */
		cdb[0] = (char)SCMD_READ_CDDA;
		cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
		cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24);
		cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
		cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
		cdb[9] = ((cdda->cdda_length) & 0x000000ff);
		cdb[10] = cdda->cdda_subcode;
	}

	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)cdda->cdda_data;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE,
	    UIO_SYSSPACE, SD_PATH_STANDARD);

	kmem_free(cdda, sizeof (struct cdrom_cdda));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_read_cdxa()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests to return CD-XA (Extended Architecture) data.
 *		(CDROMCDXA).
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- pointer to user provided CD-XA structure specifying
 *			  the data starting address, transfer length, and format
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_cdxa(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	struct cdrom_cdxa	*cdxa;
	int			rval;
	size_t			buflen;
	char			cdb[CDB_GROUP5];
	uchar_t			read_flags;

#ifdef _MULTI_DATAMODEL
	/* To support ILP32 applications in an LP64 world */
	struct cdrom_cdxa32	cdrom_cdxa32;
	struct cdrom_cdxa32	*cdxa32 = &cdrom_cdxa32;
#endif /* _MULTI_DATAMODEL */

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		/*
		 * Convert the ILP32 uscsi data from the
		 * application to LP64 for internal use.
		 */
		cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
		break;
	case DDI_MODEL_NONE:
		if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
			kmem_free(cdxa, sizeof (struct cdrom_cdxa));
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	/*
	 * Since MMC-2 expects max 3 bytes for length, check if the
	 * length input is greater than 3 bytes
	 */
	if ((cdxa->cdxa_length & 0xFF000000) != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
		    "cdrom transfer length too large: %d (limit %d)\n",
		    cdxa->cdxa_length, 0xFFFFFF);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	/* Select buffer size and READ CD byte-9 filter bits per format */
	switch (cdxa->cdxa_format) {
	case CDROM_XA_DATA:
		buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
		read_flags = 0x10;
		break;
	case CDROM_XA_SECTOR_DATA:
		buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
		read_flags = 0xf8;
		break;
	case CDROM_XA_DATA_W_ERROR:
		buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
		read_flags = 0xfc;
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_read_cdxa: Format '0x%x' Not Supported\n",
		    cdxa->cdxa_format);
		kmem_free(cdxa, sizeof (struct cdrom_cdxa));
		return (EINVAL);
	}

	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP5);
	if (un->un_f_mmc_cap == TRUE) {
		/* MMC capable: use the standard READ CD (0xBE) command */
		cdb[0] = (char)SCMD_READ_CD;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[9] = (char)read_flags;
	} else {
		/*
		 * Note: A vendor specific command (0xDB) is being used here to
		 * request a read of all subcodes.
		 */
		cdb[0] = (char)SCMD_READ_CDXA;
		cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
		cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
		cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
		cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
		cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24);
		cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
		cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
		cdb[9] = ((cdxa->cdxa_length) & 0x000000ff);
		cdb[10] = cdxa->cdxa_format;
	}
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP5;
	com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data;
	com->uscsi_buflen = buflen;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE,
	    UIO_SYSSPACE, SD_PATH_STANDARD);
	kmem_free(cdxa, sizeof (struct cdrom_cdxa));
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sr_eject()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT)
 *
 * Arguments: dev	- the device 'dev_t'
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 */

static int
sr_eject(dev_t dev)
{
	struct sd_lun	*un;
	int		rval;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}
	/* Unlock the door first; an eject would fail while it is locked */
	if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW,
	    SD_PATH_STANDARD)) != 0) {
		return (rval);
	}

	rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT,
	    SD_PATH_STANDARD);

	if (rval == 0) {
		/* Invalidate geometry and wake any DKIOCSTATE waiters */
		mutex_enter(SD_MUTEX(un));
		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		cv_broadcast(&un->un_state_cv);
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}


/*
 * Function: sr_ejected()
 *
 * Description: This routine updates the soft state structure to invalidate the
 *		geometry information after the media has been ejected or a
 *		media eject has been detected.
 *
 * Arguments: un - driver soft state (unit) structure
 */

static void
sr_ejected(struct sd_lun *un)
{
	struct sd_errstats *stp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	un->un_f_blockcount_is_valid = FALSE;
	un->un_f_tgt_blocksize_is_valid = FALSE;
	un->un_f_geometry_is_valid = FALSE;

	/* Zero the reported capacity in the error kstats, if present */
	if (un->un_errstats != NULL) {
		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		stp->sd_capacity.value.ui64 = 0;
	}
}


/*
 * Function: sr_check_wp()
 *
 * Description: This routine checks the write protection of a removable media
 *		disk via the write protect bit of the Mode Page Header device
 *		specific field. This routine has been implemented to use the
 *		error recovery mode page for all device types.
 *		Note: In the future use a sd_send_scsi_MODE_SENSE() routine
 *
 * Arguments: dev		- the device 'dev_t'
 *
 * Return Code: int indicating if the device is write protected (1) or not (0)
 *
 * Context: Kernel thread.
 *
 */

static int
sr_check_wp(dev_t dev)
{
	struct sd_lun	*un;
	uchar_t		device_specific;
	uchar_t		*sense;
	int		hdrlen;
	int		rval;
	int		retry_flag = FALSE;

	/*
	 * Note: The return codes for this routine should be reworked to
	 * properly handle the case of a NULL softstate.
	 */
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (FALSE);
	}

	/* ATAPI devices get one retry; see the comment in the retry path */
	if (un->un_f_cfg_is_atapi == TRUE) {
		retry_flag = TRUE;
	}

retry:
	if (un->un_f_cfg_is_atapi == TRUE) {
		/*
		 * The mode page contents are not required; set the allocation
		 * length for the mode page header only
		 */
		hdrlen = MODE_HEADER_LENGTH_GRP2;
		sense = kmem_zalloc(hdrlen, KM_SLEEP);
		rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen,
		    MODEPAGE_ERR_RECOV, SD_PATH_STANDARD);
		device_specific =
		    ((struct mode_header_grp2 *)sense)->device_specific;
	} else {
		hdrlen = MODE_HEADER_LENGTH;
		sense = kmem_zalloc(hdrlen, KM_SLEEP);
		rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen,
		    MODEPAGE_ERR_RECOV, SD_PATH_STANDARD);
		device_specific =
		    ((struct mode_header *)sense)->device_specific;
	}

	if (rval != 0) {
		if ((un->un_f_cfg_is_atapi == TRUE) && (retry_flag)) {
			/*
			 * For an Atapi Zip drive, observed the drive
			 * reporting check condition for the first attempt.
			 * Sense data indicating power on or bus device/reset.
			 * Hence in case of failure need to try at least once
			 * for Atapi devices.
			 */
			retry_flag = FALSE;
			kmem_free(sense, hdrlen);
			goto retry;
		} else {
			/*
			 * Write protect mode sense failed; not all disks
			 * understand this query. Return FALSE assuming that
			 * these devices are not writable.
			 */
			rval = FALSE;
		}
	} else {
		if (device_specific & WRITE_PROTECT) {
			rval = TRUE;
		} else {
			rval = FALSE;
		}
	}
	kmem_free(sense, hdrlen);
	return (rval);
}


/*
 * Function: sr_volume_ctrl()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		audio output volume ioctl requests.
(CDROMVOLCTRL)
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- pointer to user audio volume control structure
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 *
 */

static int
sr_volume_ctrl(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct cdrom_volctrl	volume;
	struct cdrom_volctrl	*vol = &volume;
	uchar_t			*sense_page;
	uchar_t			*select_page;
	uchar_t			*sense;
	uchar_t			*select;
	int			sense_buflen;
	int			select_buflen;
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) {
		return (EFAULT);
	}

	if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
		/* ATAPI/MMC: 10-byte mode commands with a grp2 header */
		struct mode_header_grp2	*sense_mhp;
		struct mode_header_grp2	*select_mhp;
		int			bd_len;

		sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN;
		select_buflen = MODE_HEADER_LENGTH_GRP2 +
		    MODEPAGE_AUDIO_CTRL_LEN;
		sense = kmem_zalloc(sense_buflen, KM_SLEEP);
		select = kmem_zalloc(select_buflen, KM_SLEEP);
		if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense,
		    sense_buflen, MODEPAGE_AUDIO_CTRL,
		    SD_PATH_STANDARD)) != 0) {
			SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
			    "sr_volume_ctrl: Mode Sense Failed\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (rval);
		}
		sense_mhp = (struct mode_header_grp2 *)sense;
		select_mhp = (struct mode_header_grp2 *)select;
		bd_len = (sense_mhp->bdesc_length_hi << 8) |
		    sense_mhp->bdesc_length_lo;
		if (bd_len > MODE_BLK_DESC_LENGTH) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_volume_ctrl: Mode Sense returned invalid "
			    "block descriptor length\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (EIO);
		}
		/* Page data follows the header plus any block descriptors; */
		/* the select buffer is built with no block descriptors.    */
		sense_page = (uchar_t *)
		    (sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
		select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2);
		select_mhp->length_msb = 0;
		select_mhp->length_lsb = 0;
		select_mhp->bdesc_length_hi = 0;
		select_mhp->bdesc_length_lo = 0;
	} else {
		/* Legacy SCSI: 6-byte mode commands with the short header */
		struct mode_header	*sense_mhp, *select_mhp;

		sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
		select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
		sense = kmem_zalloc(sense_buflen, KM_SLEEP);
		select = kmem_zalloc(select_buflen, KM_SLEEP);
		if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense,
		    sense_buflen, MODEPAGE_AUDIO_CTRL,
		    SD_PATH_STANDARD)) != 0) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_volume_ctrl: Mode Sense Failed\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (rval);
		}
		sense_mhp = (struct mode_header *)sense;
		select_mhp = (struct mode_header *)select;
		if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_volume_ctrl: Mode Sense returned invalid "
			    "block descriptor length\n");
			kmem_free(sense, sense_buflen);
			kmem_free(select, select_buflen);
			return (EIO);
		}
		sense_page = (uchar_t *)
		    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
		select_page = (uchar_t *)(select + MODE_HEADER_LENGTH);
		select_mhp->length = 0;
		select_mhp->bdesc_length = 0;
	}
	/*
	 * Note: An audio control data structure could be created and overlayed
	 * on the following in place of the array indexing method implemented.
	 */

	/* Build the select data for the user volume data */
	select_page[0] = MODEPAGE_AUDIO_CTRL;
	select_page[1] = 0xE;
	/* Set the immediate bit */
	select_page[2] = 0x04;
	/* Zero out reserved fields */
	select_page[3] = 0x00;
	select_page[4] = 0x00;
	/* Return sense data for fields not to be modified */
	select_page[5] = sense_page[5];
	select_page[6] = sense_page[6];
	select_page[7] = sense_page[7];
	/* Set the user specified volume levels for channel 0 and 1 */
	select_page[8] = 0x01;
	select_page[9] = vol->channel0;
	select_page[10] = 0x02;
	select_page[11] = vol->channel1;
	/* Channel 2 and 3 are currently unsupported so return the sense data */
	select_page[12] = sense_page[12];
	select_page[13] = sense_page[13];
	select_page[14] = sense_page[14];
	select_page[15] = sense_page[15];

	if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
		rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	} else {
		rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select,
		    select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
	}

	kmem_free(sense, sense_buflen);
	kmem_free(select, select_buflen);
	return (rval);
}


/*
 * Function: sr_read_sony_session_offset()
 *
 * Description: This routine is the driver entry point for handling CD-ROM
 *		ioctl requests for session offset information.
(CDROMREADOFFSET)
 *		The address of the first track in the last session of a
 *		multi-session CD-ROM is returned
 *
 *		Note: This routine uses a vendor specific key value in the
 *		command control field without implementing any vendor check
 *		here or in the ioctl routine.
 *
 * Arguments: dev	- the device 'dev_t'
 *		data	- pointer to an int to hold the requested address
 *		flag	- this argument is a pass through to ddi_copyxxx()
 *			  directly from the mode argument of ioctl().
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		EFAULT if ddi_copyxxx() fails
 *		ENXIO if fail ddi_get_soft_state
 *		EINVAL if data pointer is NULL
 */

static int
sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*com;
	caddr_t			buffer;
	char			cdb[CDB_GROUP1];
	int			session_offset = 0;
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	/*
	 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
	 * (4 byte TOC response header + 8 byte response data)
	 */
	cdb[8] = SONY_SESSION_OFFSET_LEN;
	/* Byte 9 is the control byte. A vendor specific value is used */
	cdb[9] = SONY_SESSION_OFFSET_KEY;
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
	com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;

	rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE,
	    UIO_SYSSPACE, SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
		kmem_free(com, sizeof (*com));
		return (rval);
	}
	if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
		/* Assemble the big-endian 4-byte offset from the response */
		session_offset =
		    ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
		    ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
		/*
		 * Offset returned offset in current lbasize block's. Convert to
		 * 2k block's to return to the user
		 */
		if (un->un_tgt_blocksize == CDROM_BLK_512) {
			session_offset >>= 2;
		} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
			session_offset >>= 1;
		}
	}

	if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
		rval = EFAULT;
	}

	kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
	kmem_free(com, sizeof (*com));
	return (rval);
}


/*
 * Function: sd_wm_cache_constructor()
 *
 * Description: Cache Constructor for the wmap cache for the read/modify/write
 *		devices.
 *
 * Arguments: wm	- A pointer to the sd_w_map to be initialized.
 *		un	- sd_lun structure for the device.
 *		flag	- the km flags passed to constructor
 *
 * Return Code: 0 on success.
 *		-1 on failure.
 */

/*ARGSUSED*/
static int
sd_wm_cache_constructor(void *wm, void *un, int flags)
{
	/* Zero the map and set up the cv that sd_range_unlock() broadcasts. */
	bzero(wm, sizeof (struct sd_w_map));
	cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
	return (0);
}


/*
 *    Function: sd_wm_cache_destructor()
 *
 * Description: Cache destructor for the wmap cache for the read/modify/write
 *		devices.
 *
 *   Arguments: wm      - A pointer to the sd_w_map to be initialized.
 *		un	- sd_lun structure for the device.
 */
/*ARGSUSED*/
static void
sd_wm_cache_destructor(void *wm, void *un)
{
	cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
}


/*
 *    Function: sd_range_lock()
 *
 * Description: Lock the range of blocks specified as parameter to ensure
 *		that read, modify write is atomic and no other i/o writes
 *		to the same location. The range is specified in terms
 *		of start and end blocks. Block numbers are the actual
 *		media block numbers and not system.
 *
 *		Implemented as a small state machine (SD_WM_CHK_LIST ->
 *		SD_WM_LOCK_RANGE / SD_WM_WAIT_MAP -> SD_WM_DONE) run under
 *		SD_MUTEX(un); the mutex is dropped only across the sleeping
 *		kmem_cache_alloc and inside cv_wait.
 *
 *   Arguments: un	- sd_lun structure for the device.
 *		startb - The starting block number
 *		endb - The end block number
 *		typ - type of i/o - simple/read_modify_write
 *
 * Return Code: wm  - pointer to the wmap structure.
 *
 *     Context: This routine can sleep.
 */

static struct sd_w_map *
sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
{
	struct sd_w_map *wmp = NULL;
	struct sd_w_map *sl_wmp = NULL;
	struct sd_w_map *tmp_wmp;
	wm_state state = SD_WM_CHK_LIST;


	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	while (state != SD_WM_DONE) {

		switch (state) {
		case SD_WM_CHK_LIST:
			/*
			 * This is the starting state. Check the wmap list
			 * to see if the range is currently available.
			 */
			if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
				/*
				 * If this is a simple write and no rmw
				 * i/o is pending then try to lock the
				 * range as the range should be available.
				 */
				state = SD_WM_LOCK_RANGE;
			} else {
				tmp_wmp = sd_get_range(un, startb, endb);
				if (tmp_wmp != NULL) {
					if ((wmp != NULL) && ONLIST(un, wmp)) {
						/*
						 * Should not keep onlist wmps
						 * while waiting this macro
						 * will also do wmp = NULL;
						 */
						FREE_ONLIST_WMAP(un, wmp);
					}
					/*
					 * sl_wmp is the wmap on which wait
					 * is done, since the tmp_wmp points
					 * to the inuse wmap, set sl_wmp to
					 * tmp_wmp and change the state to sleep
					 */
					sl_wmp = tmp_wmp;
					state = SD_WM_WAIT_MAP;
				} else {
					state = SD_WM_LOCK_RANGE;
				}

			}
			break;

		case SD_WM_LOCK_RANGE:
			ASSERT(un->un_wm_cache);
			/*
			 * The range need to be locked, try to get a wmap.
			 * First attempt it with NO_SLEEP, want to avoid a sleep
			 * if possible as we will have to release the sd mutex
			 * if we have to sleep.
			 */
			if (wmp == NULL)
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_NOSLEEP);
			if (wmp == NULL) {
				mutex_exit(SD_MUTEX(un));
				_NOTE(DATA_READABLE_WITHOUT_LOCK
				    (sd_lun::un_wm_cache))
				wmp = kmem_cache_alloc(un->un_wm_cache,
				    KM_SLEEP);
				mutex_enter(SD_MUTEX(un));
				/*
				 * we released the mutex so recheck and go to
				 * check list state.
				 */
				state = SD_WM_CHK_LIST;
			} else {
				/*
				 * We exit out of state machine since we
				 * have the wmap. Do the housekeeping first.
				 * place the wmap on the wmap list if it is not
				 * on it already and then set the state to done.
				 */
				wmp->wm_start = startb;
				wmp->wm_end = endb;
				wmp->wm_flags = typ | SD_WM_BUSY;
				if (typ & SD_WTYPE_RMW) {
					un->un_rmw_count++;
				}
				/*
				 * If not already on the list then link
				 */
				if (!ONLIST(un, wmp)) {
					wmp->wm_next = un->un_wm;
					wmp->wm_prev = NULL;
					if (wmp->wm_next)
						wmp->wm_next->wm_prev = wmp;
					un->un_wm = wmp;
				}
				state = SD_WM_DONE;
			}
			break;

		case SD_WM_WAIT_MAP:
			ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
			/*
			 * Wait is done on sl_wmp, which is set in the
			 * check_list state.
			 */
			sl_wmp->wm_wanted_count++;
			cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
			sl_wmp->wm_wanted_count--;
			if (!(sl_wmp->wm_flags & SD_WM_BUSY)) {
				/*
				 * The holder released the range (and cleared
				 * SD_WM_BUSY in sd_range_unlock); reuse
				 * sl_wmp as our own map, dropping any map we
				 * had allocated.
				 */
				if (wmp != NULL)
					CHK_N_FREEWMP(un, wmp);
				wmp = sl_wmp;
			}
			sl_wmp = NULL;
			/*
			 * After waking up, need to recheck for availability of
			 * range.
			 */
			state = SD_WM_CHK_LIST;
			break;

		default:
			panic("sd_range_lock: "
			    "Unknown state %d in sd_range_lock", state);
			/*NOTREACHED*/
		} /* switch(state) */

	} /* while(state != SD_WM_DONE) */

	mutex_exit(SD_MUTEX(un));

	ASSERT(wmp != NULL);

	return (wmp);
}


/*
 *    Function: sd_get_range()
 *
 * Description: Find if there any overlapping I/O to this one
 *		Returns the write-map of 1st such I/O, NULL otherwise.
 *
 *   Arguments: un	- sd_lun structure for the device.
 *		startb	- The starting block number
 *		endb	- The end block number
 *
 * Return Code: wm  - pointer to the wmap structure.
 */

static struct sd_w_map *
sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
{
	struct sd_w_map *wmp;

	ASSERT(un != NULL);

	/*
	 * Walk the per-lun wmap list; only SD_WM_BUSY entries represent
	 * active locked ranges. Caller holds SD_MUTEX(un) (called from
	 * sd_range_lock with the mutex held).
	 */
	for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
		if (!(wmp->wm_flags & SD_WM_BUSY)) {
			continue;
		}
		/*
		 * Overlap if either endpoint of [startb, endb] lands inside
		 * wmp's range.
		 * NOTE(review): a range that strictly encloses wmp
		 * (startb < wm_start && endb > wm_end) is not detected here;
		 * presumably callers never generate such ranges — confirm.
		 */
		if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
			break;
		}
		if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
			break;
		}
	}

	return (wmp);
}


/*
 *    Function: sd_free_inlist_wmap()
 *
 * Description: Unlink and free a write map struct.
 *
 *   Arguments: un      - sd_lun structure for the device.
 *		wmp	- sd_w_map which needs to be unlinked.
 */

static void
sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
{
	ASSERT(un != NULL);

	/* Unlink wmp from the doubly-linked un_wm list. */
	if (un->un_wm == wmp) {
		un->un_wm = wmp->wm_next;
	} else {
		wmp->wm_prev->wm_next = wmp->wm_next;
	}

	if (wmp->wm_next) {
		wmp->wm_next->wm_prev = wmp->wm_prev;
	}

	wmp->wm_next = wmp->wm_prev = NULL;

	kmem_cache_free(un->un_wm_cache, wmp);
}


/*
 *    Function: sd_range_unlock()
 *
 * Description: Unlock the range locked by wm.
 *		Free write map if nobody else is waiting on it.
 *
 *   Arguments: un      - sd_lun structure for the device.
 *              wm      - sd_w_map which needs to be unlinked.
 */

static void
sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
{
	ASSERT(un != NULL);
	ASSERT(wm != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	if (wm->wm_flags & SD_WTYPE_RMW) {
		un->un_rmw_count--;
	}

	if (wm->wm_wanted_count) {
		/* Clearing wm_flags also clears SD_WM_BUSY for the waiters. */
		wm->wm_flags = 0;
		/*
		 * Broadcast that the wmap is available now.
		 */
		cv_broadcast(&wm->wm_avail);
	} else {
		/*
		 * If no one is waiting on the map, it should be free'ed.
		 */
		sd_free_inlist_wmap(un, wm);
	}

	mutex_exit(SD_MUTEX(un));
}


/*
 *    Function: sd_read_modify_write_task
 *
 * Description: Called from a taskq thread to initiate the write phase of
 *		a read-modify-write request.  This is used for targets where
 *		un->un_sys_blocksize != un->un_tgt_blocksize.
 *
 *   Arguments: arg - a pointer to the buf(9S) struct for the write command.
 *
 *     Context: Called under taskq thread context.
 */

static void
sd_read_modify_write_task(void *arg)
{
	struct sd_mapblocksize_info	*bsp;
	struct buf	*bp;
	struct sd_xbuf	*xp;
	struct sd_lun	*un;

	bp = arg;	/* The bp is given in arg */
	ASSERT(bp != NULL);

	/* Get the pointer to the layer-private data struct */
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);

	/*
	 * This is the write phase of a read-modify-write request, called
	 * under the context of a taskq thread in response to the completion
	 * of the read portion of the rmw request completing under interrupt
	 * context. The write request must be sent from here down the iostart
	 * chain as if it were being sent from sd_mapblocksize_iostart(), so
	 * we use the layer index saved in the layer-private data area.
	 */
	SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
}


/*
 *    Function: sddump_do_read_of_rmw()
 *
 * Description: This routine will be called from sddump, If sddump is called
 *		with an I/O which not aligned on device blocksize boundary
 *		then the write has to be converted to read-modify-write.
 *		Do the read part here in order to keep sddump simple.
 *		Note - That the sd_mutex is held across the call to this
 *		routine.
 *
 *   Arguments: un	- sd_lun
 *		blkno	- block number in terms of media block size.
 *		nblk	- number of blocks.
 *		bpp	- pointer to pointer to the buf structure. On return
 *			from this function, *bpp points to the valid buffer
 *			to which the write has to be done.
 *
 * Return Code: 0 for success or errno-type return code
 */

static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
	struct buf **bpp)
{
	int err;
	int i;
	int rval;
	struct buf *bp;
	struct scsi_pkt *pkt = NULL;
	uint32_t target_blocksize;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	target_blocksize = un->un_tgt_blocksize;

	/* Dropped across the allocation/polling; re-taken at done:. */
	mutex_exit(SD_MUTEX(un));

	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
	if (bp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
	    blkno, nblk);
	if (rval != 0) {
		scsi_free_consistent_buf(bp);
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	/* Dump context: the command must complete by polling, no interrupts */
	pkt->pkt_flags |= FLAG_NOINTR;

	err = EIO;
	for (i = 0; i < SD_NDUMP_RETRIES; i++) {

		/*
		 * Scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD.  We should only check
		 * errors if this condition is not true.  Even then we should
		 * send our own request sense packet only if we have a check
		 * condition and auto request sense has not been performed by
		 * the hba.
		 */
		SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");

		if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
			err = 0;
			break;
		}

		/*
		 * Check CMD_DEV_GONE 1st, give up if device is gone,
		 * no need to read RQS data.
		 */
		if (pkt->pkt_reason == CMD_DEV_GONE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
			    "Device is gone\n");
			break;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with CHECK, try # %d\n", i);
			if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) {
				(void) sd_send_polled_RQS(un);
			}

			continue;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
			int reset_retval = 0;

			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with BUSY, try # %d\n", i);

			if (un->un_f_lun_reset_enabled == TRUE) {
				reset_retval = scsi_reset(SD_ADDRESS(un),
				    RESET_LUN);
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
			}
			(void) sd_send_polled_RQS(un);

		} else {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with 0x%x, try # %d\n",
			    SD_GET_PKT_STATUS(pkt), i);
			mutex_enter(SD_MUTEX(un));
			sd_reset_target(un, pkt);
			mutex_exit(SD_MUTEX(un));
		}

		/*
		 * If we are not getting anywhere with lun/target resets,
		 * let's reset the bus.
		 */
		if (i > SD_NDUMP_RETRIES/2) {
			(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			(void) sd_send_polled_RQS(un);
		}

	}
	scsi_destroy_pkt(pkt);

	/* On failure the consistent buf is released and *bpp cleared. */
	if (err != 0) {
		scsi_free_consistent_buf(bp);
		*bpp = NULL;
	} else {
		*bpp = bp;
	}

done:
	mutex_enter(SD_MUTEX(un));
	return (err);
}


/*
 *    Function: sd_failfast_flushq
 *
 * Description: Take all bp's on the wait queue that have B_FAILFAST set
 *		in b_flags and move them onto the failfast queue, then kick
 *		off a thread to return all bp's on the failfast queue to
 *		their owners with an error set.
 *
 *   Arguments: un - pointer to the soft state struct for the instance.
 *
 *     Context: may execute in interrupt context.
 */

static void
sd_failfast_flushq(struct sd_lun *un)
{
	struct buf *bp;
	struct buf *next_waitq_bp;
	struct buf *prev_waitq_bp = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
	ASSERT(un->un_failfast_bp == NULL);

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: entry: un:0x%p\n", un);

	/*
	 * Check if we should flush all bufs when entering failfast state, or
	 * just those with B_FAILFAST set.
	 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
		/*
		 * Move *all* bp's on the wait queue to the failfast flush
		 * queue, including those that do NOT have B_FAILFAST set.
		 * The whole waitq is appended to the failfast queue in one
		 * splice, then emptied.
		 */
		if (un->un_failfast_headp == NULL) {
			ASSERT(un->un_failfast_tailp == NULL);
			un->un_failfast_headp = un->un_waitq_headp;
		} else {
			ASSERT(un->un_failfast_tailp != NULL);
			un->un_failfast_tailp->av_forw = un->un_waitq_headp;
		}

		un->un_failfast_tailp = un->un_waitq_tailp;
		un->un_waitq_headp = un->un_waitq_tailp = NULL;

	} else {
		/*
		 * Go thru the wait queue, pick off all entries with
		 * B_FAILFAST set, and move these onto the failfast queue.
		 */
		for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
			/*
			 * Save the pointer to the next bp on the wait queue,
			 * so we get to it on the next iteration of this loop.
			 */
			next_waitq_bp = bp->av_forw;

			/*
			 * If this bp from the wait queue does NOT have
			 * B_FAILFAST set, just move on to the next element
			 * in the wait queue. Note, this is the only place
			 * where it is correct to set prev_waitq_bp.
			 */
			if ((bp->b_flags & B_FAILFAST) == 0) {
				prev_waitq_bp = bp;
				continue;
			}

			/*
			 * Remove the bp from the wait queue.
			 */
			if (bp == un->un_waitq_headp) {
				/* The bp is the first element of the waitq. */
				un->un_waitq_headp = next_waitq_bp;
				if (un->un_waitq_headp == NULL) {
					/* The wait queue is now empty */
					un->un_waitq_tailp = NULL;
				}
			} else {
				/*
				 * The bp is either somewhere in the middle
				 * or at the end of the wait queue.
				 */
				ASSERT(un->un_waitq_headp != NULL);
				ASSERT(prev_waitq_bp != NULL);
				ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
				    == 0);
				if (bp == un->un_waitq_tailp) {
					/* bp is the last entry on the waitq. */
					ASSERT(next_waitq_bp == NULL);
					un->un_waitq_tailp = prev_waitq_bp;
				}
				prev_waitq_bp->av_forw = next_waitq_bp;
			}
			bp->av_forw = NULL;

			/*
			 * Now put the bp onto the failfast queue.
			 */
			if (un->un_failfast_headp == NULL) {
				/* failfast queue is currently empty */
				ASSERT(un->un_failfast_tailp == NULL);
				un->un_failfast_headp =
				    un->un_failfast_tailp = bp;
			} else {
				/* Add the bp to the end of the failfast q */
				ASSERT(un->un_failfast_tailp != NULL);
				ASSERT(un->un_failfast_tailp->b_flags &
				    B_FAILFAST);
				un->un_failfast_tailp->av_forw = bp;
				un->un_failfast_tailp = bp;
			}
		}
	}

	/*
	 * Now return all bp's on the failfast queue to their owners.
	 */
	while ((bp = un->un_failfast_headp) != NULL) {

		un->un_failfast_headp = bp->av_forw;
		if (un->un_failfast_headp == NULL) {
			un->un_failfast_tailp = NULL;
		}

		/*
		 * We want to return the bp with a failure error code, but
		 * we do not want a call to sd_start_cmds() to occur here,
		 * so use sd_return_failed_command_no_restart() instead of
		 * sd_return_failed_command().
		 */
		sd_return_failed_command_no_restart(un, bp, EIO);
	}

	/* Flush the xbuf queues if required. */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}


/*
 *    Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *		flushing. Used with ddi_xbuf_flushq(9F).
 *
 *   Arguments: bp - ptr to buf struct to be examined.
 *
 *     Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
	/*
	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
	 * state is entered; OR (2) the given bp has B_FAILFAST set.
	 */
	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}



#if defined(__i386) || defined(__amd64)
/*
 *    Function: sd_setup_next_xfer
 *
 * Description: Prepare next I/O operation using DMA_PARTIAL
 *
 *   Arguments: un	- sd_lun for the device
 *		bp	- the buf being transferred
 *		pkt	- the scsi_pkt carrying the partial DMA transfer
 *		xp	- layer-private data (xb_dma_resid tracks bytes
 *			  not yet handed to the HBA)
 *
 * Return Code: 1 if the next portion was set up, 0 on allocation failure
 *		(bp is marked B_ERROR).
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t	num_blks_not_xfered;
	daddr_t	strt_blk_num;
	ssize_t	bytes_not_xfered;
	int	rval;

	ASSERT(pkt->pkt_resid == 0);

	/*
	 * Calculate next block number and amount to be transferred.
	 *
	 * How much data NOT transfered to the HBA yet.
	 */
	bytes_not_xfered = xp->xb_dma_resid;

	/*
	 * figure how many blocks NOT transfered to the HBA yet.
	 */
	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/*
	 * set starting block number to the end of what WAS transfered.
	 */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	/*
	 * Move pkt to the next portion of the xfer.  sd_setup_next_rw_pkt
	 * will call scsi_initpkt with NULL_FUNC so we do not have to release
	 * the disk mutex here.
	 */
	rval = sd_setup_next_rw_pkt(un, pkt, bp,
	    strt_blk_num, num_blks_not_xfered);

	if (rval == 0) {

		/*
		 * Success.
		 *
		 * Adjust things if there are still more blocks to be
		 * transfered.
29532 */ 29533 xp->xb_dma_resid = pkt->pkt_resid; 29534 pkt->pkt_resid = 0; 29535 29536 return (1); 29537 } 29538 29539 /* 29540 * There's really only one possible return value from 29541 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt 29542 * returns NULL. 29543 */ 29544 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 29545 29546 bp->b_resid = bp->b_bcount; 29547 bp->b_flags |= B_ERROR; 29548 29549 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 29550 "Error setting up next portion of DMA transfer\n"); 29551 29552 return (0); 29553 } 29554 #endif 29555 29556 /* 29557 * Note: The following sd_faultinjection_ioctl( ) routines implement 29558 * driver support for handling fault injection for error analysis 29559 * causing faults in multiple layers of the driver. 29560 * 29561 */ 29562 29563 #ifdef SD_FAULT_INJECTION 29564 static uint_t sd_fault_injection_on = 0; 29565 29566 /* 29567 * Function: sd_faultinjection_ioctl() 29568 * 29569 * Description: This routine is the driver entry point for handling 29570 * faultinjection ioctls to inject errors into the 29571 * layer model 29572 * 29573 * Arguments: cmd - the ioctl cmd recieved 29574 * arg - the arguments from user and returns 29575 */ 29576 29577 static void 29578 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) { 29579 29580 uint_t i; 29581 uint_t rval; 29582 29583 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n"); 29584 29585 mutex_enter(SD_MUTEX(un)); 29586 29587 switch (cmd) { 29588 case SDIOCRUN: 29589 /* Allow pushed faults to be injected */ 29590 SD_INFO(SD_LOG_SDTEST, un, 29591 "sd_faultinjection_ioctl: Injecting Fault Run\n"); 29592 29593 sd_fault_injection_on = 1; 29594 29595 SD_INFO(SD_LOG_IOERR, un, 29596 "sd_faultinjection_ioctl: run finished\n"); 29597 break; 29598 29599 case SDIOCSTART: 29600 /* Start Injection Session */ 29601 SD_INFO(SD_LOG_SDTEST, un, 29602 "sd_faultinjection_ioctl: Injecting Fault Start\n"); 29603 29604 sd_fault_injection_on = 0; 29605 un->sd_injection_mask 
= 0xFFFFFFFF; 29606 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 29607 un->sd_fi_fifo_pkt[i] = NULL; 29608 un->sd_fi_fifo_xb[i] = NULL; 29609 un->sd_fi_fifo_un[i] = NULL; 29610 un->sd_fi_fifo_arq[i] = NULL; 29611 } 29612 un->sd_fi_fifo_start = 0; 29613 un->sd_fi_fifo_end = 0; 29614 29615 mutex_enter(&(un->un_fi_mutex)); 29616 un->sd_fi_log[0] = '\0'; 29617 un->sd_fi_buf_len = 0; 29618 mutex_exit(&(un->un_fi_mutex)); 29619 29620 SD_INFO(SD_LOG_IOERR, un, 29621 "sd_faultinjection_ioctl: start finished\n"); 29622 break; 29623 29624 case SDIOCSTOP: 29625 /* Stop Injection Session */ 29626 SD_INFO(SD_LOG_SDTEST, un, 29627 "sd_faultinjection_ioctl: Injecting Fault Stop\n"); 29628 sd_fault_injection_on = 0; 29629 un->sd_injection_mask = 0x0; 29630 29631 /* Empty stray or unuseds structs from fifo */ 29632 for (i = 0; i < SD_FI_MAX_ERROR; i++) { 29633 if (un->sd_fi_fifo_pkt[i] != NULL) { 29634 kmem_free(un->sd_fi_fifo_pkt[i], 29635 sizeof (struct sd_fi_pkt)); 29636 } 29637 if (un->sd_fi_fifo_xb[i] != NULL) { 29638 kmem_free(un->sd_fi_fifo_xb[i], 29639 sizeof (struct sd_fi_xb)); 29640 } 29641 if (un->sd_fi_fifo_un[i] != NULL) { 29642 kmem_free(un->sd_fi_fifo_un[i], 29643 sizeof (struct sd_fi_un)); 29644 } 29645 if (un->sd_fi_fifo_arq[i] != NULL) { 29646 kmem_free(un->sd_fi_fifo_arq[i], 29647 sizeof (struct sd_fi_arq)); 29648 } 29649 un->sd_fi_fifo_pkt[i] = NULL; 29650 un->sd_fi_fifo_un[i] = NULL; 29651 un->sd_fi_fifo_xb[i] = NULL; 29652 un->sd_fi_fifo_arq[i] = NULL; 29653 } 29654 un->sd_fi_fifo_start = 0; 29655 un->sd_fi_fifo_end = 0; 29656 29657 SD_INFO(SD_LOG_IOERR, un, 29658 "sd_faultinjection_ioctl: stop finished\n"); 29659 break; 29660 29661 case SDIOCINSERTPKT: 29662 /* Store a packet struct to be pushed onto fifo */ 29663 SD_INFO(SD_LOG_SDTEST, un, 29664 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n"); 29665 29666 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29667 29668 sd_fault_injection_on = 0; 29669 29670 /* No more that SD_FI_MAX_ERROR allowed in Queue */ 29671 
if (un->sd_fi_fifo_pkt[i] != NULL) { 29672 kmem_free(un->sd_fi_fifo_pkt[i], 29673 sizeof (struct sd_fi_pkt)); 29674 } 29675 if (arg != NULL) { 29676 un->sd_fi_fifo_pkt[i] = 29677 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP); 29678 if (un->sd_fi_fifo_pkt[i] == NULL) { 29679 /* Alloc failed don't store anything */ 29680 break; 29681 } 29682 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i], 29683 sizeof (struct sd_fi_pkt), 0); 29684 if (rval == -1) { 29685 kmem_free(un->sd_fi_fifo_pkt[i], 29686 sizeof (struct sd_fi_pkt)); 29687 un->sd_fi_fifo_pkt[i] = NULL; 29688 } 29689 } else { 29690 SD_INFO(SD_LOG_IOERR, un, 29691 "sd_faultinjection_ioctl: pkt null\n"); 29692 } 29693 break; 29694 29695 case SDIOCINSERTXB: 29696 /* Store a xb struct to be pushed onto fifo */ 29697 SD_INFO(SD_LOG_SDTEST, un, 29698 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n"); 29699 29700 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29701 29702 sd_fault_injection_on = 0; 29703 29704 if (un->sd_fi_fifo_xb[i] != NULL) { 29705 kmem_free(un->sd_fi_fifo_xb[i], 29706 sizeof (struct sd_fi_xb)); 29707 un->sd_fi_fifo_xb[i] = NULL; 29708 } 29709 if (arg != NULL) { 29710 un->sd_fi_fifo_xb[i] = 29711 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP); 29712 if (un->sd_fi_fifo_xb[i] == NULL) { 29713 /* Alloc failed don't store anything */ 29714 break; 29715 } 29716 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i], 29717 sizeof (struct sd_fi_xb), 0); 29718 29719 if (rval == -1) { 29720 kmem_free(un->sd_fi_fifo_xb[i], 29721 sizeof (struct sd_fi_xb)); 29722 un->sd_fi_fifo_xb[i] = NULL; 29723 } 29724 } else { 29725 SD_INFO(SD_LOG_IOERR, un, 29726 "sd_faultinjection_ioctl: xb null\n"); 29727 } 29728 break; 29729 29730 case SDIOCINSERTUN: 29731 /* Store a un struct to be pushed onto fifo */ 29732 SD_INFO(SD_LOG_SDTEST, un, 29733 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n"); 29734 29735 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29736 29737 sd_fault_injection_on = 0; 29738 29739 if 
(un->sd_fi_fifo_un[i] != NULL) { 29740 kmem_free(un->sd_fi_fifo_un[i], 29741 sizeof (struct sd_fi_un)); 29742 un->sd_fi_fifo_un[i] = NULL; 29743 } 29744 if (arg != NULL) { 29745 un->sd_fi_fifo_un[i] = 29746 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP); 29747 if (un->sd_fi_fifo_un[i] == NULL) { 29748 /* Alloc failed don't store anything */ 29749 break; 29750 } 29751 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i], 29752 sizeof (struct sd_fi_un), 0); 29753 if (rval == -1) { 29754 kmem_free(un->sd_fi_fifo_un[i], 29755 sizeof (struct sd_fi_un)); 29756 un->sd_fi_fifo_un[i] = NULL; 29757 } 29758 29759 } else { 29760 SD_INFO(SD_LOG_IOERR, un, 29761 "sd_faultinjection_ioctl: un null\n"); 29762 } 29763 29764 break; 29765 29766 case SDIOCINSERTARQ: 29767 /* Store a arq struct to be pushed onto fifo */ 29768 SD_INFO(SD_LOG_SDTEST, un, 29769 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n"); 29770 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR; 29771 29772 sd_fault_injection_on = 0; 29773 29774 if (un->sd_fi_fifo_arq[i] != NULL) { 29775 kmem_free(un->sd_fi_fifo_arq[i], 29776 sizeof (struct sd_fi_arq)); 29777 un->sd_fi_fifo_arq[i] = NULL; 29778 } 29779 if (arg != NULL) { 29780 un->sd_fi_fifo_arq[i] = 29781 kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP); 29782 if (un->sd_fi_fifo_arq[i] == NULL) { 29783 /* Alloc failed don't store anything */ 29784 break; 29785 } 29786 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i], 29787 sizeof (struct sd_fi_arq), 0); 29788 if (rval == -1) { 29789 kmem_free(un->sd_fi_fifo_arq[i], 29790 sizeof (struct sd_fi_arq)); 29791 un->sd_fi_fifo_arq[i] = NULL; 29792 } 29793 29794 } else { 29795 SD_INFO(SD_LOG_IOERR, un, 29796 "sd_faultinjection_ioctl: arq null\n"); 29797 } 29798 29799 break; 29800 29801 case SDIOCPUSH: 29802 /* Push stored xb, pkt, un, and arq onto fifo */ 29803 sd_fault_injection_on = 0; 29804 29805 if (arg != NULL) { 29806 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0); 29807 if (rval != -1 && 29808 
un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 29809 un->sd_fi_fifo_end += i; 29810 } 29811 } else { 29812 SD_INFO(SD_LOG_IOERR, un, 29813 "sd_faultinjection_ioctl: push arg null\n"); 29814 if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) { 29815 un->sd_fi_fifo_end++; 29816 } 29817 } 29818 SD_INFO(SD_LOG_IOERR, un, 29819 "sd_faultinjection_ioctl: push to end=%d\n", 29820 un->sd_fi_fifo_end); 29821 break; 29822 29823 case SDIOCRETRIEVE: 29824 /* Return buffer of log from Injection session */ 29825 SD_INFO(SD_LOG_SDTEST, un, 29826 "sd_faultinjection_ioctl: Injecting Fault Retreive"); 29827 29828 sd_fault_injection_on = 0; 29829 29830 mutex_enter(&(un->un_fi_mutex)); 29831 rval = ddi_copyout(un->sd_fi_log, (void *)arg, 29832 un->sd_fi_buf_len+1, 0); 29833 mutex_exit(&(un->un_fi_mutex)); 29834 29835 if (rval == -1) { 29836 /* 29837 * arg is possibly invalid setting 29838 * it to NULL for return 29839 */ 29840 arg = NULL; 29841 } 29842 break; 29843 } 29844 29845 mutex_exit(SD_MUTEX(un)); 29846 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:" 29847 " exit\n"); 29848 } 29849 29850 29851 /* 29852 * Function: sd_injection_log() 29853 * 29854 * Description: This routine adds buff to the already existing injection log 29855 * for retrieval via faultinjection_ioctl for use in fault 29856 * detection and recovery 29857 * 29858 * Arguments: buf - the string to add to the log 29859 */ 29860 29861 static void 29862 sd_injection_log(char *buf, struct sd_lun *un) 29863 { 29864 uint_t len; 29865 29866 ASSERT(un != NULL); 29867 ASSERT(buf != NULL); 29868 29869 mutex_enter(&(un->un_fi_mutex)); 29870 29871 len = min(strlen(buf), 255); 29872 /* Add logged value to Injection log to be returned later */ 29873 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) { 29874 uint_t offset = strlen((char *)un->sd_fi_log); 29875 char *destp = (char *)un->sd_fi_log + offset; 29876 int i; 29877 for (i = 0; i < len; i++) { 29878 *destp++ = *buf++; 29879 } 29880 un->sd_fi_buf_len += len; 29881 
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}


/*
 *    Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on error injection scenario.
 *
 *   Arguments: pktp	- packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* pull bp xb and un from pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* if injection is off return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}


	/* take next set off fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];


	/* set variables accordingly */
	/* set pkt if it was on fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}

	/* set xb if it was on fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno,
"xb_blkno"); 29955 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid"); 29956 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count"); 29957 SD_CONDSET(xb, xb, xb_victim_retry_count, 29958 "xb_victim_retry_count"); 29959 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status"); 29960 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state"); 29961 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid"); 29962 29963 /* copy in block data from sense */ 29964 if (fi_xb->xb_sense_data[0] != -1) { 29965 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, 29966 SENSE_LENGTH); 29967 } 29968 29969 /* copy in extended sense codes */ 29970 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code, 29971 "es_code"); 29972 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key, 29973 "es_key"); 29974 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code, 29975 "es_add_code"); 29976 SD_CONDSET(((struct scsi_extended_sense *)xb), xb, 29977 es_qual_code, "es_qual_code"); 29978 } 29979 29980 /* set un if it was on fifo */ 29981 if (fi_un != NULL) { 29982 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb"); 29983 SD_CONDSET(un, un, un_ctype, "un_ctype"); 29984 SD_CONDSET(un, un, un_reset_retry_count, 29985 "un_reset_retry_count"); 29986 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type"); 29987 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status"); 29988 SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled"); 29989 SD_CONDSET(un, un, un_f_geometry_is_valid, 29990 "un_f_geometry_is_valid"); 29991 SD_CONDSET(un, un, un_f_allow_bus_device_reset, 29992 "un_f_allow_bus_device_reset"); 29993 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing"); 29994 29995 } 29996 29997 /* copy in auto request sense if it was on fifo */ 29998 if (fi_arq != NULL) { 29999 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq)); 30000 } 30001 30002 /* free structs */ 30003 if (un->sd_fi_fifo_pkt[i] != NULL) { 30004 kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt)); 30005 } 30006 if 
(un->sd_fi_fifo_xb[i] != NULL) { 30007 kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb)); 30008 } 30009 if (un->sd_fi_fifo_un[i] != NULL) { 30010 kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un)); 30011 } 30012 if (un->sd_fi_fifo_arq[i] != NULL) { 30013 kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq)); 30014 } 30015 30016 /* 30017 * kmem_free does not gurantee to set to NULL 30018 * since we uses these to determine if we set 30019 * values or not lets confirm they are always 30020 * NULL after free 30021 */ 30022 un->sd_fi_fifo_pkt[i] = NULL; 30023 un->sd_fi_fifo_un[i] = NULL; 30024 un->sd_fi_fifo_xb[i] = NULL; 30025 un->sd_fi_fifo_arq[i] = NULL; 30026 30027 un->sd_fi_fifo_start++; 30028 30029 mutex_exit(SD_MUTEX(un)); 30030 30031 SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n"); 30032 } 30033 30034 #endif /* SD_FAULT_INJECTION */ 30035