/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */

#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>
#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[]	= "misc/scsi drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[]	= "misc/scsi";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect-type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
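/*
 * Illustrative sketch only (not part of the driver): one plausible way a
 * target driver could classify the transport from the "interconnect-type"
 * capability described above. The use of scsi_ifgetcap() here is an
 * assumption for illustration; the driver's real attach-time logic lives
 * elsewhere in this file.
 */
#if 0
static int
sd_example_interconnect_type(struct scsi_address *ap)
{
	switch (scsi_ifgetcap(ap, "interconnect-type", -1)) {
	case INTERCONNECT_SSA:
		return (SD_INTERCONNECT_SSA);
	case INTERCONNECT_FIBRE:
		return (SD_INTERCONNECT_FIBRE);
	case INTERCONNECT_FABRIC:
		return (SD_INTERCONNECT_FABRIC);
	default:
		/* Unknown, 1394, or USB: assume parallel SCSI behaviors */
		return (SD_INTERCONNECT_PARALLEL);
	}
}
#endif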
/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif
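/*
 * Illustrative sketch only: a hypothetical sd.conf fragment using the
 * property names above. The vid/pid string, data property name, and
 * values are made up; the version-1 list layout (version word, flags
 * word, then the selected values) is the shape that
 * sd_chk_vers1_data()/sd_set_vers1_properties() below are written to
 * consume.
 *
 *	sd-config-list = "ACME    EXAMPLEDISK", "acme-disk-data";
 *	acme-disk-data = 1, 0x1, 10;
 */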
/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind
#endif


#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;


/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identify device only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
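/*
 * Illustrative sketch only: how a per-HBA probe cache like the one above
 * might be consulted. sd_example_cache_lookup() is hypothetical; the
 * driver's real logic is in sd_scsi_probe_with_cache().
 */
#if 0
static int
sd_example_cache_lookup(dev_info_t *pdip, int target)
{
	struct sd_scsi_probe_cache *cp;

	mutex_enter(&sd_scsi_probe_cache_mutex);
	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == pdip) {
			/* A prior scsi_probe() result is remembered here */
			int result = cp->cache[target];
			mutex_exit(&sd_scsi_probe_cache_mutex);
			return (result);
		}
	}
	mutex_exit(&sd_scsi_probe_cache_mutex);
	return (SCSIPROBE_EXISTS);	/* no cache entry; probe normally */
}
#endif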
302 */ 303 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex, 304 sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip)) 305 306 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex, 307 sd_scsi_probe_cache_head)) 308 309 310 /* 311 * Vendor specific data name property declarations 312 */ 313 314 #if defined(__fibre) || defined(__i386) ||defined(__amd64) 315 316 static sd_tunables seagate_properties = { 317 SEAGATE_THROTTLE_VALUE, 318 0, 319 0, 320 0, 321 0, 322 0, 323 0, 324 0, 325 0 326 }; 327 328 static sd_tunables lsi_properties = { 329 0, 330 0, 331 LSI_NOTREADY_RETRIES, 332 0, 333 0, 334 0, 335 0, 336 0, 337 0 338 }; 339 340 static sd_tunables lsi_oem_properties = { 341 0, 342 0, 343 LSI_OEM_NOTREADY_RETRIES, 344 0, 345 0, 346 0, 347 0, 348 0, 349 0 350 }; 351 352 static sd_tunables fujitsu_properties = { 353 FUJITSU_THROTTLE_VALUE, 354 0, 355 0, 356 0, 357 0, 358 0, 359 0, 360 0, 361 0 362 }; 363 364 static sd_tunables ibm_properties = { 365 IBM_THROTTLE_VALUE, 366 0, 367 0, 368 0, 369 0, 370 0, 371 0, 372 0, 373 0 374 }; 375 376 static sd_tunables purple_properties = { 377 PURPLE_THROTTLE_VALUE, 378 0, 379 0, 380 PURPLE_BUSY_RETRIES, 381 PURPLE_RESET_RETRY_COUNT, 382 PURPLE_RESERVE_RELEASE_TIME, 383 0, 384 0, 385 0 386 }; 387 388 static sd_tunables sve_properties = { 389 SVE_THROTTLE_VALUE, 390 0, 391 0, 392 SVE_BUSY_RETRIES, 393 SVE_RESET_RETRY_COUNT, 394 SVE_RESERVE_RELEASE_TIME, 395 SVE_MIN_THROTTLE_VALUE, 396 SVE_DISKSORT_DISABLED_FLAG, 397 0 398 }; 399 400 static sd_tunables maserati_properties = { 401 0, 402 0, 403 0, 404 0, 405 0, 406 0, 407 0, 408 MASERATI_DISKSORT_DISABLED_FLAG, 409 MASERATI_LUN_RESET_ENABLED_FLAG 410 }; 411 412 static sd_tunables pirus_properties = { 413 PIRUS_THROTTLE_VALUE, 414 0, 415 PIRUS_NRR_COUNT, 416 PIRUS_BUSY_RETRIES, 417 PIRUS_RESET_RETRY_COUNT, 418 0, 419 PIRUS_MIN_THROTTLE_VALUE, 420 PIRUS_DISKSORT_DISABLED_FLAG, 421 PIRUS_LUN_RESET_ENABLED_FLAG 422 }; 423 424 #endif 425 #if (defined(__sparc) && !defined(__fibre)) || \ 426 (defined(__i386) || defined(__amd64)) 427 428 static sd_tunables lsi_properties_scsi = { 429 LSI_THROTTLE_VALUE, 430 0, 431 LSI_NOTREADY_RETRIES, 432 0, 433 0, 434 0, 435 0, 436 0, 437 0 438 }; 439 440 static sd_tunables elite_properties = { 441 ELITE_THROTTLE_VALUE, 442 0, 443 0, 444 0, 445 0, 446 0, 447 0, 448 0, 449 0 450 }; 451 452 static sd_tunables st31200n_properties = { 453 ST31200N_THROTTLE_VALUE, 454 0, 455 0, 456 0, 457 0, 458 0, 459 0, 460 0, 461 0 462 }; 463 464 #endif /* Fibre or not */ 465 466 static sd_tunables symbios_properties = { 467 SYMBIOS_THROTTLE_VALUE, 468 0, 469 SYMBIOS_NOTREADY_RETRIES, 470 0, 471 0, 472 0, 473 0, 474 0, 475 0 476 }; 477 478 479 480 481 #if (defined(SD_PROP_TST)) 482 483 #define SD_TST_CTYPE_VAL CTYPE_CDROM 484 #define SD_TST_THROTTLE_VAL 16 485 #define SD_TST_NOTREADY_VAL 12 486 #define SD_TST_BUSY_VAL 60 487 #define SD_TST_RST_RETRY_VAL 36 488 #define SD_TST_RSV_REL_TIME 60 489 490 static sd_tunables tst_properties = { 491 SD_TST_THROTTLE_VAL, 492 SD_TST_CTYPE_VAL, 493 SD_TST_NOTREADY_VAL, 494 SD_TST_BUSY_VAL, 495 SD_TST_RST_RETRY_VAL, 496 SD_TST_RSV_REL_TIME, 497 0, 498 0, 499 0 500 }; 501 #endif 502 503 /* This is similiar to the ANSI toupper implementation */ 504 #define SD_TOUPPER(C) (((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C)) 505 506 /* 507 * Static Driver Configuration Table 508 * 509 * This is the table of disks which need throttle adjustment (or, perhaps 510 * something else as defined by the flags at a future time.) 
/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC       CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI",               SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT|
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_MIN_THROTTLE|
			SD_CONF_BSET_DISKSORT_DISABLED|
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) ||\
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				    | SD_CONF_BSET_READSUB_BCD
				    | SD_CONF_BSET_READ_TOC_ADDR_BCD
				    | SD_CONF_BSET_NO_READ_HEADER
				    | SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				    | SD_CONF_BSET_READSUB_BCD
				    | SD_CONF_BSET_READ_TOC_ADDR_BCD
				    | SD_CONF_BSET_NO_READ_HEADER
				    | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table)/ sizeof (sd_disk_config_t);


/*
 * Return codes of sd_uselabel().
 */
#define	SD_LABEL_IS_VALID		0
#define	SD_LABEL_IS_INVALID		1

#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */
#define	WD_NODE			7	/* the whole disk minor */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
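/*
 * Illustrative sketch only: how the state macros above are typically used
 * around a transient state change. The surrounding code and the choice of
 * SD_STATE_SUSPENDED are hypothetical here.
 */
#if 0
	New_state(un, SD_STATE_SUSPENDED);	/* remember previous state */
	/* ... perform work that requires the transient state ... */
	Restore_state(un);			/* pop back to prior state */
#endif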
738 */ 739 #define sd_log_trace ssd_log_trace 740 #define sd_log_info ssd_log_info 741 #define sd_log_err ssd_log_err 742 #define sdprobe ssdprobe 743 #define sdinfo ssdinfo 744 #define sd_prop_op ssd_prop_op 745 #define sd_scsi_probe_cache_init ssd_scsi_probe_cache_init 746 #define sd_scsi_probe_cache_fini ssd_scsi_probe_cache_fini 747 #define sd_scsi_clear_probe_cache ssd_scsi_clear_probe_cache 748 #define sd_scsi_probe_with_cache ssd_scsi_probe_with_cache 749 #define sd_spin_up_unit ssd_spin_up_unit 750 #define sd_enable_descr_sense ssd_enable_descr_sense 751 #define sd_set_mmc_caps ssd_set_mmc_caps 752 #define sd_read_unit_properties ssd_read_unit_properties 753 #define sd_process_sdconf_file ssd_process_sdconf_file 754 #define sd_process_sdconf_table ssd_process_sdconf_table 755 #define sd_sdconf_id_match ssd_sdconf_id_match 756 #define sd_blank_cmp ssd_blank_cmp 757 #define sd_chk_vers1_data ssd_chk_vers1_data 758 #define sd_set_vers1_properties ssd_set_vers1_properties 759 #define sd_validate_geometry ssd_validate_geometry 760 761 #if defined(_SUNOS_VTOC_16) 762 #define sd_convert_geometry ssd_convert_geometry 763 #endif 764 765 #define sd_resync_geom_caches ssd_resync_geom_caches 766 #define sd_read_fdisk ssd_read_fdisk 767 #define sd_get_physical_geometry ssd_get_physical_geometry 768 #define sd_get_virtual_geometry ssd_get_virtual_geometry 769 #define sd_update_block_info ssd_update_block_info 770 #define sd_swap_efi_gpt ssd_swap_efi_gpt 771 #define sd_swap_efi_gpe ssd_swap_efi_gpe 772 #define sd_validate_efi ssd_validate_efi 773 #define sd_use_efi ssd_use_efi 774 #define sd_uselabel ssd_uselabel 775 #define sd_build_default_label ssd_build_default_label 776 #define sd_has_max_chs_vals ssd_has_max_chs_vals 777 #define sd_inq_fill ssd_inq_fill 778 #define sd_register_devid ssd_register_devid 779 #define sd_get_devid_block ssd_get_devid_block 780 #define sd_get_devid ssd_get_devid 781 #define sd_create_devid ssd_create_devid 782 #define sd_write_deviceid ssd_write_deviceid 783 #define sd_check_vpd_page_support ssd_check_vpd_page_support 784 #define sd_setup_pm ssd_setup_pm 785 #define sd_create_pm_components ssd_create_pm_components 786 #define sd_ddi_suspend ssd_ddi_suspend 787 #define sd_ddi_pm_suspend ssd_ddi_pm_suspend 788 #define sd_ddi_resume ssd_ddi_resume 789 #define sd_ddi_pm_resume ssd_ddi_pm_resume 790 #define sdpower ssdpower 791 #define sdattach ssdattach 792 #define sddetach ssddetach 793 #define sd_unit_attach ssd_unit_attach 794 #define sd_unit_detach ssd_unit_detach 795 #define sd_create_minor_nodes ssd_create_minor_nodes 796 #define sd_create_errstats ssd_create_errstats 797 #define sd_set_errstats ssd_set_errstats 798 #define sd_set_pstats ssd_set_pstats 799 #define sddump ssddump 800 #define sd_scsi_poll ssd_scsi_poll 801 #define sd_send_polled_RQS ssd_send_polled_RQS 802 #define sd_ddi_scsi_poll ssd_ddi_scsi_poll 803 #define sd_init_event_callbacks ssd_init_event_callbacks 804 #define sd_event_callback ssd_event_callback 805 #define sd_disable_caching ssd_disable_caching 806 #define sd_make_device ssd_make_device 807 #define sdopen ssdopen 808 #define sdclose ssdclose 809 #define sd_ready_and_valid ssd_ready_and_valid 810 #define sdmin ssdmin 811 #define sdread ssdread 812 #define sdwrite ssdwrite 813 #define sdaread ssdaread 814 #define sdawrite ssdawrite 815 #define sdstrategy ssdstrategy 816 #define sdioctl ssdioctl 817 #define sd_mapblockaddr_iostart ssd_mapblockaddr_iostart 818 #define sd_mapblocksize_iostart ssd_mapblocksize_iostart 819 #define 
sd_checksum_iostart ssd_checksum_iostart 820 #define sd_checksum_uscsi_iostart ssd_checksum_uscsi_iostart 821 #define sd_pm_iostart ssd_pm_iostart 822 #define sd_core_iostart ssd_core_iostart 823 #define sd_mapblockaddr_iodone ssd_mapblockaddr_iodone 824 #define sd_mapblocksize_iodone ssd_mapblocksize_iodone 825 #define sd_checksum_iodone ssd_checksum_iodone 826 #define sd_checksum_uscsi_iodone ssd_checksum_uscsi_iodone 827 #define sd_pm_iodone ssd_pm_iodone 828 #define sd_initpkt_for_buf ssd_initpkt_for_buf 829 #define sd_destroypkt_for_buf ssd_destroypkt_for_buf 830 #define sd_setup_rw_pkt ssd_setup_rw_pkt 831 #define sd_setup_next_rw_pkt ssd_setup_next_rw_pkt 832 #define sd_buf_iodone ssd_buf_iodone 833 #define sd_uscsi_strategy ssd_uscsi_strategy 834 #define sd_initpkt_for_uscsi ssd_initpkt_for_uscsi 835 #define sd_destroypkt_for_uscsi ssd_destroypkt_for_uscsi 836 #define sd_uscsi_iodone ssd_uscsi_iodone 837 #define sd_xbuf_strategy ssd_xbuf_strategy 838 #define sd_xbuf_init ssd_xbuf_init 839 #define sd_pm_entry ssd_pm_entry 840 #define sd_pm_exit ssd_pm_exit 841 842 #define sd_pm_idletimeout_handler ssd_pm_idletimeout_handler 843 #define sd_pm_timeout_handler ssd_pm_timeout_handler 844 845 #define sd_add_buf_to_waitq ssd_add_buf_to_waitq 846 #define sdintr ssdintr 847 #define sd_start_cmds ssd_start_cmds 848 #define sd_send_scsi_cmd ssd_send_scsi_cmd 849 #define sd_bioclone_alloc ssd_bioclone_alloc 850 #define sd_bioclone_free ssd_bioclone_free 851 #define sd_shadow_buf_alloc ssd_shadow_buf_alloc 852 #define sd_shadow_buf_free ssd_shadow_buf_free 853 #define sd_print_transport_rejected_message \ 854 ssd_print_transport_rejected_message 855 #define sd_retry_command ssd_retry_command 856 #define sd_set_retry_bp ssd_set_retry_bp 857 #define sd_send_request_sense_command ssd_send_request_sense_command 858 #define sd_start_retry_command ssd_start_retry_command 859 #define sd_start_direct_priority_command \ 860 ssd_start_direct_priority_command 861 #define sd_return_failed_command ssd_return_failed_command 862 #define sd_return_failed_command_no_restart \ 863 ssd_return_failed_command_no_restart 864 #define sd_return_command ssd_return_command 865 #define sd_sync_with_callback ssd_sync_with_callback 866 #define sdrunout ssdrunout 867 #define sd_mark_rqs_busy ssd_mark_rqs_busy 868 #define sd_mark_rqs_idle ssd_mark_rqs_idle 869 #define sd_reduce_throttle ssd_reduce_throttle 870 #define sd_restore_throttle ssd_restore_throttle 871 #define sd_print_incomplete_msg ssd_print_incomplete_msg 872 #define sd_init_cdb_limits ssd_init_cdb_limits 873 #define sd_pkt_status_good ssd_pkt_status_good 874 #define sd_pkt_status_check_condition ssd_pkt_status_check_condition 875 #define sd_pkt_status_busy ssd_pkt_status_busy 876 #define sd_pkt_status_reservation_conflict \ 877 ssd_pkt_status_reservation_conflict 878 #define sd_pkt_status_qfull ssd_pkt_status_qfull 879 #define sd_handle_request_sense ssd_handle_request_sense 880 #define sd_handle_auto_request_sense ssd_handle_auto_request_sense 881 #define sd_print_sense_failed_msg ssd_print_sense_failed_msg 882 #define sd_validate_sense_data ssd_validate_sense_data 883 #define sd_decode_sense ssd_decode_sense 884 #define sd_print_sense_msg ssd_print_sense_msg 885 #define sd_extract_sense_info_descr ssd_extract_sense_info_descr 886 #define sd_sense_key_no_sense ssd_sense_key_no_sense 887 #define sd_sense_key_recoverable_error ssd_sense_key_recoverable_error 888 #define sd_sense_key_not_ready ssd_sense_key_not_ready 889 #define 
sd_sense_key_medium_or_hardware_error \ 890 ssd_sense_key_medium_or_hardware_error 891 #define sd_sense_key_illegal_request ssd_sense_key_illegal_request 892 #define sd_sense_key_unit_attention ssd_sense_key_unit_attention 893 #define sd_sense_key_fail_command ssd_sense_key_fail_command 894 #define sd_sense_key_blank_check ssd_sense_key_blank_check 895 #define sd_sense_key_aborted_command ssd_sense_key_aborted_command 896 #define sd_sense_key_default ssd_sense_key_default 897 #define sd_print_retry_msg ssd_print_retry_msg 898 #define sd_print_cmd_incomplete_msg ssd_print_cmd_incomplete_msg 899 #define sd_pkt_reason_cmd_incomplete ssd_pkt_reason_cmd_incomplete 900 #define sd_pkt_reason_cmd_tran_err ssd_pkt_reason_cmd_tran_err 901 #define sd_pkt_reason_cmd_reset ssd_pkt_reason_cmd_reset 902 #define sd_pkt_reason_cmd_aborted ssd_pkt_reason_cmd_aborted 903 #define sd_pkt_reason_cmd_timeout ssd_pkt_reason_cmd_timeout 904 #define sd_pkt_reason_cmd_unx_bus_free ssd_pkt_reason_cmd_unx_bus_free 905 #define sd_pkt_reason_cmd_tag_reject ssd_pkt_reason_cmd_tag_reject 906 #define sd_pkt_reason_default ssd_pkt_reason_default 907 #define sd_reset_target ssd_reset_target 908 #define sd_start_stop_unit_callback ssd_start_stop_unit_callback 909 #define sd_start_stop_unit_task ssd_start_stop_unit_task 910 #define sd_taskq_create ssd_taskq_create 911 #define sd_taskq_delete ssd_taskq_delete 912 #define sd_media_change_task ssd_media_change_task 913 #define sd_handle_mchange ssd_handle_mchange 914 #define sd_send_scsi_DOORLOCK ssd_send_scsi_DOORLOCK 915 #define sd_send_scsi_READ_CAPACITY ssd_send_scsi_READ_CAPACITY 916 #define sd_send_scsi_READ_CAPACITY_16 ssd_send_scsi_READ_CAPACITY_16 917 #define sd_send_scsi_GET_CONFIGURATION ssd_send_scsi_GET_CONFIGURATION 918 #define sd_send_scsi_feature_GET_CONFIGURATION \ 919 sd_send_scsi_feature_GET_CONFIGURATION 920 #define sd_send_scsi_START_STOP_UNIT ssd_send_scsi_START_STOP_UNIT 921 #define sd_send_scsi_INQUIRY ssd_send_scsi_INQUIRY 922 #define sd_send_scsi_TEST_UNIT_READY ssd_send_scsi_TEST_UNIT_READY 923 #define sd_send_scsi_PERSISTENT_RESERVE_IN \ 924 ssd_send_scsi_PERSISTENT_RESERVE_IN 925 #define sd_send_scsi_PERSISTENT_RESERVE_OUT \ 926 ssd_send_scsi_PERSISTENT_RESERVE_OUT 927 #define sd_send_scsi_SYNCHRONIZE_CACHE ssd_send_scsi_SYNCHRONIZE_CACHE 928 #define sd_send_scsi_MODE_SENSE ssd_send_scsi_MODE_SENSE 929 #define sd_send_scsi_MODE_SELECT ssd_send_scsi_MODE_SELECT 930 #define sd_send_scsi_RDWR ssd_send_scsi_RDWR 931 #define sd_send_scsi_LOG_SENSE ssd_send_scsi_LOG_SENSE 932 #define sd_alloc_rqs ssd_alloc_rqs 933 #define sd_free_rqs ssd_free_rqs 934 #define sd_dump_memory ssd_dump_memory 935 #define sd_uscsi_ioctl ssd_uscsi_ioctl 936 #define sd_get_media_info ssd_get_media_info 937 #define sd_dkio_ctrl_info ssd_dkio_ctrl_info 938 #define sd_dkio_get_geometry ssd_dkio_get_geometry 939 #define sd_dkio_set_geometry ssd_dkio_set_geometry 940 #define sd_dkio_get_partition ssd_dkio_get_partition 941 #define sd_dkio_set_partition ssd_dkio_set_partition 942 #define sd_dkio_partition ssd_dkio_partition 943 #define sd_dkio_get_vtoc ssd_dkio_get_vtoc 944 #define sd_dkio_get_efi ssd_dkio_get_efi 945 #define sd_build_user_vtoc ssd_build_user_vtoc 946 #define sd_dkio_set_vtoc ssd_dkio_set_vtoc 947 #define sd_dkio_set_efi ssd_dkio_set_efi 948 #define sd_build_label_vtoc ssd_build_label_vtoc 949 #define sd_write_label ssd_write_label 950 #define sd_clear_vtoc ssd_clear_vtoc 951 #define sd_clear_efi ssd_clear_efi 952 #define sd_fill_scsi1_lun ssd_fill_scsi1_lun 953 #define 
sd_get_tunables_from_conf ssd_get_tunables_from_conf 954 #define sd_setup_next_xfer ssd_setup_next_xfer 955 #define sd_dkio_get_temp ssd_dkio_get_temp 956 #define sd_dkio_get_mboot ssd_dkio_get_mboot 957 #define sd_dkio_set_mboot ssd_dkio_set_mboot 958 #define sd_setup_default_geometry ssd_setup_default_geometry 959 #define sd_update_fdisk_and_vtoc ssd_update_fdisk_and_vtoc 960 #define sd_check_mhd ssd_check_mhd 961 #define sd_mhd_watch_cb ssd_mhd_watch_cb 962 #define sd_mhd_watch_incomplete ssd_mhd_watch_incomplete 963 #define sd_sname ssd_sname 964 #define sd_mhd_resvd_recover ssd_mhd_resvd_recover 965 #define sd_resv_reclaim_thread ssd_resv_reclaim_thread 966 #define sd_take_ownership ssd_take_ownership 967 #define sd_reserve_release ssd_reserve_release 968 #define sd_rmv_resv_reclaim_req ssd_rmv_resv_reclaim_req 969 #define sd_mhd_reset_notify_cb ssd_mhd_reset_notify_cb 970 #define sd_persistent_reservation_in_read_keys \ 971 ssd_persistent_reservation_in_read_keys 972 #define sd_persistent_reservation_in_read_resv \ 973 ssd_persistent_reservation_in_read_resv 974 #define sd_mhdioc_takeown ssd_mhdioc_takeown 975 #define sd_mhdioc_failfast ssd_mhdioc_failfast 976 #define sd_mhdioc_release ssd_mhdioc_release 977 #define sd_mhdioc_register_devid ssd_mhdioc_register_devid 978 #define sd_mhdioc_inkeys ssd_mhdioc_inkeys 979 #define sd_mhdioc_inresv ssd_mhdioc_inresv 980 #define sr_change_blkmode ssr_change_blkmode 981 #define sr_change_speed ssr_change_speed 982 #define sr_atapi_change_speed ssr_atapi_change_speed 983 #define sr_pause_resume ssr_pause_resume 984 #define sr_play_msf ssr_play_msf 985 #define sr_play_trkind ssr_play_trkind 986 #define sr_read_all_subcodes ssr_read_all_subcodes 987 #define sr_read_subchannel ssr_read_subchannel 988 #define sr_read_tocentry ssr_read_tocentry 989 #define sr_read_tochdr ssr_read_tochdr 990 #define sr_read_cdda ssr_read_cdda 991 #define sr_read_cdxa ssr_read_cdxa 992 #define sr_read_mode1 ssr_read_mode1 993 #define sr_read_mode2 ssr_read_mode2 994 #define sr_read_cd_mode2 ssr_read_cd_mode2 995 #define sr_sector_mode ssr_sector_mode 996 #define sr_eject ssr_eject 997 #define sr_ejected ssr_ejected 998 #define sr_check_wp ssr_check_wp 999 #define sd_check_media ssd_check_media 1000 #define sd_media_watch_cb ssd_media_watch_cb 1001 #define sd_delayed_cv_broadcast ssd_delayed_cv_broadcast 1002 #define sr_volume_ctrl ssr_volume_ctrl 1003 #define sr_read_sony_session_offset ssr_read_sony_session_offset 1004 #define sd_log_page_supported ssd_log_page_supported 1005 #define sd_check_for_writable_cd ssd_check_for_writable_cd 1006 #define sd_wm_cache_constructor ssd_wm_cache_constructor 1007 #define sd_wm_cache_destructor ssd_wm_cache_destructor 1008 #define sd_range_lock ssd_range_lock 1009 #define sd_get_range ssd_get_range 1010 #define sd_free_inlist_wmap ssd_free_inlist_wmap 1011 #define sd_range_unlock ssd_range_unlock 1012 #define sd_read_modify_write_task ssd_read_modify_write_task 1013 #define sddump_do_read_of_rmw ssddump_do_read_of_rmw 1014 1015 #define sd_iostart_chain ssd_iostart_chain 1016 #define sd_iodone_chain ssd_iodone_chain 1017 #define sd_initpkt_map ssd_initpkt_map 1018 #define sd_destroypkt_map ssd_destroypkt_map 1019 #define sd_chain_type_map ssd_chain_type_map 1020 #define sd_chain_index_map ssd_chain_index_map 1021 1022 #define sd_failfast_flushctl ssd_failfast_flushctl 1023 #define sd_failfast_flushq ssd_failfast_flushq 1024 #define sd_failfast_flushq_callback ssd_failfast_flushq_callback 1025 1026 #endif /* #if (defined(__fibre)) */ 
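/*
 * Illustrative note: the #define renaming above works because this one
 * source file is compiled twice. Conceptually (hypothetical compiler
 * flags; the real rules live in the kernel makefiles):
 *
 *	cc ...           sd.c   ->  sd module,  parallel SCSI, sd* names
 *	cc -D__fibre ... sd.c   ->  ssd module, fibre channel, ssd* names
 */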
int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

static int sd_spin_up_unit(struct sd_lun *un);
static void sd_enable_descr_sense(struct sd_lun *un);
static void sd_set_mmc_caps(struct sd_lun *un);

static void sd_fill_scsi1_lun(struct sd_lun *un, struct scsi_pkt *);
static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
	int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);
static int sd_validate_geometry(struct sd_lun *un, int path_flag);

#if defined(_SUNOS_VTOC_16)
static void sd_convert_geometry(uint64_t capacity, struct dk_geom *un_g);
#endif

static void sd_resync_geom_caches(struct sd_lun *un, int capacity, int lbasize,
	int path_flag);
static int sd_read_fdisk(struct sd_lun *un, uint_t capacity, int lbasize,
	int path_flag);
static void sd_get_physical_geometry(struct sd_lun *un,
	struct geom_cache *pgeom_p, int capacity, int lbasize, int path_flag);
static void sd_get_virtual_geometry(struct sd_lun *un, int capacity,
	int lbasize);
static int sd_uselabel(struct sd_lun *un, struct dk_label *l, int path_flag);
static void sd_swap_efi_gpt(efi_gpt_t *);
static void sd_swap_efi_gpe(int nparts, efi_gpe_t *);
static int sd_validate_efi(efi_gpt_t *);
static int sd_use_efi(struct sd_lun *, int);
static void sd_build_default_label(struct sd_lun *un);

#if defined(_FIRMWARE_NEEDS_FDISK)
static int sd_has_max_chs_vals(struct ipart *fdp);
#endif
static void sd_inq_fill(char *p, int l, char *s);


static void sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static daddr_t sd_get_devid_block(struct sd_lun *un);
static int sd_get_devid(struct sd_lun *un);
static int sd_get_serialnum(struct sd_lun *un, uchar_t *wwn, int *len);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int sd_write_deviceid(struct sd_lun *un);
static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int sd_check_vpd_page_support(struct sd_lun *un);

static void sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static int sd_create_minor_nodes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif


static int sd_disable_caching(struct sd_lun *un);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
	uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(struct sd_lun *un);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);
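/*
 * Illustrative sketch only: the iostart functions above form an indexed
 * chain; each layer does its work on the buf and then hands it to the next
 * layer. The helper name and the chain-walk expression below are
 * hypothetical, not the driver's actual macros.
 */
#if 0
static void
sd_example_layer_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	/* ... per-layer work on bp (e.g. block remapping) goes here ... */

	/* pass the request to the next iostart layer in the chain */
	(*(sd_iostart_chain[index + 1]))(index + 1, un, bp);
}
#endif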
/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
	struct buf *bp, int flags,
	int (*callback)(caddr_t), caddr_t callback_arg,
	diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
	struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
	uchar_t chain_type, void *pktinfop);

static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);
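/*
 * Illustrative sketch only: sd_pm_entry()/sd_pm_exit() bracket work that
 * requires the device to stay powered up. The surrounding code and the
 * DDI_SUCCESS convention shown here are assumptions for illustration.
 */
#if 0
	if (sd_pm_entry(un) == DDI_SUCCESS) {
		/* ... issue the command; device will not power down ... */
		sd_pm_exit(un);		/* drop the power-management hold */
	}
#endif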
/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd,
	enum uio_seg cdbspace, enum uio_seg dataspace, enum uio_seg rqbufspace,
	int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
	daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
	uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
	struct sd_xbuf *xp, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
	int retry_check_flag,
	void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
		int c),
	void *user_arg, int failure_code, clock_t retry_delay,
	void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
	clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
	struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
	int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
	struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int code);
static diskaddr_t sd_extract_sense_info_descr(
	struct scsi_descr_sense_hdr *sdsp);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
	uint8_t asc,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
	uint8_t asc, uint8_t ascq,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
	int sense_key, uint8_t asc,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
	uint8_t asc,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
	int sense_key,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
	uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
	uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
	int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
	size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
	uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
	uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
	struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
	uchar_t *bufaddr, uint_t buflen);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
	struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
	uchar_t *bufaddr, uint_t buflen, char feature);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
	uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
	uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
	size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,\
	path_flag)
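/*
 * Illustrative sketch only: reading one block through the wrapper macro
 * above. The block number is arbitrary, error handling is elided, and the
 * un_sys_blocksize field and SD_PATH_STANDARD flag are assumed from
 * sddef.h for the purpose of the example.
 */
#if 0
	char	*blk = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);

	if (sd_send_scsi_READ(un, blk, un->un_sys_blocksize, 0,
	    SD_PATH_STANDARD) == 0) {
		/* first block of the device is now in blk[] */
	}
	kmem_free(blk, un->un_sys_blocksize);
#endif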
static int sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
	uint16_t buflen, uchar_t page_code, uchar_t page_control,
	uint16_t param_ptr, int path_flag);

static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);

static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
	uchar_t *data, int len, int fmt);

/*
 * Disk Ioctl Function Prototypes
 */
static int sd_uscsi_ioctl(dev_t dev, caddr_t arg, int flag);
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_geometry(dev_t dev, caddr_t arg, int flag,
	int geom_validated);
static int sd_dkio_set_geometry(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_partition(dev_t dev, caddr_t arg, int flag,
	int geom_validated);
static int sd_dkio_set_partition(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_vtoc(dev_t dev, caddr_t arg, int flag,
	int geom_validated);
static int sd_dkio_get_efi(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_partition(dev_t dev, caddr_t arg, int flag);
static void sd_build_user_vtoc(struct sd_lun *un, struct vtoc *user_vtoc);
static int sd_dkio_set_vtoc(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_set_efi(dev_t dev, caddr_t arg, int flag);
static int sd_build_label_vtoc(struct sd_lun *un, struct vtoc *user_vtoc);
static int sd_write_label(dev_t dev);
static int sd_set_vtoc(struct sd_lun *un, struct dk_label *dkl);
static void sd_clear_vtoc(struct sd_lun *un);
static void sd_clear_efi(struct sd_lun *un);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_mboot(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_set_mboot(dev_t dev, caddr_t arg, int flag);
static void sd_setup_default_geometry(struct sd_lun *un);
#if defined(__i386) || defined(__amd64)
static int sd_update_fdisk_and_vtoc(struct sd_lun *un);
#endif
defined(__i386) || defined(__amd64) 1426 static int sd_update_fdisk_and_vtoc(struct sd_lun *un); 1427 #endif 1428 1429 /* 1430 * Multi-host Ioctl Prototypes 1431 */ 1432 static int sd_check_mhd(dev_t dev, int interval); 1433 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1434 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1435 static char *sd_sname(uchar_t status); 1436 static void sd_mhd_resvd_recover(void *arg); 1437 static void sd_resv_reclaim_thread(); 1438 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1439 static int sd_reserve_release(dev_t dev, int cmd); 1440 static void sd_rmv_resv_reclaim_req(dev_t dev); 1441 static void sd_mhd_reset_notify_cb(caddr_t arg); 1442 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1443 mhioc_inkeys_t *usrp, int flag); 1444 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1445 mhioc_inresvs_t *usrp, int flag); 1446 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1447 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1448 static int sd_mhdioc_release(dev_t dev); 1449 static int sd_mhdioc_register_devid(dev_t dev); 1450 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1451 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1452 1453 /* 1454 * SCSI removable prototypes 1455 */ 1456 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1457 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1458 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1459 static int sr_pause_resume(dev_t dev, int mode); 1460 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1461 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1462 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1463 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1464 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1465 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1466 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1467 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1468 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1469 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1470 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1471 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1472 static int sr_eject(dev_t dev); 1473 static void sr_ejected(register struct sd_lun *un); 1474 static int sr_check_wp(dev_t dev); 1475 static int sd_check_media(dev_t dev, enum dkio_state state); 1476 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1477 static void sd_delayed_cv_broadcast(void *arg); 1478 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1479 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1480 1481 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1482 1483 /* 1484 * Function prototypes for the non-512 support (DVDRAM, MO, etc.) functions.
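 *
 * (Illustration of the read-modify-write scheme these routines support,
 * assuming a hypothetical 2k-byte sector device: a 512-byte write first
 * takes the range lock via sd_range_lock(), reads the containing 2k
 * block, merges the new 512 bytes in memory, writes the full block back,
 * and then releases the lock via sd_range_unlock().)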
1485 */ 1486 static void sd_check_for_writable_cd(struct sd_lun *un); 1487 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1488 static void sd_wm_cache_destructor(void *wm, void *un); 1489 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1490 daddr_t endb, ushort_t typ); 1491 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1492 daddr_t endb); 1493 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1494 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1495 static void sd_read_modify_write_task(void * arg); 1496 static int 1497 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1498 struct buf **bpp); 1499 1500 1501 /* 1502 * Function prototypes for failfast support. 1503 */ 1504 static void sd_failfast_flushq(struct sd_lun *un); 1505 static int sd_failfast_flushq_callback(struct buf *bp); 1506 1507 /* 1508 * Function prototypes for x86 support 1509 */ 1510 #if defined(__i386) || defined(__amd64) 1511 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1512 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1513 #endif 1514 1515 /* 1516 * Constants for failfast support: 1517 * 1518 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1519 * failfast processing being performed. 1520 * 1521 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1522 * failfast processing on all bufs with B_FAILFAST set. 1523 */ 1524 1525 #define SD_FAILFAST_INACTIVE 0 1526 #define SD_FAILFAST_ACTIVE 1 1527 1528 /* 1529 * Bitmask to control behavior of buf(9S) flushes when a transition to 1530 * the failfast state occurs. Optional bits include: 1531 * 1532 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1533 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1534 * be flushed. 1535 * 1536 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1537 * driver, in addition to the regular wait queue. This includes the xbuf 1538 * queues. When clear, only the driver's wait queue will be flushed. 1539 */ 1540 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1541 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1542 1543 /* 1544 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1545 * to flush all queues within the driver. 
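 *
 * As an illustration only, both flush behaviors could be enabled by
 * OR-ing the flags into the tunable below, e.g. via a hypothetical
 * /etc/system entry (this assumes the sd module, not ssd):
 *
 *	set sd:sd_failfast_flushctl = 0x3
 *
 * (0x3 == SD_FAILFAST_FLUSH_ALL_BUFS | SD_FAILFAST_FLUSH_ALL_QUEUES)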
1546 */ 1547 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1548 1549 1550 /* 1551 * SD Testing Fault Injection 1552 */ 1553 #ifdef SD_FAULT_INJECTION 1554 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1555 static void sd_faultinjection(struct scsi_pkt *pktp); 1556 static void sd_injection_log(char *buf, struct sd_lun *un); 1557 #endif 1558 1559 /* 1560 * Device driver ops vector 1561 */ 1562 static struct cb_ops sd_cb_ops = { 1563 sdopen, /* open */ 1564 sdclose, /* close */ 1565 sdstrategy, /* strategy */ 1566 nodev, /* print */ 1567 sddump, /* dump */ 1568 sdread, /* read */ 1569 sdwrite, /* write */ 1570 sdioctl, /* ioctl */ 1571 nodev, /* devmap */ 1572 nodev, /* mmap */ 1573 nodev, /* segmap */ 1574 nochpoll, /* poll */ 1575 sd_prop_op, /* cb_prop_op */ 1576 0, /* streamtab */ 1577 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1578 CB_REV, /* cb_rev */ 1579 sdaread, /* async I/O read entry point */ 1580 sdawrite /* async I/O write entry point */ 1581 }; 1582 1583 static struct dev_ops sd_ops = { 1584 DEVO_REV, /* devo_rev, */ 1585 0, /* refcnt */ 1586 sdinfo, /* info */ 1587 nulldev, /* identify */ 1588 sdprobe, /* probe */ 1589 sdattach, /* attach */ 1590 sddetach, /* detach */ 1591 nodev, /* reset */ 1592 &sd_cb_ops, /* driver operations */ 1593 NULL, /* bus operations */ 1594 sdpower /* power */ 1595 }; 1596 1597 1598 /* 1599 * This is the loadable module wrapper. 1600 */ 1601 #include <sys/modctl.h> 1602 1603 static struct modldrv modldrv = { 1604 &mod_driverops, /* Type of module. This one is a driver */ 1605 SD_MODULE_NAME, /* Module name. */ 1606 &sd_ops /* driver ops */ 1607 }; 1608 1609 1610 static struct modlinkage modlinkage = { 1611 MODREV_1, 1612 &modldrv, 1613 NULL 1614 }; 1615 1616 1617 static struct scsi_asq_key_strings sd_additional_codes[] = { 1618 0x81, 0, "Logical Unit is Reserved", 1619 0x85, 0, "Audio Address Not Valid", 1620 0xb6, 0, "Media Load Mechanism Failed", 1621 0xb9, 0, "Audio Play Operation Aborted", 1622 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1623 0x53, 2, "Medium removal prevented", 1624 0x6f, 0, "Authentication failed during key exchange", 1625 0x6f, 1, "Key not present", 1626 0x6f, 2, "Key not established", 1627 0x6f, 3, "Read without proper authentication", 1628 0x6f, 4, "Mismatched region to this logical unit", 1629 0x6f, 5, "Region reset count error", 1630 0xffff, 0x0, NULL 1631 }; 1632 1633 1634 /* 1635 * Struct for passing printing information for sense data messages 1636 */ 1637 struct sd_sense_info { 1638 int ssi_severity; 1639 int ssi_pfa_flag; 1640 }; 1641 1642 /* 1643 * Table of function pointers for iostart-side routines. Separate "chains" 1644 * of layered function calls are formed by placing the function pointers 1645 * sequentially in the desired order. Functions are called according to an 1646 * incrementing table index ordering. The last function in each chain must 1647 * be sd_core_iostart(). The corresponding iodone-side routines are expected 1648 * in the sd_iodone_chain[] array. 1649 * 1650 * Note: It may seem more natural to organize both the iostart and iodone 1651 * functions together, into an array of structures (or some similar 1652 * organization) with a common index, rather than two separate arrays which 1653 * must be maintained in synchronization. The purpose of this division is 1654 * to achieve improved performance: individual arrays allow for more 1655 * effective cache line utilization on certain platforms.
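 *
 * As an illustration (not an additional code path), a buf entering the
 * disk PM-enabled chain below starts at index 0 and walks forward through
 * incrementing indexes:
 *
 *	sd_mapblockaddr_iostart(0) -> sd_pm_iostart(1) -> sd_core_iostart(2)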
1656 */ 1657 1658 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1659 1660 1661 static sd_chain_t sd_iostart_chain[] = { 1662 1663 /* Chain for buf IO for disk drive targets (PM enabled) */ 1664 sd_mapblockaddr_iostart, /* Index: 0 */ 1665 sd_pm_iostart, /* Index: 1 */ 1666 sd_core_iostart, /* Index: 2 */ 1667 1668 /* Chain for buf IO for disk drive targets (PM disabled) */ 1669 sd_mapblockaddr_iostart, /* Index: 3 */ 1670 sd_core_iostart, /* Index: 4 */ 1671 1672 /* Chain for buf IO for removable-media targets (PM enabled) */ 1673 sd_mapblockaddr_iostart, /* Index: 5 */ 1674 sd_mapblocksize_iostart, /* Index: 6 */ 1675 sd_pm_iostart, /* Index: 7 */ 1676 sd_core_iostart, /* Index: 8 */ 1677 1678 /* Chain for buf IO for removable-media targets (PM disabled) */ 1679 sd_mapblockaddr_iostart, /* Index: 9 */ 1680 sd_mapblocksize_iostart, /* Index: 10 */ 1681 sd_core_iostart, /* Index: 11 */ 1682 1683 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1684 sd_mapblockaddr_iostart, /* Index: 12 */ 1685 sd_checksum_iostart, /* Index: 13 */ 1686 sd_pm_iostart, /* Index: 14 */ 1687 sd_core_iostart, /* Index: 15 */ 1688 1689 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1690 sd_mapblockaddr_iostart, /* Index: 16 */ 1691 sd_checksum_iostart, /* Index: 17 */ 1692 sd_core_iostart, /* Index: 18 */ 1693 1694 /* Chain for USCSI commands (all targets) */ 1695 sd_pm_iostart, /* Index: 19 */ 1696 sd_core_iostart, /* Index: 20 */ 1697 1698 /* Chain for checksumming USCSI commands (all targets) */ 1699 sd_checksum_uscsi_iostart, /* Index: 21 */ 1700 sd_pm_iostart, /* Index: 22 */ 1701 sd_core_iostart, /* Index: 23 */ 1702 1703 /* Chain for "direct" USCSI commands (all targets) */ 1704 sd_core_iostart, /* Index: 24 */ 1705 1706 /* Chain for "direct priority" USCSI commands (all targets) */ 1707 sd_core_iostart, /* Index: 25 */ 1708 }; 1709 1710 /* 1711 * Macros to locate the first function of each iostart chain in the 1712 * sd_iostart_chain[] array. These are located by the index in the array. 1713 */ 1714 #define SD_CHAIN_DISK_IOSTART 0 1715 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1716 #define SD_CHAIN_RMMEDIA_IOSTART 5 1717 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1718 #define SD_CHAIN_CHKSUM_IOSTART 12 1719 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1720 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1721 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1722 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1723 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1724 1725 1726 /* 1727 * Table of function pointers for the iodone-side routines for the driver- 1728 * internal layering mechanism. The calling sequence for iodone routines 1729 * uses a decrementing table index, so the last routine called in a chain 1730 * must be at the lowest array index location for that chain. The last 1731 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1732 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1733 * of the functions in an iodone side chain must correspond to the ordering 1734 * of the iostart routines for that chain. Note that there is no iodone 1735 * side routine that corresponds to sd_core_iostart(), so there is no 1736 * entry in the table for this. 
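 *
 * For example, the disk PM-enabled chain below completes in the reverse
 * order of its iostart counterpart, decrementing from the chain's highest
 * index:
 *
 *	sd_pm_iodone(2) -> sd_mapblockaddr_iodone(1) -> sd_buf_iodone(0)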
1737 */ 1738 1739 static sd_chain_t sd_iodone_chain[] = { 1740 1741 /* Chain for buf IO for disk drive targets (PM enabled) */ 1742 sd_buf_iodone, /* Index: 0 */ 1743 sd_mapblockaddr_iodone, /* Index: 1 */ 1744 sd_pm_iodone, /* Index: 2 */ 1745 1746 /* Chain for buf IO for disk drive targets (PM disabled) */ 1747 sd_buf_iodone, /* Index: 3 */ 1748 sd_mapblockaddr_iodone, /* Index: 4 */ 1749 1750 /* Chain for buf IO for removable-media targets (PM enabled) */ 1751 sd_buf_iodone, /* Index: 5 */ 1752 sd_mapblockaddr_iodone, /* Index: 6 */ 1753 sd_mapblocksize_iodone, /* Index: 7 */ 1754 sd_pm_iodone, /* Index: 8 */ 1755 1756 /* Chain for buf IO for removable-media targets (PM disabled) */ 1757 sd_buf_iodone, /* Index: 9 */ 1758 sd_mapblockaddr_iodone, /* Index: 10 */ 1759 sd_mapblocksize_iodone, /* Index: 11 */ 1760 1761 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1762 sd_buf_iodone, /* Index: 12 */ 1763 sd_mapblockaddr_iodone, /* Index: 13 */ 1764 sd_checksum_iodone, /* Index: 14 */ 1765 sd_pm_iodone, /* Index: 15 */ 1766 1767 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1768 sd_buf_iodone, /* Index: 16 */ 1769 sd_mapblockaddr_iodone, /* Index: 17 */ 1770 sd_checksum_iodone, /* Index: 18 */ 1771 1772 /* Chain for USCSI commands (non-checksum targets) */ 1773 sd_uscsi_iodone, /* Index: 19 */ 1774 sd_pm_iodone, /* Index: 20 */ 1775 1776 /* Chain for USCSI commands (checksum targets) */ 1777 sd_uscsi_iodone, /* Index: 21 */ 1778 sd_checksum_uscsi_iodone, /* Index: 22 */ 1779 sd_pm_iodone, /* Index: 23 */ 1780 1781 /* Chain for "direct" USCSI commands (all targets) */ 1782 sd_uscsi_iodone, /* Index: 24 */ 1783 1784 /* Chain for "direct priority" USCSI commands (all targets) */ 1785 sd_uscsi_iodone, /* Index: 25 */ 1786 }; 1787 1788 1789 /* 1790 * Macros to locate the "first" function in the sd_iodone_chain[] array for 1791 * each iodone-side chain. These are located by the array index, but as the 1792 * iodone side functions are called in a decrementing-index order, the 1793 * highest index number in each chain must be specified (as these correspond 1794 * to the first function in the iodone chain that will be called by the core 1795 * at IO completion time). 1796 */ 1797 1798 #define SD_CHAIN_DISK_IODONE 2 1799 #define SD_CHAIN_DISK_IODONE_NO_PM 4 1800 #define SD_CHAIN_RMMEDIA_IODONE 8 1801 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11 1802 #define SD_CHAIN_CHKSUM_IODONE 15 1803 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18 1804 #define SD_CHAIN_USCSI_CMD_IODONE 20 1805 #define SD_CHAIN_USCSI_CHKSUM_IODONE 23 1806 #define SD_CHAIN_DIRECT_CMD_IODONE 24 1807 #define SD_CHAIN_PRIORITY_CMD_IODONE 25 1808 1809 1810 1811 1812 /* 1813 * Array to map a layering chain index to the appropriate initpkt routine. 1814 * The redundant entries are present so that the index used for accessing 1815 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1816 * with this table as well.
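 *
 * (Illustrative dispatch, assuming the core selects the routine from the
 * xbuf's iostart index:
 *
 *	rval = (*(sd_initpkt_map[xp->xb_chain_iostart]))(bp, &pktp);
 *
 * so buf chains resolve to sd_initpkt_for_buf() and uscsi chains resolve
 * to sd_initpkt_for_uscsi() without a separate chain-type check.)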
1817 */ 1818 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **); 1819 1820 static sd_initpkt_t sd_initpkt_map[] = { 1821 1822 /* Chain for buf IO for disk drive targets (PM enabled) */ 1823 sd_initpkt_for_buf, /* Index: 0 */ 1824 sd_initpkt_for_buf, /* Index: 1 */ 1825 sd_initpkt_for_buf, /* Index: 2 */ 1826 1827 /* Chain for buf IO for disk drive targets (PM disabled) */ 1828 sd_initpkt_for_buf, /* Index: 3 */ 1829 sd_initpkt_for_buf, /* Index: 4 */ 1830 1831 /* Chain for buf IO for removable-media targets (PM enabled) */ 1832 sd_initpkt_for_buf, /* Index: 5 */ 1833 sd_initpkt_for_buf, /* Index: 6 */ 1834 sd_initpkt_for_buf, /* Index: 7 */ 1835 sd_initpkt_for_buf, /* Index: 8 */ 1836 1837 /* Chain for buf IO for removable-media targets (PM disabled) */ 1838 sd_initpkt_for_buf, /* Index: 9 */ 1839 sd_initpkt_for_buf, /* Index: 10 */ 1840 sd_initpkt_for_buf, /* Index: 11 */ 1841 1842 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1843 sd_initpkt_for_buf, /* Index: 12 */ 1844 sd_initpkt_for_buf, /* Index: 13 */ 1845 sd_initpkt_for_buf, /* Index: 14 */ 1846 sd_initpkt_for_buf, /* Index: 15 */ 1847 1848 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1849 sd_initpkt_for_buf, /* Index: 16 */ 1850 sd_initpkt_for_buf, /* Index: 17 */ 1851 sd_initpkt_for_buf, /* Index: 18 */ 1852 1853 /* Chain for USCSI commands (non-checksum targets) */ 1854 sd_initpkt_for_uscsi, /* Index: 19 */ 1855 sd_initpkt_for_uscsi, /* Index: 20 */ 1856 1857 /* Chain for USCSI commands (checksum targets) */ 1858 sd_initpkt_for_uscsi, /* Index: 21 */ 1859 sd_initpkt_for_uscsi, /* Index: 22 */ 1860 sd_initpkt_for_uscsi, /* Index: 23 */ 1861 1862 /* Chain for "direct" USCSI commands (all targets) */ 1863 sd_initpkt_for_uscsi, /* Index: 24 */ 1864 1865 /* Chain for "direct priority" USCSI commands (all targets) */ 1866 sd_initpkt_for_uscsi, /* Index: 25 */ 1867 1868 }; 1869 1870 1871 /* 1872 * Array to map a layering chain index to the appropriate destroypkt routine. 1873 * The redundant entries are present so that the index used for accessing 1874 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1875 * with this table as well.
1876 */ 1877 typedef void (*sd_destroypkt_t)(struct buf *); 1878 1879 static sd_destroypkt_t sd_destroypkt_map[] = { 1880 1881 /* Chain for buf IO for disk drive targets (PM enabled) */ 1882 sd_destroypkt_for_buf, /* Index: 0 */ 1883 sd_destroypkt_for_buf, /* Index: 1 */ 1884 sd_destroypkt_for_buf, /* Index: 2 */ 1885 1886 /* Chain for buf IO for disk drive targets (PM disabled) */ 1887 sd_destroypkt_for_buf, /* Index: 3 */ 1888 sd_destroypkt_for_buf, /* Index: 4 */ 1889 1890 /* Chain for buf IO for removable-media targets (PM enabled) */ 1891 sd_destroypkt_for_buf, /* Index: 5 */ 1892 sd_destroypkt_for_buf, /* Index: 6 */ 1893 sd_destroypkt_for_buf, /* Index: 7 */ 1894 sd_destroypkt_for_buf, /* Index: 8 */ 1895 1896 /* Chain for buf IO for removable-media targets (PM disabled) */ 1897 sd_destroypkt_for_buf, /* Index: 9 */ 1898 sd_destroypkt_for_buf, /* Index: 10 */ 1899 sd_destroypkt_for_buf, /* Index: 11 */ 1900 1901 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1902 sd_destroypkt_for_buf, /* Index: 12 */ 1903 sd_destroypkt_for_buf, /* Index: 13 */ 1904 sd_destroypkt_for_buf, /* Index: 14 */ 1905 sd_destroypkt_for_buf, /* Index: 15 */ 1906 1907 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1908 sd_destroypkt_for_buf, /* Index: 16 */ 1909 sd_destroypkt_for_buf, /* Index: 17 */ 1910 sd_destroypkt_for_buf, /* Index: 18 */ 1911 1912 /* Chain for USCSI commands (non-checksum targets) */ 1913 sd_destroypkt_for_uscsi, /* Index: 19 */ 1914 sd_destroypkt_for_uscsi, /* Index: 20 */ 1915 1916 /* Chain for USCSI commands (checksum targets) */ 1917 sd_destroypkt_for_uscsi, /* Index: 21 */ 1918 sd_destroypkt_for_uscsi, /* Index: 22 */ 1919 sd_destroypkt_for_uscsi, /* Index: 23 */ 1920 1921 /* Chain for "direct" USCSI commands (all targets) */ 1922 sd_destroypkt_for_uscsi, /* Index: 24 */ 1923 1924 /* Chain for "direct priority" USCSI commands (all targets) */ 1925 sd_destroypkt_for_uscsi, /* Index: 25 */ 1926 1927 }; 1928 1929 1930 1931 /* 1932 * Array to map a layering chain index to the appropriate chain "type". 1933 * The chain type indicates a specific property/usage of the chain. 1934 * The redundant entries are present so that the index used for accessing 1935 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1936 * with this table as well.
1937 */ 1938 1939 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */ 1940 #define SD_CHAIN_BUFIO 1 /* regular buf IO */ 1941 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */ 1942 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */ 1943 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */ 1944 /* (for error recovery) */ 1945 1946 static int sd_chain_type_map[] = { 1947 1948 /* Chain for buf IO for disk drive targets (PM enabled) */ 1949 SD_CHAIN_BUFIO, /* Index: 0 */ 1950 SD_CHAIN_BUFIO, /* Index: 1 */ 1951 SD_CHAIN_BUFIO, /* Index: 2 */ 1952 1953 /* Chain for buf IO for disk drive targets (PM disabled) */ 1954 SD_CHAIN_BUFIO, /* Index: 3 */ 1955 SD_CHAIN_BUFIO, /* Index: 4 */ 1956 1957 /* Chain for buf IO for removable-media targets (PM enabled) */ 1958 SD_CHAIN_BUFIO, /* Index: 5 */ 1959 SD_CHAIN_BUFIO, /* Index: 6 */ 1960 SD_CHAIN_BUFIO, /* Index: 7 */ 1961 SD_CHAIN_BUFIO, /* Index: 8 */ 1962 1963 /* Chain for buf IO for removable-media targets (PM disabled) */ 1964 SD_CHAIN_BUFIO, /* Index: 9 */ 1965 SD_CHAIN_BUFIO, /* Index: 10 */ 1966 SD_CHAIN_BUFIO, /* Index: 11 */ 1967 1968 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1969 SD_CHAIN_BUFIO, /* Index: 12 */ 1970 SD_CHAIN_BUFIO, /* Index: 13 */ 1971 SD_CHAIN_BUFIO, /* Index: 14 */ 1972 SD_CHAIN_BUFIO, /* Index: 15 */ 1973 1974 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1975 SD_CHAIN_BUFIO, /* Index: 16 */ 1976 SD_CHAIN_BUFIO, /* Index: 17 */ 1977 SD_CHAIN_BUFIO, /* Index: 18 */ 1978 1979 /* Chain for USCSI commands (non-checksum targets) */ 1980 SD_CHAIN_USCSI, /* Index: 19 */ 1981 SD_CHAIN_USCSI, /* Index: 20 */ 1982 1983 /* Chain for USCSI commands (checksum targets) */ 1984 SD_CHAIN_USCSI, /* Index: 21 */ 1985 SD_CHAIN_USCSI, /* Index: 22 */ 1986 SD_CHAIN_USCSI, /* Index: 23 */ 1987 1988 /* Chain for "direct" USCSI commands (all targets) */ 1989 SD_CHAIN_DIRECT, /* Index: 24 */ 1990 1991 /* Chain for "direct priority" USCSI commands (all targets) */ 1992 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */ 1993 }; 1994 1995 1996 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */ 1997 #define SD_IS_BUFIO(xp) \ 1998 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO) 1999 2000 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */ 2001 #define SD_IS_DIRECT_PRIORITY(xp) \ 2002 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY) 2003 2004 2005 2006 /* 2007 * Struct, array, and macros to map a specific chain to the appropriate 2008 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays. 2009 * 2010 * The sd_chain_index_map[] array is used at attach time to set the various 2011 * un_xxx_chain type members of the sd_lun softstate to the specific layering 2012 * chain to be used with the instance. This allows different instances to use 2013 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart 2014 * and xb_chain_iodone index values in the sd_xbuf are initialized to these 2015 * values at sd_xbuf init time, this allows (1) layering chains to be changed 2016 * dynamically and without the use of locking; and (2) a layer to update the 2017 * xb_chain_io[start|done] member in a given xbuf with its current index value, 2018 * to allow for deferred processing of an IO within the same chain from a 2019 * different execution context.
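 *
 * (Illustrative use, assuming attach-time selection for a plain disk:
 *
 *	un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
 *
 * and, at sd_xbuf init time:
 *
 *	xp->xb_chain_iostart =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iostart_index;
 *	xp->xb_chain_iodone =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iodone_index;
 * )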
2020 */ 2021 2022 struct sd_chain_index { 2023 int sci_iostart_index; 2024 int sci_iodone_index; 2025 }; 2026 2027 static struct sd_chain_index sd_chain_index_map[] = { 2028 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2029 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2030 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2031 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2032 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2033 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2034 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2035 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2036 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2037 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2038 }; 2039 2040 2041 /* 2042 * The following are indexes into the sd_chain_index_map[] array. 2043 */ 2044 2045 /* un->un_buf_chain_type must be set to one of these */ 2046 #define SD_CHAIN_INFO_DISK 0 2047 #define SD_CHAIN_INFO_DISK_NO_PM 1 2048 #define SD_CHAIN_INFO_RMMEDIA 2 2049 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2050 #define SD_CHAIN_INFO_CHKSUM 4 2051 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2052 2053 /* un->un_uscsi_chain_type must be set to one of these */ 2054 #define SD_CHAIN_INFO_USCSI_CMD 6 2055 /* USCSI with PM disabled is the same as DIRECT */ 2056 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2057 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2058 2059 /* un->un_direct_chain_type must be set to one of these */ 2060 #define SD_CHAIN_INFO_DIRECT_CMD 8 2061 2062 /* un->un_priority_chain_type must be set to one of these */ 2063 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2064 2065 /* size for devid inquiries */ 2066 #define MAX_INQUIRY_SIZE 0xF0 2067 2068 /* 2069 * Macros used by functions to pass a given buf(9S) struct along to the 2070 * next function in the layering chain for further processing. 2071 * 2072 * In the following macros, passing more than three arguments to the called 2073 * routines causes the optimizer for the SPARC compiler to stop doing tail 2074 * call elimination, which results in significant performance degradation. 2075 */ 2076 #define SD_BEGIN_IOSTART(index, un, bp) \ 2077 ((*(sd_iostart_chain[index]))(index, un, bp)) 2078 2079 #define SD_BEGIN_IODONE(index, un, bp) \ 2080 ((*(sd_iodone_chain[index]))(index, un, bp)) 2081 2082 #define SD_NEXT_IOSTART(index, un, bp) \ 2083 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2084 2085 #define SD_NEXT_IODONE(index, un, bp) \ 2086 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2087 2088 2089 /* 2090 * Function: _init 2091 * 2092 * Description: This is the driver _init(9E) entry point. 2093 * 2094 * Return Code: Returns the value from mod_install(9F) or 2095 * ddi_soft_state_init(9F) as appropriate. 2096 * 2097 * Context: Called when the driver module is loaded.
2098 */ 2099 2100 int 2101 _init(void) 2102 { 2103 int err; 2104 2105 /* establish driver name from module name */ 2106 sd_label = mod_modname(&modlinkage); 2107 2108 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2109 SD_MAXUNIT); 2110 2111 if (err != 0) { 2112 return (err); 2113 } 2114 2115 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2116 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2117 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2118 2119 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2120 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2121 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2122 2123 /* 2124 * it's OK to init here even for fibre devices 2125 */ 2126 sd_scsi_probe_cache_init(); 2127 2128 /* 2129 * Creating taskq before mod_install ensures that all callers (threads) 2130 * that enter the module after a successful mod_install encounter 2131 * a valid taskq. 2132 */ 2133 sd_taskq_create(); 2134 2135 err = mod_install(&modlinkage); 2136 if (err != 0) { 2137 /* delete taskq if install fails */ 2138 sd_taskq_delete(); 2139 2140 mutex_destroy(&sd_detach_mutex); 2141 mutex_destroy(&sd_log_mutex); 2142 mutex_destroy(&sd_label_mutex); 2143 2144 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2145 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2146 cv_destroy(&sd_tr.srq_inprocess_cv); 2147 2148 sd_scsi_probe_cache_fini(); 2149 2150 ddi_soft_state_fini(&sd_state); 2151 return (err); 2152 } 2153 2154 return (err); 2155 } 2156 2157 2158 /* 2159 * Function: _fini 2160 * 2161 * Description: This is the driver _fini(9E) entry point. 2162 * 2163 * Return Code: Returns the value from mod_remove(9F). 2164 * 2165 * Context: Called when driver module is unloaded. 2166 */ 2167 2168 int 2169 _fini(void) 2170 { 2171 int err; 2172 2173 if ((err = mod_remove(&modlinkage)) != 0) { 2174 return (err); 2175 } 2176 2177 sd_taskq_delete(); 2178 2179 mutex_destroy(&sd_detach_mutex); 2180 mutex_destroy(&sd_log_mutex); 2181 mutex_destroy(&sd_label_mutex); 2182 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2183 2184 sd_scsi_probe_cache_fini(); 2185 2186 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2187 cv_destroy(&sd_tr.srq_inprocess_cv); 2188 2189 ddi_soft_state_fini(&sd_state); 2190 2191 return (err); 2192 } 2193 2194 2195 /* 2196 * Function: _info 2197 * 2198 * Description: This is the driver _info(9E) entry point. 2199 * 2200 * Arguments: modinfop - pointer to the driver modinfo structure 2201 * 2202 * Return Code: Returns the value from mod_info(9F). 2203 * 2204 * Context: Kernel thread context 2205 */ 2206 2207 int 2208 _info(struct modinfo *modinfop) 2209 { 2210 return (mod_info(&modlinkage, modinfop)); 2211 } 2212 2213 2214 static void 2215 sd_fill_scsi1_lun(struct sd_lun *un, struct scsi_pkt *pktp) 2216 { 2217 ASSERT(pktp != NULL); 2218 if (un->un_f_is_fibre == TRUE) { 2219 return; 2220 } 2221 2222 SD_FILL_SCSI1_LUN(SD_SCSI_DEVP(un), pktp); 2223 } 2224 2225 /* 2226 * The following routines implement the driver message logging facility. 2227 * They provide component- and level-based debug output filtering. 2228 * Output may also be restricted to messages for a single instance by 2229 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2230 * to NULL, then messages for all instances are printed. 2231 * 2232 * These routines have been cloned from each other due to the language 2233 * constraints of macros and variable argument list processing.
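 *
 * (Illustration only: to restrict output to one instance, a debugger
 * could store that instance's soft state pointer in sd_debug_un; for
 * example, with mdb on a 64-bit kernel, something like:
 *
 *	> sd_debug_un/Z <softstate-address>
 *
 * Setting sd_debug_un back to NULL restores output for all instances.)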
2234 */ 2235 2236 2237 /* 2238 * Function: sd_log_err 2239 * 2240 * Description: This routine is called by the SD_ERROR macro for debug 2241 * logging of error conditions. 2242 * 2243 * Arguments: comp - driver component being logged 2244 * un - pointer to driver soft state (unit) structure 2245 * fmt - error string and format to be logged 2246 */ 2247 2248 static void 2249 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...) 2250 { 2251 va_list ap; 2252 dev_info_t *dev; 2253 2254 ASSERT(un != NULL); 2255 dev = SD_DEVINFO(un); 2256 ASSERT(dev != NULL); 2257 2258 /* 2259 * Filter messages based on the global component and level masks. 2260 * Also print if un matches the value of sd_debug_un, or if 2261 * sd_debug_un is set to NULL. 2262 */ 2263 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) && 2264 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2265 mutex_enter(&sd_log_mutex); 2266 va_start(ap, fmt); 2267 (void) vsprintf(sd_log_buf, fmt, ap); 2268 va_end(ap); 2269 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2270 mutex_exit(&sd_log_mutex); 2271 } 2272 #ifdef SD_FAULT_INJECTION 2273 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2274 if (un->sd_injection_mask & comp) { 2275 mutex_enter(&sd_log_mutex); 2276 va_start(ap, fmt); 2277 (void) vsprintf(sd_log_buf, fmt, ap); 2278 va_end(ap); 2279 sd_injection_log(sd_log_buf, un); 2280 mutex_exit(&sd_log_mutex); 2281 } 2282 #endif 2283 } 2284 2285 2286 /* 2287 * Function: sd_log_info 2288 * 2289 * Description: This routine is called by the SD_INFO macro for debug 2290 * logging of general purpose informational conditions. 2291 * 2292 * Arguments: component - driver component being logged 2293 * un - pointer to driver soft state (unit) structure 2294 * fmt - info string and format to be logged 2295 */ 2296 2297 static void 2298 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...) 2299 { 2300 va_list ap; 2301 dev_info_t *dev; 2302 2303 ASSERT(un != NULL); 2304 dev = SD_DEVINFO(un); 2305 ASSERT(dev != NULL); 2306 2307 /* 2308 * Filter messages based on the global component and level masks. 2309 * Also print if un matches the value of sd_debug_un, or if 2310 * sd_debug_un is set to NULL. 2311 */ 2312 if ((sd_component_mask & component) && 2313 (sd_level_mask & SD_LOGMASK_INFO) && 2314 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2315 mutex_enter(&sd_log_mutex); 2316 va_start(ap, fmt); 2317 (void) vsprintf(sd_log_buf, fmt, ap); 2318 va_end(ap); 2319 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2320 mutex_exit(&sd_log_mutex); 2321 } 2322 #ifdef SD_FAULT_INJECTION 2323 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2324 if (un->sd_injection_mask & component) { 2325 mutex_enter(&sd_log_mutex); 2326 va_start(ap, fmt); 2327 (void) vsprintf(sd_log_buf, fmt, ap); 2328 va_end(ap); 2329 sd_injection_log(sd_log_buf, un); 2330 mutex_exit(&sd_log_mutex); 2331 } 2332 #endif 2333 } 2334 2335 2336 /* 2337 * Function: sd_log_trace 2338 * 2339 * Description: This routine is called by the SD_TRACE macro for debug 2340 * logging of trace conditions (i.e., function entry/exit). 2341 * 2342 * Arguments: component - driver component being logged 2343 * un - pointer to driver soft state (unit) structure 2344 * fmt - trace string and format to be logged 2345 */ 2346 2347 static void 2348 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2349 { 2350 va_list ap; 2351 dev_info_t *dev; 2352 2353 ASSERT(un != NULL); 2354 dev = SD_DEVINFO(un); 2355 ASSERT(dev != NULL); 2356 2357 /* 2358 * Filter messages based on the global component and level masks. 2359 * Also print if un matches the value of sd_debug_un, or if 2360 * sd_debug_un is set to NULL. 2361 */ 2362 if ((sd_component_mask & component) && 2363 (sd_level_mask & SD_LOGMASK_TRACE) && 2364 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2365 mutex_enter(&sd_log_mutex); 2366 va_start(ap, fmt); 2367 (void) vsprintf(sd_log_buf, fmt, ap); 2368 va_end(ap); 2369 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2370 mutex_exit(&sd_log_mutex); 2371 } 2372 #ifdef SD_FAULT_INJECTION 2373 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2374 if (un->sd_injection_mask & component) { 2375 mutex_enter(&sd_log_mutex); 2376 va_start(ap, fmt); 2377 (void) vsprintf(sd_log_buf, fmt, ap); 2378 va_end(ap); 2379 sd_injection_log(sd_log_buf, un); 2380 mutex_exit(&sd_log_mutex); 2381 } 2382 #endif 2383 } 2384 2385 2386 /* 2387 * Function: sdprobe 2388 * 2389 * Description: This is the driver probe(9e) entry point function. 2390 * 2391 * Arguments: devi - opaque device info handle 2392 * 2393 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2394 * DDI_PROBE_FAILURE: If the probe failed. 2395 * DDI_PROBE_PARTIAL: If the instance is not present now, 2396 * but may be present in the future. 2397 */ 2398 2399 static int 2400 sdprobe(dev_info_t *devi) 2401 { 2402 struct scsi_device *devp; 2403 int rval; 2404 int instance; 2405 2406 /* 2407 * if it weren't for pln, sdprobe could actually be nulldev 2408 * in the "__fibre" case. 2409 */ 2410 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2411 return (DDI_PROBE_DONTCARE); 2412 } 2413 2414 devp = ddi_get_driver_private(devi); 2415 2416 if (devp == NULL) { 2417 /* Oops... the nexus driver is misconfigured... */ 2418 return (DDI_PROBE_FAILURE); 2419 } 2420 2421 instance = ddi_get_instance(devi); 2422 2423 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2424 return (DDI_PROBE_PARTIAL); 2425 } 2426 2427 /* 2428 * Call the SCSA utility probe routine to see if we actually 2429 * have a target at this SCSI nexus. 2430 */ 2431 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2432 case SCSIPROBE_EXISTS: 2433 switch (devp->sd_inq->inq_dtype) { 2434 case DTYPE_DIRECT: 2435 rval = DDI_PROBE_SUCCESS; 2436 break; 2437 case DTYPE_RODIRECT: 2438 /* CDs etc.
Can be removable media */ 2439 rval = DDI_PROBE_SUCCESS; 2440 break; 2441 case DTYPE_OPTICAL: 2442 /* 2443 * Rewritable optical drive HP115AA 2444 * Can also be removable media 2445 */ 2446 2447 /* 2448 * Do not attempt to bind to DTYPE_OPTICAL if 2449 * pre-Solaris 9 SPARC sd behavior is required 2450 * 2451 * If first time through and sd_dtype_optical_bind 2452 * has not been set in /etc/system, check properties 2453 */ 2454 2455 if (sd_dtype_optical_bind < 0) { 2456 sd_dtype_optical_bind = ddi_prop_get_int 2457 (DDI_DEV_T_ANY, devi, 0, 2458 "optical-device-bind", 1); 2459 } 2460 2461 if (sd_dtype_optical_bind == 0) { 2462 rval = DDI_PROBE_FAILURE; 2463 } else { 2464 rval = DDI_PROBE_SUCCESS; 2465 } 2466 break; 2467 2468 case DTYPE_NOTPRESENT: 2469 default: 2470 rval = DDI_PROBE_FAILURE; 2471 break; 2472 } 2473 break; 2474 default: 2475 rval = DDI_PROBE_PARTIAL; 2476 break; 2477 } 2478 2479 /* 2480 * This routine checks for resource allocation prior to freeing, 2481 * so it will take care of the "smart probing" case where a 2482 * scsi_probe() may or may not have been issued and will *not* 2483 * free previously-freed resources. 2484 */ 2485 scsi_unprobe(devp); 2486 return (rval); 2487 } 2488 2489 2490 /* 2491 * Function: sdinfo 2492 * 2493 * Description: This is the driver getinfo(9e) entry point function. 2494 * Given the device number, return the devinfo pointer from 2495 * the scsi_device structure or the instance number 2496 * associated with the dev_t. 2497 * 2498 * Arguments: dip - pointer to device info structure 2499 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2500 * DDI_INFO_DEVT2INSTANCE) 2501 * arg - driver dev_t 2502 * result - user buffer for request response 2503 * 2504 * Return Code: DDI_SUCCESS 2505 * DDI_FAILURE 2506 */ 2507 /* ARGSUSED */ 2508 static int 2509 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2510 { 2511 struct sd_lun *un; 2512 dev_t dev; 2513 int instance; 2514 int error; 2515 2516 switch (infocmd) { 2517 case DDI_INFO_DEVT2DEVINFO: 2518 dev = (dev_t)arg; 2519 instance = SDUNIT(dev); 2520 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2521 return (DDI_FAILURE); 2522 } 2523 *result = (void *) SD_DEVINFO(un); 2524 error = DDI_SUCCESS; 2525 break; 2526 case DDI_INFO_DEVT2INSTANCE: 2527 dev = (dev_t)arg; 2528 instance = SDUNIT(dev); 2529 *result = (void *)(uintptr_t)instance; 2530 error = DDI_SUCCESS; 2531 break; 2532 default: 2533 error = DDI_FAILURE; 2534 } 2535 return (error); 2536 } 2537 2538 /* 2539 * Function: sd_prop_op 2540 * 2541 * Description: This is the driver prop_op(9e) entry point function. 2542 * Return the number of blocks for the partition in question 2543 * or forward the request to the property facilities.
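 *		(For example, an "Nblocks" request against a slice whose
 *		geometry is valid is answered from the partition map via
 *		ddi_prop_op_nblocks(9F); other requests fall through to
 *		ddi_prop_op(9F).)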
2544 * 2545 * Arguments: dev - device number 2546 * dip - pointer to device info structure 2547 * prop_op - property operator 2548 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2549 * name - pointer to property name 2550 * valuep - pointer or address of the user buffer 2551 * lengthp - property length 2552 * 2553 * Return Code: DDI_PROP_SUCCESS 2554 * DDI_PROP_NOT_FOUND 2555 * DDI_PROP_UNDEFINED 2556 * DDI_PROP_NO_MEMORY 2557 * DDI_PROP_BUF_TOO_SMALL 2558 */ 2559 2560 static int 2561 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2562 char *name, caddr_t valuep, int *lengthp) 2563 { 2564 int instance = ddi_get_instance(dip); 2565 struct sd_lun *un; 2566 uint64_t nblocks64; 2567 2568 /* 2569 * Our dynamic properties are all device specific and size oriented. 2570 * Requests issued under conditions where size is valid are passed 2571 * to ddi_prop_op_nblocks with the size information, otherwise the 2572 * request is passed to ddi_prop_op. Size depends on valid geometry. 2573 */ 2574 un = ddi_get_soft_state(sd_state, instance); 2575 if ((dev == DDI_DEV_T_ANY) || (un == NULL) || 2576 (un->un_f_geometry_is_valid == FALSE)) { 2577 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2578 name, valuep, lengthp)); 2579 } else { 2580 /* get nblocks value */ 2581 ASSERT(!mutex_owned(SD_MUTEX(un))); 2582 mutex_enter(SD_MUTEX(un)); 2583 nblocks64 = (ulong_t)un->un_map[SDPART(dev)].dkl_nblk; 2584 mutex_exit(SD_MUTEX(un)); 2585 2586 return (ddi_prop_op_nblocks(dev, dip, prop_op, mod_flags, 2587 name, valuep, lengthp, nblocks64)); 2588 } 2589 } 2590 2591 /* 2592 * The following functions are for smart probing: 2593 * sd_scsi_probe_cache_init() 2594 * sd_scsi_probe_cache_fini() 2595 * sd_scsi_clear_probe_cache() 2596 * sd_scsi_probe_with_cache() 2597 */ 2598 2599 /* 2600 * Function: sd_scsi_probe_cache_init 2601 * 2602 * Description: Initializes the probe response cache mutex and head pointer. 2603 * 2604 * Context: Kernel thread context 2605 */ 2606 2607 static void 2608 sd_scsi_probe_cache_init(void) 2609 { 2610 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2611 sd_scsi_probe_cache_head = NULL; 2612 } 2613 2614 2615 /* 2616 * Function: sd_scsi_probe_cache_fini 2617 * 2618 * Description: Frees all resources associated with the probe response cache. 2619 * 2620 * Context: Kernel thread context 2621 */ 2622 2623 static void 2624 sd_scsi_probe_cache_fini(void) 2625 { 2626 struct sd_scsi_probe_cache *cp; 2627 struct sd_scsi_probe_cache *ncp; 2628 2629 /* Clean up our smart probing linked list */ 2630 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2631 ncp = cp->next; 2632 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2633 } 2634 sd_scsi_probe_cache_head = NULL; 2635 mutex_destroy(&sd_scsi_probe_cache_mutex); 2636 } 2637 2638 2639 /* 2640 * Function: sd_scsi_clear_probe_cache 2641 * 2642 * Description: This routine clears the probe response cache. This is 2643 * done when open() returns ENXIO so that when deferred 2644 * attach is attempted (possibly after a device has been 2645 * turned on) we will retry the probe. Since we don't know 2646 * which target we failed to open, we just clear the 2647 * entire cache. 
2648 * 2649 * Context: Kernel thread context 2650 */ 2651 2652 static void 2653 sd_scsi_clear_probe_cache(void) 2654 { 2655 struct sd_scsi_probe_cache *cp; 2656 int i; 2657 2658 mutex_enter(&sd_scsi_probe_cache_mutex); 2659 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2660 /* 2661 * Reset all entries to SCSIPROBE_EXISTS. This will 2662 * force probing to be performed the next time 2663 * sd_scsi_probe_with_cache is called. 2664 */ 2665 for (i = 0; i < NTARGETS_WIDE; i++) { 2666 cp->cache[i] = SCSIPROBE_EXISTS; 2667 } 2668 } 2669 mutex_exit(&sd_scsi_probe_cache_mutex); 2670 } 2671 2672 2673 /* 2674 * Function: sd_scsi_probe_with_cache 2675 * 2676 * Description: This routine implements support for a SCSI device probe 2677 * with cache. The driver maintains a cache of the target 2678 * responses to SCSI probes. If we get no response from a 2679 * target during a probe inquiry, we remember that, and we 2680 * avoid additional calls to scsi_probe on non-zero LUNs 2681 * on the same target until the cache is cleared. By doing 2682 * so we avoid the 1/4 sec selection timeout for nonzero 2683 * LUNs. LUN 0 of a target is always probed. 2684 * 2685 * Arguments: devp - Pointer to a scsi_device(9S) structure 2686 * waitfn - indicates what the allocator routines should 2687 * do when resources are not available. This value 2688 * is passed on to scsi_probe() when that routine 2689 * is called. 2690 * 2691 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2692 * otherwise the value returned by scsi_probe(9F). 2693 * 2694 * Context: Kernel thread context 2695 */ 2696 2697 static int 2698 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2699 { 2700 struct sd_scsi_probe_cache *cp; 2701 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2702 int lun = devp->sd_address.a_lun; 2703 int tgt = devp->sd_address.a_target; 2704 2705 /* Make sure caching is enabled and the target is in range */ 2706 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2707 /* do it the old way (no cache) */ 2708 return (scsi_probe(devp, waitfn)); 2709 } 2710 2711 mutex_enter(&sd_scsi_probe_cache_mutex); 2712 2713 /* Find the cache for this scsi bus instance */ 2714 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2715 if (cp->pdip == pdip) { 2716 break; 2717 } 2718 } 2719 2720 /* If we can't find a cache for this pdip, create one */ 2721 if (cp == NULL) { 2722 int i; 2723 2724 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2725 KM_SLEEP); 2726 cp->pdip = pdip; 2727 cp->next = sd_scsi_probe_cache_head; 2728 sd_scsi_probe_cache_head = cp; 2729 for (i = 0; i < NTARGETS_WIDE; i++) { 2730 cp->cache[i] = SCSIPROBE_EXISTS; 2731 } 2732 } 2733 2734 mutex_exit(&sd_scsi_probe_cache_mutex); 2735 2736 /* Reset the cache for this target when probing LUN zero */ 2737 if (lun == 0) { 2738 cp->cache[tgt] = SCSIPROBE_EXISTS; 2739 } 2740 2741 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2742 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2743 return (SCSIPROBE_NORESP); 2744 } 2745 2746 /* Do the actual probe; save & return the result */ 2747 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2748 } 2749 2750 2751 /* 2752 * Function: sd_spin_up_unit 2753 * 2754 * Description: Issues the following commands to spin up the device: 2755 * START STOP UNIT and INQUIRY.
2756 * 2757 * Arguments: un - driver soft state (unit) structure 2758 * 2759 * Return Code: 0 - success 2760 * EIO - failure 2761 * EACCES - reservation conflict 2762 * 2763 * Context: Kernel thread context 2764 */ 2765 2766 static int 2767 sd_spin_up_unit(struct sd_lun *un) 2768 { 2769 size_t resid = 0; 2770 int has_conflict = FALSE; 2771 uchar_t *bufaddr; 2772 2773 ASSERT(un != NULL); 2774 2775 /* 2776 * Send a throwaway START UNIT command. 2777 * 2778 * If we fail on this, we don't care presently what precisely 2779 * is wrong. EMC's arrays will also fail this with a check 2780 * condition (0x2/0x4/0x3) if the device is "inactive," but 2781 * we don't want to fail the attach because it may become 2782 * "active" later. 2783 */ 2784 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT) 2785 == EACCES) 2786 has_conflict = TRUE; 2787 2788 /* 2789 * Send another INQUIRY command to the target. This is necessary for 2790 * non-removable media direct access devices because their INQUIRY data 2791 * may not be fully qualified until they are spun up (perhaps via the 2792 * START command above). (Note: This seems to be needed for some 2793 * legacy devices only.) The INQUIRY command should succeed even if a 2794 * Reservation Conflict is present. 2795 */ 2796 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 2797 if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) { 2798 kmem_free(bufaddr, SUN_INQSIZE); 2799 return (EIO); 2800 } 2801 2802 /* 2803 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 2804 * Note that this routine does not return a failure here even if the 2805 * INQUIRY command did not return any data. This is a legacy behavior. 2806 */ 2807 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 2808 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 2809 } 2810 2811 kmem_free(bufaddr, SUN_INQSIZE); 2812 2813 /* If we hit a reservation conflict above, tell the caller. */ 2814 if (has_conflict == TRUE) { 2815 return (EACCES); 2816 } 2817 2818 return (0); 2819 } 2820 2821 /* 2822 * Function: sd_enable_descr_sense 2823 * 2824 * Description: This routine attempts to select descriptor sense format 2825 * using the Control mode page. Devices that support 64 bit 2826 * LBAs (for >2TB LUNs) should also implement descriptor 2827 * sense data, so we will call this function whenever we see 2828 * a LUN larger than 2TB. If for some reason the device 2829 * supports 64 bit LBAs but doesn't support descriptor sense, 2830 * presumably the mode select will fail. Everything will 2831 * continue to work normally except that we will not get 2832 * complete sense data for commands that fail with an LBA 2833 * larger than 32 bits. 2834 * 2835 * Arguments: un - driver soft state (unit) structure 2836 * 2837 * Context: Kernel thread context only 2838 */ 2839 2840 static void 2841 sd_enable_descr_sense(struct sd_lun *un) 2842 { 2843 uchar_t *header; 2844 struct mode_control_scsi3 *ctrl_bufp; 2845 size_t buflen; 2846 size_t bd_len; 2847 2848 /* 2849 * Read MODE SENSE page 0xA, Control Mode Page 2850 */ 2851 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 2852 sizeof (struct mode_control_scsi3); 2853 header = kmem_zalloc(buflen, KM_SLEEP); 2854 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 2855 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) { 2856 SD_ERROR(SD_LOG_COMMON, un, 2857 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 2858 goto eds_exit; 2859 } 2860 2861 /* 2862 * Determine size of Block Descriptors in order to locate 2863 * the mode page data.
ATAPI devices return 0, SCSI devices 2864 * should return MODE_BLK_DESC_LENGTH. 2865 */ 2866 bd_len = ((struct mode_header *)header)->bdesc_length; 2867 2868 ctrl_bufp = (struct mode_control_scsi3 *) 2869 (header + MODE_HEADER_LENGTH + bd_len); 2870 2871 /* 2872 * Clear PS bit for MODE SELECT 2873 */ 2874 ctrl_bufp->mode_page.ps = 0; 2875 2876 /* 2877 * Set D_SENSE to enable descriptor sense format. 2878 */ 2879 ctrl_bufp->d_sense = 1; 2880 2881 /* 2882 * Use MODE SELECT to commit the change to the D_SENSE bit 2883 */ 2884 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 2885 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) { 2886 SD_INFO(SD_LOG_COMMON, un, 2887 "sd_enable_descr_sense: mode select ctrl page failed\n"); 2888 goto eds_exit; 2889 } 2890 2891 eds_exit: 2892 kmem_free(header, buflen); 2893 } 2894 2895 2896 /* 2897 * Function: sd_set_mmc_caps 2898 * 2899 * Description: This routine determines if the device is MMC compliant and if 2900 * the device supports CDDA, via a mode sense of the CD/DVD 2901 * capabilities mode page. It also checks if the device is a 2902 * DVD-RAM writable device. 2903 * 2904 * Arguments: un - driver soft state (unit) structure 2905 * 2906 * Context: Kernel thread context only 2907 */ 2908 2909 static void 2910 sd_set_mmc_caps(struct sd_lun *un) 2911 { 2912 struct mode_header_grp2 *sense_mhp; 2913 uchar_t *sense_page; 2914 caddr_t buf; 2915 int bd_len; 2916 int status; 2917 struct uscsi_cmd com; 2918 int rtn; 2919 uchar_t *out_data_rw, *out_data_hd; 2920 uchar_t *rqbuf_rw, *rqbuf_hd; 2921 2922 ASSERT(un != NULL); 2923 2924 /* 2925 * The flags which will be set in this function are: mmc compliant, 2926 * dvdram writable device, and cdda support. Initialize them to FALSE; 2927 * if a capability is detected, it will be set to TRUE. 2928 */ 2929 un->un_f_mmc_cap = FALSE; 2930 un->un_f_dvdram_writable_device = FALSE; 2931 un->un_f_cfg_cdda = FALSE; 2932 2933 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 2934 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 2935 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 2936 2937 if (status != 0) { 2938 /* command failed; just return */ 2939 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 2940 return; 2941 } 2942 /* 2943 * If the mode sense request for the CDROM CAPABILITIES 2944 * page (0x2A) succeeds, the device is assumed to be MMC. 2945 */ 2946 un->un_f_mmc_cap = TRUE; 2947 2948 /* Get to the page data */ 2949 sense_mhp = (struct mode_header_grp2 *)buf; 2950 bd_len = (sense_mhp->bdesc_length_hi << 8) | 2951 sense_mhp->bdesc_length_lo; 2952 if (bd_len > MODE_BLK_DESC_LENGTH) { 2953 /* 2954 * We did not get back the expected block descriptor 2955 * length, so we cannot determine if the device supports 2956 * CDDA. However, we still indicate the device is MMC 2957 * according to the successful response to the page 2958 * 0x2A mode sense request. 2959 */ 2960 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 2961 "sd_set_mmc_caps: Mode Sense returned " 2962 "invalid block descriptor length\n"); 2963 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 2964 return; 2965 } 2966 2967 /* See if read CDDA is supported */ 2968 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 2969 bd_len); 2970 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 2971 2972 /* See if writing DVD RAM is supported. */ 2973 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ?
TRUE : FALSE; 2974 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 2975 if (un->un_f_dvdram_writable_device == TRUE) { 2976 return; 2977 } 2978 2979 /* 2980 * If un->un_f_dvdram_writable_device is still FALSE, 2981 * check for an Iomega RRD type device. Iomega identifies 2982 * its RRD type devices by the features RANDOM_WRITABLE and 2983 * HARDWARE_DEFECT_MANAGEMENT. 2984 */ 2985 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 2986 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 2987 2988 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 2989 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 2990 RANDOM_WRITABLE); 2991 if (rtn != 0) { 2992 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 2993 kmem_free(rqbuf_rw, SENSE_LENGTH); 2994 return; 2995 } 2996 2997 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 2998 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 2999 3000 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3001 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3002 HARDWARE_DEFECT_MANAGEMENT); 3003 if (rtn == 0) { 3004 /* 3005 * We have good information; check for random writable 3006 * and hardware defect features. 3007 */ 3008 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3009 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3010 un->un_f_dvdram_writable_device = TRUE; 3011 } 3012 } 3013 3014 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3015 kmem_free(rqbuf_rw, SENSE_LENGTH); 3016 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3017 kmem_free(rqbuf_hd, SENSE_LENGTH); 3018 } 3019 3020 /* 3021 * Function: sd_check_for_writable_cd 3022 * 3023 * Description: This routine determines if the media in the device is 3024 * writable or not. It uses the GET CONFIGURATION command (0x46) 3025 * to make this determination. 3026 * 3027 * Arguments: un - driver soft state (unit) structure 3028 * 3029 * Context: Never called at interrupt context. 3030 */ 3031 3032 static void 3033 sd_check_for_writable_cd(struct sd_lun *un) 3034 { 3035 struct uscsi_cmd com; 3036 uchar_t *out_data; 3037 uchar_t *rqbuf; 3038 int rtn; 3039 uchar_t *out_data_rw, *out_data_hd; 3040 uchar_t *rqbuf_rw, *rqbuf_hd; 3041 3042 ASSERT(un != NULL); 3043 ASSERT(mutex_owned(SD_MUTEX(un))); 3044 3045 /* 3046 * Initialize the writable media flag to FALSE. It is set to TRUE 3047 * only if the configuration info tells us the media is writable. 3048 */ 3049 un->un_f_mmc_writable_media = FALSE; 3050 mutex_exit(SD_MUTEX(un)); 3051 3052 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3053 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3054 3055 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH, 3056 out_data, SD_PROFILE_HEADER_LEN); 3057 3058 mutex_enter(SD_MUTEX(un)); 3059 if (rtn == 0) { 3060 /* 3061 * We have good information; check for writable DVD. 3062 */ 3063 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3064 un->un_f_mmc_writable_media = TRUE; 3065 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3066 kmem_free(rqbuf, SENSE_LENGTH); 3067 return; 3068 } 3069 } 3070 3071 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3072 kmem_free(rqbuf, SENSE_LENGTH); 3073 3074 /* 3075 * If un->un_f_mmc_writable_media is still FALSE, 3076 * check for Iomega RRD type media. Iomega identifies 3077 * its RRD type devices by the features RANDOM_WRITABLE and 3078 * HARDWARE_DEFECT_MANAGEMENT.
3079 */ 3080 mutex_exit(SD_MUTEX(un)); 3081 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3082 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3083 3084 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3085 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3086 RANDOM_WRITABLE); 3087 if (rtn != 0) { 3088 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3089 kmem_free(rqbuf_rw, SENSE_LENGTH); 3090 mutex_enter(SD_MUTEX(un)); 3091 return; 3092 } 3093 3094 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3095 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3096 3097 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3098 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3099 HARDWARE_DEFECT_MANAGEMENT); 3100 mutex_enter(SD_MUTEX(un)); 3101 if (rtn == 0) { 3102 /* 3103 * We have good information; check that the random writable 3104 * and hardware defect features are reported as current. 3105 */ 3106 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3107 (out_data_rw[10] & 0x1) && 3108 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3109 (out_data_hd[10] & 0x1)) { 3110 un->un_f_mmc_writable_media = TRUE; 3111 } 3112 } 3113 3114 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3115 kmem_free(rqbuf_rw, SENSE_LENGTH); 3116 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3117 kmem_free(rqbuf_hd, SENSE_LENGTH); 3118 } 3119 3120 /* 3121 * Function: sd_read_unit_properties 3122 * 3123 * Description: The following implements a property lookup mechanism. 3124 * Properties for particular disks (keyed on vendor, model 3125 * and rev numbers) are sought in the sd.conf file via 3126 * sd_process_sdconf_file(), and if not found there, are 3127 * looked for in a list hardcoded in this driver via 3128 * sd_process_sdconf_table(). Once located, the properties 3129 * are used to update the driver unit structure. 3130 * 3131 * Arguments: un - driver soft state (unit) structure 3132 */ 3133 3134 static void 3135 sd_read_unit_properties(struct sd_lun *un) 3136 { 3137 /* 3138 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3139 * the "sd-config-list" property (from the sd.conf file) or if 3140 * there was not a match for the inquiry vid/pid. If this event 3141 * occurs, the static driver configuration table is searched for 3142 * a match. 3143 */ 3144 ASSERT(un != NULL); 3145 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3146 sd_process_sdconf_table(un); 3147 } 3148 3149 /* 3150 * Set this in sd.conf to 0 in order to disable kstats. The default 3151 * is 1, so they are enabled by default. 3152 */ 3153 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 3154 SD_DEVINFO(un), DDI_PROP_DONTPASS, "enable-partition-kstats", 1)); 3155 } 3156 3157 3158 /* 3159 * Function: sd_process_sdconf_file 3160 * 3161 * Description: Use ddi_getlongprop to obtain the properties from the 3162 * driver's config file (i.e., sd.conf) and update the driver 3163 * soft state structure accordingly. 3164 * 3165 * Arguments: un - driver soft state (unit) structure 3166 * 3167 * Return Code: SD_SUCCESS - The properties were successfully set according 3168 * to the driver configuration file. 3169 * SD_FAILURE - The driver config list was not obtained or 3170 * there was no vid/pid match. This indicates that 3171 * the static config table should be used.
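 *
 * As a hypothetical illustration of the format described below, an
 * sd.conf fragment might look like (vid/pid and values invented):
 *
 *	sd-config-list = "ACME    SUPERDISK", "acme-data";
 *	acme-data = 1, 0x1, 32;
 *
 * where "1" is SD_CONF_VERSION_1, 0x1 is the flags word (bit 0 set, so
 * prop0 applies), and 32 is the prop0 value; the number of <propN>
 * entries must match the flags word per the rules below.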
3172 * 3173 * The config file has a property, "sd-config-list", which consists of 3174 * one or more duplets as follows: 3175 * 3176 * sd-config-list= 3177 * <duplet>, 3178 * [<duplet>,] 3179 * [<duplet>]; 3180 * 3181 * The structure of each duplet is as follows: 3182 * 3183 * <duplet>:= <vid+pid>,<data-property-name_list> 3184 * 3185 * The first entry of the duplet is the device ID string (the concatenated 3186 * vid & pid; not to be confused with a device_id). This is defined in 3187 * the same way as in the sd_disk_table. 3188 * 3189 * The second part of the duplet is a string that identifies a 3190 * data-property-name-list. The data-property-name-list is defined as 3191 * follows: 3192 * 3193 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3194 * 3195 * The syntax of <data-property-name> depends on the <version> field. 3196 * 3197 * If version = SD_CONF_VERSION_1 we have the following syntax: 3198 * 3199 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3200 * 3201 * where the prop0 value will be used to set prop0 if bit0 set in the 3202 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3203 * 3204 * If version = SD_CONF_VERSION_10 we have the following syntax: 3205 * 3206 * <data-property-name>:=<version>,<prop0>,<prop1>,<prop2>,<prop3> 3207 */ 3208 3209 static int 3210 sd_process_sdconf_file(struct sd_lun *un) 3211 { 3212 char *config_list = NULL; 3213 int config_list_len; 3214 int len; 3215 int dupletlen = 0; 3216 char *vidptr; 3217 int vidlen; 3218 char *dnlist_ptr; 3219 char *dataname_ptr; 3220 int dnlist_len; 3221 int dataname_len; 3222 int *data_list; 3223 int data_list_len; 3224 int rval = SD_FAILURE; 3225 int i; 3226 3227 ASSERT(un != NULL); 3228 3229 /* Obtain the configuration list associated with the .conf file */ 3230 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3231 sd_config_list, (caddr_t)&config_list, &config_list_len) 3232 != DDI_PROP_SUCCESS) { 3233 return (SD_FAILURE); 3234 } 3235 3236 /* 3237 * Compare vids in each duplet to the inquiry vid - if a match is 3238 * made, get the data value and update the soft state structure 3239 * accordingly. 3240 * 3241 * Note: This algorithm is complex and difficult to maintain. It should 3242 * be replaced with a more robust implementation. 3243 */ 3244 for (len = config_list_len, vidptr = config_list; len > 0; 3245 vidptr += dupletlen, len -= dupletlen) { 3246 /* 3247 * Note: The assumption here is that each vid entry is on 3248 * a unique line from its associated duplet. 3249 */ 3250 vidlen = dupletlen = (int)strlen(vidptr); 3251 if ((vidlen == 0) || 3252 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3253 dupletlen++; 3254 continue; 3255 } 3256 3257 /* 3258 * dnlist contains 1 or more blank separated 3259 * data-property-name entries 3260 */ 3261 dnlist_ptr = vidptr + vidlen + 1; 3262 dnlist_len = (int)strlen(dnlist_ptr); 3263 dupletlen += dnlist_len + 2; 3264 3265 /* 3266 * Set a pointer for the first data-property-name 3267 * entry in the list 3268 */ 3269 dataname_ptr = dnlist_ptr; 3270 dataname_len = 0; 3271 3272 /* 3273 * Loop through all data-property-name entries in the 3274 * data-property-name-list setting the properties for each. 3275 */ 3276 while (dataname_len < dnlist_len) { 3277 int version; 3278 3279 /* 3280 * Determine the length of the current 3281 * data-property-name entry by indexing until a 3282 * blank or NULL is encountered. 
When a space is 3283 * encountered, reset it to a NULL for compliance 3284 * with ddi_getlongprop(). 3285 */ 3286 for (i = 0; ((dataname_ptr[i] != ' ') && 3287 (dataname_ptr[i] != '\0')); i++) { 3288 ; 3289 } 3290 3291 dataname_len += i; 3292 /* If not null terminated, make it so */ 3293 if (dataname_ptr[i] == ' ') { 3294 dataname_ptr[i] = '\0'; 3295 } 3296 dataname_len++; 3297 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3298 "sd_process_sdconf_file: disk:%s, data:%s\n", 3299 vidptr, dataname_ptr); 3300 3301 /* Get the data list */ 3302 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0, 3303 dataname_ptr, (caddr_t)&data_list, &data_list_len) 3304 != DDI_PROP_SUCCESS) { 3305 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3306 "sd_process_sdconf_file: data property (%s)" 3307 " has no value\n", dataname_ptr); 3308 dataname_ptr = dnlist_ptr + dataname_len; 3309 continue; 3310 } 3311 3312 version = data_list[0]; 3313 3314 if (version == SD_CONF_VERSION_1) { 3315 sd_tunables values; 3316 3317 /* Set the properties */ 3318 if (sd_chk_vers1_data(un, data_list[1], 3319 &data_list[2], data_list_len, dataname_ptr) 3320 == SD_SUCCESS) { 3321 sd_get_tunables_from_conf(un, 3322 data_list[1], &data_list[2], 3323 &values); 3324 sd_set_vers1_properties(un, 3325 data_list[1], &values); 3326 rval = SD_SUCCESS; 3327 } else { 3328 rval = SD_FAILURE; 3329 } 3330 } else { 3331 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3332 "data property %s version 0x%x is invalid.", 3333 dataname_ptr, version); 3334 rval = SD_FAILURE; 3335 } 3336 kmem_free(data_list, data_list_len); 3337 dataname_ptr = dnlist_ptr + dataname_len; 3338 } 3339 } 3340 3341 /* free up the memory allocated by ddi_getlongprop */ 3342 if (config_list) { 3343 kmem_free(config_list, config_list_len); 3344 } 3345 3346 return (rval); 3347 } 3348 3349 /* 3350 * Function: sd_get_tunables_from_conf() 3351 * 3352 * 3353 * This function reads the data list from the sd.conf file and pulls 3354 * out the values that take numeric arguments, placing each value 3355 * in the appropriate sd_tunables member. 3356 * Since the order of the data list members varies across platforms, 3357 * this function reads them from the data list in a platform-specific 3358 * order and places them into the sd_tunables member that is 3359 * consistent across all platforms.
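 *
 * As a purely illustrative example (not a shipped configuration), an
 * sd.conf entry of the form:
 *
 *	sd-config-list = "SEAGATE ST39103LC", "seagate-data";
 *	seagate-data = 1,0x1,10;
 *
 * would select version 1, set only bit 0 (SD_CONF_BSET_THROTTLE) in
 * the flags word, and cause this routine to place the value 10 into
 * sdt_throttle.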
3360 */ 3361 static void 3362 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3363 sd_tunables *values) 3364 { 3365 int i; 3366 int mask; 3367 3368 bzero(values, sizeof (sd_tunables)); 3369 3370 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3371 3372 mask = 1 << i; 3373 if (mask > flags) { 3374 break; 3375 } 3376 3377 switch (mask & flags) { 3378 case 0: /* This mask bit not set in flags */ 3379 continue; 3380 case SD_CONF_BSET_THROTTLE: 3381 values->sdt_throttle = data_list[i]; 3382 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3383 "sd_get_tunables_from_conf: throttle = %d\n", 3384 values->sdt_throttle); 3385 break; 3386 case SD_CONF_BSET_CTYPE: 3387 values->sdt_ctype = data_list[i]; 3388 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3389 "sd_get_tunables_from_conf: ctype = %d\n", 3390 values->sdt_ctype); 3391 break; 3392 case SD_CONF_BSET_NRR_COUNT: 3393 values->sdt_not_rdy_retries = data_list[i]; 3394 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3395 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 3396 values->sdt_not_rdy_retries); 3397 break; 3398 case SD_CONF_BSET_BSY_RETRY_COUNT: 3399 values->sdt_busy_retries = data_list[i]; 3400 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3401 "sd_get_tunables_from_conf: busy_retries = %d\n", 3402 values->sdt_busy_retries); 3403 break; 3404 case SD_CONF_BSET_RST_RETRIES: 3405 values->sdt_reset_retries = data_list[i]; 3406 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3407 "sd_get_tunables_from_conf: reset_retries = %d\n", 3408 values->sdt_reset_retries); 3409 break; 3410 case SD_CONF_BSET_RSV_REL_TIME: 3411 values->sdt_reserv_rel_time = data_list[i]; 3412 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3413 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 3414 values->sdt_reserv_rel_time); 3415 break; 3416 case SD_CONF_BSET_MIN_THROTTLE: 3417 values->sdt_min_throttle = data_list[i]; 3418 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3419 "sd_get_tunables_from_conf: min_throttle = %d\n", 3420 values->sdt_min_throttle); 3421 break; 3422 case SD_CONF_BSET_DISKSORT_DISABLED: 3423 values->sdt_disk_sort_dis = data_list[i]; 3424 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3425 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 3426 values->sdt_disk_sort_dis); 3427 break; 3428 case SD_CONF_BSET_LUN_RESET_ENABLED: 3429 values->sdt_lun_reset_enable = data_list[i]; 3430 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3431 "sd_get_tunables_from_conf: lun_reset_enable = %d" 3432 "\n", values->sdt_lun_reset_enable); 3433 break; 3434 } 3435 } 3436 } 3437 3438 /* 3439 * Function: sd_process_sdconf_table 3440 * 3441 * Description: Search the static configuration table for a match on the 3442 * inquiry vid/pid and update the driver soft state structure 3443 * according to the table property values for the device. 3444 * 3445 * The form of a configuration table entry is: 3446 * <vid+pid>,<flags>,<property-data> 3447 * "SEAGATE ST42400N",1,63,0,0 (Fibre) 3448 * "SEAGATE ST42400N",1,63,0,0,0,0 (Sparc) 3449 * "SEAGATE ST42400N",1,63,0,0,0,0,0,0,0,0,0,0 (Intel) 3450 * 3451 * Arguments: un - driver soft state (unit) structure 3452 */ 3453 3454 static void 3455 sd_process_sdconf_table(struct sd_lun *un) 3456 { 3457 char *id = NULL; 3458 int table_index; 3459 int idlen; 3460 3461 ASSERT(un != NULL); 3462 for (table_index = 0; table_index < sd_disk_table_size; 3463 table_index++) { 3464 id = sd_disk_table[table_index].device_id; 3465 idlen = strlen(id); 3466 if (idlen == 0) { 3467 continue; 3468 } 3469 3470 /* 3471 * The static configuration table currently does not 3472 * implement version 10 properties. 
Additionally, 3473 * multiple data-property-name entries are not 3474 * implemented in the static configuration table. 3475 */ 3476 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 3477 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3478 "sd_process_sdconf_table: disk %s\n", id); 3479 sd_set_vers1_properties(un, 3480 sd_disk_table[table_index].flags, 3481 sd_disk_table[table_index].properties); 3482 break; 3483 } 3484 } 3485 } 3486 3487 3488 /* 3489 * Function: sd_sdconf_id_match 3490 * 3491 * Description: This local function implements a case-insensitive vid/pid 3492 * comparison as well as the boundary cases of wildcards and 3493 * multiple blanks. 3494 * 3495 * Note: An implicit assumption made here is that the scsi 3496 * inquiry structure will always keep the vid, pid and 3497 * revision strings in consecutive sequence, so they can be 3498 * read as a single string. If this assumption is not the 3499 * case, a separate string, to be used for the check, needs 3500 * to be built with these strings concatenated. 3501 * 3502 * Arguments: un - driver soft state (unit) structure 3503 * id - table or config file vid/pid 3504 * idlen - length of the vid/pid (bytes) 3505 * 3506 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3507 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3508 */ 3509 3510 static int 3511 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 3512 { 3513 struct scsi_inquiry *sd_inq; 3514 int rval = SD_SUCCESS; 3515 3516 ASSERT(un != NULL); 3517 sd_inq = un->un_sd->sd_inq; 3518 ASSERT(id != NULL); 3519 3520 /* 3521 * We use the inq_vid as a pointer to a buffer containing the 3522 * vid and pid and use the entire vid/pid length of the table 3523 * entry for the comparison. This works because the inq_pid 3524 * data member follows inq_vid in the scsi_inquiry structure. 3525 */ 3526 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 3527 /* 3528 * The user id string is compared to the inquiry vid/pid 3529 * using a case insensitive comparison and ignoring 3530 * multiple spaces. 3531 */ 3532 rval = sd_blank_cmp(un, id, idlen); 3533 if (rval != SD_SUCCESS) { 3534 /* 3535 * User id strings that start and end with a "*" 3536 * are a special case. These do not have a 3537 * specific vendor, and the product string can 3538 * appear anywhere in the 16 byte PID portion of 3539 * the inquiry data. This is a simple strstr() 3540 * type search for the user id in the inquiry data. 3541 */ 3542 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 3543 char *pidptr = &id[1]; 3544 int i; 3545 int j; 3546 int pidstrlen = idlen - 2; 3547 j = sizeof (SD_INQUIRY(un)->inq_pid) - 3548 pidstrlen; 3549 3550 if (j < 0) { 3551 return (SD_FAILURE); 3552 } 3553 for (i = 0; i < j; i++) { 3554 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 3555 pidptr, pidstrlen) == 0) { 3556 rval = SD_SUCCESS; 3557 break; 3558 } 3559 } 3560 } 3561 } 3562 } 3563 return (rval); 3564 } 3565 3566 3567 /* 3568 * Function: sd_blank_cmp 3569 * 3570 * Description: If the id string starts and ends with a space, treat 3571 * multiple consecutive spaces as equivalent to a single 3572 * space. For example, this causes an sd_disk_table entry 3573 * of " NEC CDROM " to match a device's id string of 3574 * "NEC CDROM". 3575 * 3576 * Note: The success exit condition for this routine occurs when 3577 * the character at the table-entry pointer is '\0' and the 3578 * remaining inquiry-length count (cnt) is zero.
This will happen if the inquiry 3579 * string returned by the device is padded with spaces to be 3580 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3581 * SCSI spec states that the inquiry string is to be padded with 3582 * spaces. 3583 * 3584 * Arguments: un - driver soft state (unit) structure 3585 * id - table or config file vid/pid 3586 * idlen - length of the vid/pid (bytes) 3587 * 3588 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3589 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3590 */ 3591 3592 static int 3593 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3594 { 3595 char *p1; 3596 char *p2; 3597 int cnt; 3598 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3599 sizeof (SD_INQUIRY(un)->inq_pid); 3600 3601 ASSERT(un != NULL); 3602 p2 = un->un_sd->sd_inq->inq_vid; 3603 ASSERT(id != NULL); 3604 p1 = id; 3605 3606 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3607 /* 3608 * Note: string p1 is terminated by a NUL but string p2 3609 * isn't. The end of p2 is determined by cnt. 3610 */ 3611 for (;;) { 3612 /* skip over any extra blanks in both strings */ 3613 while ((*p1 != '\0') && (*p1 == ' ')) { 3614 p1++; 3615 } 3616 while ((cnt != 0) && (*p2 == ' ')) { 3617 p2++; 3618 cnt--; 3619 } 3620 3621 /* compare the two strings */ 3622 if ((cnt == 0) || 3623 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3624 break; 3625 } 3626 while ((cnt > 0) && 3627 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3628 p1++; 3629 p2++; 3630 cnt--; 3631 } 3632 } 3633 } 3634 3635 /* return SD_SUCCESS if both strings match */ 3636 return (((*p1 == '\0') && (cnt == 0)) ? SD_SUCCESS : SD_FAILURE); 3637 } 3638 3639 3640 /* 3641 * Function: sd_chk_vers1_data 3642 * 3643 * Description: Verify the version 1 device properties provided by the 3644 * user via the configuration file. 3645 * 3646 * Arguments: un - driver soft state (unit) structure 3647 * flags - integer mask indicating properties to be set 3648 * prop_list - integer list of property values 3649 * list_len - length of user provided data * dataname_ptr - name of the data property (used in warning messages) 3650 * 3651 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 3652 * SD_FAILURE - Indicates the user provided data is invalid 3653 */ 3654 3655 static int 3656 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 3657 int list_len, char *dataname_ptr) 3658 { 3659 int i; 3660 int mask = 1; 3661 int index = 0; 3662 3663 ASSERT(un != NULL); 3664 3665 /* Check for a NULL property name and list */ 3666 if (dataname_ptr == NULL) { 3667 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3668 "sd_chk_vers1_data: NULL data property name."); 3669 return (SD_FAILURE); 3670 } 3671 if (prop_list == NULL) { 3672 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3673 "sd_chk_vers1_data: %s NULL data property list.", 3674 dataname_ptr); 3675 return (SD_FAILURE); 3676 } 3677 3678 /* Display a warning if undefined bits are set in the flags */ 3679 if (flags & ~SD_CONF_BIT_MASK) { 3680 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3681 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 3682 "Properties not set.", 3683 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 3684 return (SD_FAILURE); 3685 } 3686 3687 /* 3688 * Verify the length of the list by identifying the highest bit set 3689 * in the flags and validating that the property list is long enough 3690 * to hold a value for each property up to that bit's index.
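 * For example (illustrative values): a flags word of 0x5 sets bits 0
 * and 2, so the highest set bit has index 2 and the list must carry
 * at least five ints: the version, the flags word, and prop0 through
 * prop2.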
3691 */ 3692 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3693 if (flags & mask) { 3694 index = i + 1; 3695 } 3696 mask <<= 1; 3697 } 3698 if ((list_len / sizeof (int)) < (index + 2)) { 3699 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3700 "sd_chk_vers1_data: " 3701 "Data property list %s size is incorrect. " 3702 "Properties not set.", dataname_ptr); 3703 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 3704 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 3705 return (SD_FAILURE); 3706 } 3707 return (SD_SUCCESS); 3708 } 3709 3710 3711 /* 3712 * Function: sd_set_vers1_properties 3713 * 3714 * Description: Set version 1 device properties based on a property list 3715 * retrieved from the driver configuration file or static 3716 * configuration table. Version 1 properties have the format: 3717 * 3718 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3719 * 3720 * where the prop0 value will be used to set prop0 if bit0 3721 * is set in the flags 3722 * 3723 * Arguments: un - driver soft state (unit) structure 3724 * flags - integer mask indicating properties to be set 3725 * prop_list - integer list of property values 3726 */ 3727 3728 static void 3729 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 3730 { 3731 ASSERT(un != NULL); 3732 3733 /* 3734 * Set the flag to indicate cache is to be disabled. An attempt 3735 * to disable the cache via sd_disable_caching() will be made 3736 * later during attach once the basic initialization is complete. 3737 */ 3738 if (flags & SD_CONF_BSET_NOCACHE) { 3739 un->un_f_opt_disable_cache = TRUE; 3740 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3741 "sd_set_vers1_properties: caching disabled flag set\n"); 3742 } 3743 3744 /* CD-specific configuration parameters */ 3745 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 3746 un->un_f_cfg_playmsf_bcd = TRUE; 3747 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3748 "sd_set_vers1_properties: playmsf_bcd set\n"); 3749 } 3750 if (flags & SD_CONF_BSET_READSUB_BCD) { 3751 un->un_f_cfg_readsub_bcd = TRUE; 3752 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3753 "sd_set_vers1_properties: readsub_bcd set\n"); 3754 } 3755 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 3756 un->un_f_cfg_read_toc_trk_bcd = TRUE; 3757 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3758 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 3759 } 3760 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 3761 un->un_f_cfg_read_toc_addr_bcd = TRUE; 3762 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3763 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 3764 } 3765 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 3766 un->un_f_cfg_no_read_header = TRUE; 3767 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3768 "sd_set_vers1_properties: no_read_header set\n"); 3769 } 3770 if (flags & SD_CONF_BSET_READ_CD_XD4) { 3771 un->un_f_cfg_read_cd_xd4 = TRUE; 3772 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3773 "sd_set_vers1_properties: read_cd_xd4 set\n"); 3774 } 3775 3776 /* Support for devices which do not have valid/unique serial numbers */ 3777 if (flags & SD_CONF_BSET_FAB_DEVID) { 3778 un->un_f_opt_fab_devid = TRUE; 3779 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3780 "sd_set_vers1_properties: fab_devid bit set\n"); 3781 } 3782 3783 /* Support for user throttle configuration */ 3784 if (flags & SD_CONF_BSET_THROTTLE) { 3785 ASSERT(prop_list != NULL); 3786 un->un_saved_throttle = un->un_throttle = 3787 prop_list->sdt_throttle; 3788 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3789 "sd_set_vers1_properties: throttle set to %d\n", 3790 prop_list->sdt_throttle); 3791 } 3792 3793 /* Set the per-disk retry count
according to the conf file or table. */ 3794 if (flags & SD_CONF_BSET_NRR_COUNT) { 3795 ASSERT(prop_list != NULL); 3796 if (prop_list->sdt_not_rdy_retries) { 3797 un->un_notready_retry_count = 3798 prop_list->sdt_not_rdy_retries; 3799 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3800 "sd_set_vers1_properties: not ready retry count" 3801 " set to %d\n", un->un_notready_retry_count); 3802 } 3803 } 3804 3805 /* The controller type is reported for generic disk driver ioctls */ 3806 if (flags & SD_CONF_BSET_CTYPE) { 3807 ASSERT(prop_list != NULL); 3808 switch (prop_list->sdt_ctype) { 3809 case CTYPE_CDROM: 3810 un->un_ctype = prop_list->sdt_ctype; 3811 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3812 "sd_set_vers1_properties: ctype set to " 3813 "CTYPE_CDROM\n"); 3814 break; 3815 case CTYPE_CCS: 3816 un->un_ctype = prop_list->sdt_ctype; 3817 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3818 "sd_set_vers1_properties: ctype set to " 3819 "CTYPE_CCS\n"); 3820 break; 3821 case CTYPE_ROD: /* RW optical */ 3822 un->un_ctype = prop_list->sdt_ctype; 3823 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3824 "sd_set_vers1_properties: ctype set to " 3825 "CTYPE_ROD\n"); 3826 break; 3827 default: 3828 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3829 "sd_set_vers1_properties: Could not set " 3830 "invalid ctype value (%d)", 3831 prop_list->sdt_ctype); 3832 } 3833 } 3834 3835 /* Purple failover timeout */ 3836 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 3837 ASSERT(prop_list != NULL); 3838 un->un_busy_retry_count = 3839 prop_list->sdt_busy_retries; 3840 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3841 "sd_set_vers1_properties: " 3842 "busy retry count set to %d\n", 3843 un->un_busy_retry_count); 3844 } 3845 3846 /* Purple reset retry count */ 3847 if (flags & SD_CONF_BSET_RST_RETRIES) { 3848 ASSERT(prop_list != NULL); 3849 un->un_reset_retry_count = 3850 prop_list->sdt_reset_retries; 3851 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3852 "sd_set_vers1_properties: " 3853 "reset retry count set to %d\n", 3854 un->un_reset_retry_count); 3855 } 3856 3857 /* Purple reservation release timeout */ 3858 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 3859 ASSERT(prop_list != NULL); 3860 un->un_reserve_release_time = 3861 prop_list->sdt_reserv_rel_time; 3862 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3863 "sd_set_vers1_properties: " 3864 "reservation release timeout set to %d\n", 3865 un->un_reserve_release_time); 3866 } 3867 3868 /* 3869 * Flag telling the driver to verify that no commands are pending 3870 * for a device before issuing a Test Unit Ready. This is a workaround 3871 * for a firmware bug in some Seagate eliteI drives. 3872 */ 3873 if (flags & SD_CONF_BSET_TUR_CHECK) { 3874 un->un_f_cfg_tur_check = TRUE; 3875 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3876 "sd_set_vers1_properties: tur queue check set\n"); 3877 } 3878 3879 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 3880 un->un_min_throttle = prop_list->sdt_min_throttle; 3881 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3882 "sd_set_vers1_properties: min throttle set to %d\n", 3883 un->un_min_throttle); 3884 } 3885 3886 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 3887 un->un_f_disksort_disabled = 3888 (prop_list->sdt_disk_sort_dis != 0) ? 3889 TRUE : FALSE; 3890 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3891 "sd_set_vers1_properties: disksort disabled " 3892 "flag set to %d\n", 3893 prop_list->sdt_disk_sort_dis); 3894 } 3895 3896 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 3897 un->un_f_lun_reset_enabled = 3898 (prop_list->sdt_lun_reset_enable != 0) ?
3899 TRUE : FALSE; 3900 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3901 "sd_set_vers1_properties: lun reset enabled " 3902 "flag set to %d\n", 3903 prop_list->sdt_lun_reset_enable); 3904 } 3905 3906 /* 3907 * Validate the throttle values. 3908 * If any of the numbers are invalid, set everything to defaults. 3909 */ 3910 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 3911 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 3912 (un->un_min_throttle > un->un_throttle)) { 3913 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 3914 un->un_min_throttle = sd_min_throttle; 3915 } 3916 } 3917 3918 /* 3919 * The following routines support reading and interpretation of disk labels, 3920 * including Solaris BE (8-slice) VTOCs, Solaris LE (16-slice) VTOCs, and 3921 * fdisk tables. 3922 */ 3923 3924 /* 3925 * Function: sd_validate_geometry 3926 * 3927 * Description: Read the label from the disk (if present). Update the unit's 3928 * geometry and vtoc information from the data in the label. 3929 * Verify that the label is valid. 3930 * 3931 * Arguments: un - driver soft state (unit) structure 3932 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 3933 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 3934 * to use the USCSI "direct" chain and bypass the normal 3935 * command waitq. 3936 * 3937 * Return Code: 0 - Successful completion 3938 * EINVAL - Invalid value in un->un_tgt_blocksize or 3939 * un->un_blockcount; or label on disk is corrupted 3940 * or unreadable. 3941 * EACCES - Reservation conflict at the device. 3942 * ENOMEM - Resource allocation error 3943 * ENOTSUP - geometry not applicable 3944 * 3945 * Context: Kernel thread only (can sleep). 3946 */ 3947 3948 static int 3949 sd_validate_geometry(struct sd_lun *un, int path_flag) 3950 { 3951 static char labelstring[128]; 3952 static char buf[256]; 3953 char *label = NULL; 3954 int label_error = 0; 3955 int gvalid = un->un_f_geometry_is_valid; 3956 int lbasize; 3957 uint_t capacity; 3958 int count; 3959 3960 ASSERT(un != NULL); 3961 ASSERT(mutex_owned(SD_MUTEX(un))); 3962 3963 /* 3964 * If the required values are not valid, then try getting them 3965 * once via read capacity. If that fails, then fail this call. 3966 * This is necessary with the new mpxio failover behavior in 3967 * the T300, where we can get an attach for the inactive path 3968 * before the active path. The inactive path fails commands with 3969 * sense data of 02,04,88; this happens to the READ CAPACITY issued 3970 * before mpxio has sufficient knowledge to know whether it should 3971 * force a failover (which it won't do at attach anyhow). 3972 * If the READ CAPACITY at attach time fails, un_tgt_blocksize and 3973 * un_blockcount won't be valid. 3974 */ 3975 if ((un->un_f_tgt_blocksize_is_valid != TRUE) || 3976 (un->un_f_blockcount_is_valid != TRUE)) { 3977 uint64_t cap; 3978 uint32_t lbasz; 3979 int rval; 3980 3981 mutex_exit(SD_MUTEX(un)); 3982 rval = sd_send_scsi_READ_CAPACITY(un, &cap, 3983 &lbasz, SD_PATH_DIRECT); 3984 mutex_enter(SD_MUTEX(un)); 3985 if (rval == 0) { 3986 /* 3987 * The following relies on 3988 * sd_send_scsi_READ_CAPACITY never 3989 * returning 0 for capacity and/or lbasize.
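 * (Note that sd_update_block_info() below only overwrites a cached
 * value when the new value is non-zero, so a zero would silently
 * leave the stale value, and its validity flag, unchanged.)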
3990 */ 3991 sd_update_block_info(un, lbasz, cap); 3992 } 3993 3994 if ((un->un_f_tgt_blocksize_is_valid != TRUE) || 3995 (un->un_f_blockcount_is_valid != TRUE)) { 3996 return (EINVAL); 3997 } 3998 } 3999 4000 /* 4001 * Copy the lbasize and capacity so that if they're reset while we're 4002 * not holding the SD_MUTEX, we will continue to use valid values 4003 * after the SD_MUTEX is reacquired. (4119659) 4004 */ 4005 lbasize = un->un_tgt_blocksize; 4006 capacity = un->un_blockcount; 4007 4008 #if defined(_SUNOS_VTOC_16) 4009 /* 4010 * Set up the "whole disk" fdisk partition; this should always 4011 * exist, regardless of whether the disk contains an fdisk table 4012 * or vtoc. 4013 */ 4014 un->un_map[P0_RAW_DISK].dkl_cylno = 0; 4015 un->un_map[P0_RAW_DISK].dkl_nblk = capacity; 4016 #endif 4017 4018 /* 4019 * Refresh the logical and physical geometry caches. 4020 * (data from the MODE SENSE format/rigid disk geometry pages, 4021 * and from scsi_ifgetcap("geometry")). 4022 */ 4023 sd_resync_geom_caches(un, capacity, lbasize, path_flag); 4024 4025 label_error = sd_use_efi(un, path_flag); 4026 if (label_error == 0) { 4027 /* found a valid EFI label */ 4028 SD_TRACE(SD_LOG_IO_PARTITION, un, 4029 "sd_validate_geometry: found EFI label\n"); 4030 un->un_solaris_offset = 0; 4031 un->un_solaris_size = capacity; 4032 return (ENOTSUP); 4033 } 4034 if (un->un_blockcount > DK_MAX_BLOCKS) { 4035 if (label_error == ESRCH) { 4036 /* 4037 * they've configured a LUN over 1TB, but used 4038 * format.dat to restrict format's view of the 4039 * capacity to be under 1TB 4040 */ 4041 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4042 "is >1TB and has a VTOC label: use format(1M) to either decrease the"); 4043 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 4044 "size to be < 1TB or relabel the disk with an EFI label"); 4045 } else { 4046 /* unlabeled disk over 1TB */ 4047 return (ENOTSUP); 4048 } 4049 } 4050 label_error = 0; 4051 4052 /* 4053 * At this point the disk is either labeled with a VTOC or it is 4054 * under 1TB 4055 */ 4056 4057 /* 4058 * Only DIRECT ACCESS devices will have Sun labels. 4059 * CDs supposedly have a Sun label, too 4060 */ 4061 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT || ISREMOVABLE(un)) { 4062 struct dk_label *dkl; 4063 offset_t dkl1; 4064 offset_t label_addr, real_addr; 4065 int rval; 4066 size_t buffer_size; 4067 4068 /* 4069 * Note: This will set up un->un_solaris_size and 4070 * un->un_solaris_offset. 4071 */ 4072 switch (sd_read_fdisk(un, capacity, lbasize, path_flag)) { 4073 case SD_CMD_RESERVATION_CONFLICT: 4074 ASSERT(mutex_owned(SD_MUTEX(un))); 4075 return (EACCES); 4076 case SD_CMD_FAILURE: 4077 ASSERT(mutex_owned(SD_MUTEX(un))); 4078 return (ENOMEM); 4079 } 4080 4081 if (un->un_solaris_size <= DK_LABEL_LOC) { 4082 /* 4083 * Found fdisk table but no Solaris partition entry, 4084 * so don't call sd_uselabel() and don't create 4085 * a default label.
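 * (The geometry is marked valid here so that the fdisk-only layout
 * is treated as a successfully validated configuration by the rest
 * of the driver.)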
4086 */ 4087 label_error = 0; 4088 un->un_f_geometry_is_valid = TRUE; 4089 goto no_solaris_partition; 4090 } 4091 label_addr = (daddr_t)(un->un_solaris_offset + DK_LABEL_LOC); 4092 4093 /* 4094 * If sys_blocksize != tgt_blocksize, we need to re-adjust 4095 * blkno and save the index to the beginning of the dk_label 4096 */ 4097 real_addr = SD_SYS2TGTBLOCK(un, label_addr); 4098 buffer_size = SD_REQBYTES2TGTBYTES(un, 4099 sizeof (struct dk_label)); 4100 4101 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_validate_geometry: " 4102 "label_addr: 0x%x allocation size: 0x%x\n", 4103 label_addr, buffer_size); 4104 dkl = kmem_zalloc(buffer_size, KM_NOSLEEP); 4105 if (dkl == NULL) { 4106 return (ENOMEM); 4107 } 4108 4109 mutex_exit(SD_MUTEX(un)); 4110 rval = sd_send_scsi_READ(un, dkl, buffer_size, real_addr, 4111 path_flag); 4112 mutex_enter(SD_MUTEX(un)); 4113 4114 switch (rval) { 4115 case 0: 4116 /* 4117 * sd_uselabel will establish that the geometry 4118 * is valid. 4119 * For sys_blocksize != tgt_blocksize, we need 4120 * to index into the beginning of the dk_label 4121 */ 4122 dkl1 = (daddr_t)dkl 4123 + SD_TGTBYTEOFFSET(un, label_addr, real_addr); 4124 if (sd_uselabel(un, (struct dk_label *)(uintptr_t)dkl1, 4125 path_flag) != SD_LABEL_IS_VALID) { 4126 label_error = EINVAL; 4127 } 4128 break; 4129 case EACCES: 4130 label_error = EACCES; 4131 break; 4132 default: 4133 label_error = EINVAL; 4134 break; 4135 } 4136 4137 kmem_free(dkl, buffer_size); 4138 4139 #if defined(_SUNOS_VTOC_8) 4140 label = (char *)un->un_asciilabel; 4141 #elif defined(_SUNOS_VTOC_16) 4142 label = (char *)un->un_vtoc.v_asciilabel; 4143 #else 4144 #error "No VTOC format defined." 4145 #endif 4146 } 4147 4148 /* 4149 * If a valid label was not found, AND if no reservation conflict 4150 * was detected, then go ahead and create a default label (4069506). 4151 * 4152 * Note: currently, for VTOC_8 devices, the default label is created 4153 * for removables only. For VTOC_16 devices, the default label will 4154 * be created for both removables and non-removables alike. 4155 * (see sd_build_default_label) 4156 */ 4157 #if defined(_SUNOS_VTOC_8) 4158 if (ISREMOVABLE(un) && (label_error != EACCES)) { 4159 #elif defined(_SUNOS_VTOC_16) 4160 if (label_error != EACCES) { 4161 #endif 4162 if (un->un_f_geometry_is_valid == FALSE) { 4163 sd_build_default_label(un); 4164 } 4165 label_error = 0; 4166 } 4167 4168 no_solaris_partition: 4169 if ((!ISREMOVABLE(un) || 4170 (ISREMOVABLE(un) && un->un_mediastate == DKIO_EJECTED)) && 4171 (un->un_state == SD_STATE_NORMAL && gvalid == FALSE)) { 4172 /* 4173 * Print out a message indicating who and what we are. 4174 * We do this only when we happen to really validate the 4175 * geometry. We may call sd_validate_geometry() at other 4176 * times, e.g., ioctl()s like Get VTOC, in which case we 4177 * don't want to print the label.
4178 * If the geometry is valid, print the label string, 4179 * else print vendor and product info, if available 4180 */ 4181 if ((un->un_f_geometry_is_valid == TRUE) && (label != NULL)) { 4182 SD_INFO(SD_LOG_ATTACH_DETACH, un, "?<%s>\n", label); 4183 } else { 4184 mutex_enter(&sd_label_mutex); 4185 sd_inq_fill(SD_INQUIRY(un)->inq_vid, VIDMAX, 4186 labelstring); 4187 sd_inq_fill(SD_INQUIRY(un)->inq_pid, PIDMAX, 4188 &labelstring[64]); 4189 (void) sprintf(buf, "?Vendor '%s', product '%s'", 4190 labelstring, &labelstring[64]); 4191 if (un->un_f_blockcount_is_valid == TRUE) { 4192 (void) sprintf(&buf[strlen(buf)], 4193 ", %llu %u byte blocks\n", 4194 (longlong_t)un->un_blockcount, 4195 un->un_tgt_blocksize); 4196 } else { 4197 (void) sprintf(&buf[strlen(buf)], 4198 ", (unknown capacity)\n"); 4199 } 4200 SD_INFO(SD_LOG_ATTACH_DETACH, un, buf); 4201 mutex_exit(&sd_label_mutex); 4202 } 4203 } 4204 4205 #if defined(_SUNOS_VTOC_16) 4206 /* 4207 * If we have valid geometry, set up the remaining fdisk partitions. 4208 * Note that dkl_cylno is not used for the fdisk map entries, so 4209 * we set it to an entirely bogus value. 4210 */ 4211 for (count = 0; count < FD_NUMPART; count++) { 4212 un->un_map[FDISK_P1 + count].dkl_cylno = -1; 4213 un->un_map[FDISK_P1 + count].dkl_nblk = 4214 un->un_fmap[count].fmap_nblk; 4215 4216 un->un_offset[FDISK_P1 + count] = 4217 un->un_fmap[count].fmap_start; 4218 } 4219 #endif 4220 4221 for (count = 0; count < NDKMAP; count++) { 4222 #if defined(_SUNOS_VTOC_8) 4223 struct dk_map *lp = &un->un_map[count]; 4224 un->un_offset[count] = 4225 un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno; 4226 #elif defined(_SUNOS_VTOC_16) 4227 struct dkl_partition *vp = &un->un_vtoc.v_part[count]; 4228 4229 un->un_offset[count] = vp->p_start + un->un_solaris_offset; 4230 #else 4231 #error "No VTOC format defined." 4232 #endif 4233 } 4234 4235 return (label_error); 4236 } 4237 4238 4239 #if defined(_SUNOS_VTOC_16) 4240 /* 4241 * Macro: MAX_BLKS 4242 * 4243 * This macro is used for table entries where we need to have the largest 4244 * possible sector value for that head & SPT (sectors per track) 4245 * combination. Other entries for some smaller disk sizes are set by 4246 * convention to match those used by X86 BIOS usage. 4247 */ 4248 #define MAX_BLKS(heads, spt) UINT16_MAX * heads * spt, heads, spt 4249 4250 /* 4251 * Function: sd_convert_geometry 4252 * 4253 * Description: Convert physical geometry into a dk_geom structure. In 4254 * other words, make sure we don't wrap 16-bit values. 4255 * e.g. converting from geom_cache to dk_geom 4256 * 4257 * Context: Kernel thread only 4258 */ 4259 static void 4260 sd_convert_geometry(uint64_t capacity, struct dk_geom *un_g) 4261 { 4262 int i; 4263 static const struct chs_values { 4264 uint_t max_cap; /* Max Capacity for this HS. */ 4265 uint_t nhead; /* Heads to use. */ 4266 uint_t nsect; /* SPT to use. */ 4267 } CHS_values[] = { 4268 {0x00200000, 64, 32}, /* 1GB or smaller disk. */ 4269 {0x01000000, 128, 32}, /* 8GB or smaller disk. */ 4270 {MAX_BLKS(255, 63)}, /* 502.02GB or smaller disk. */ 4271 {MAX_BLKS(255, 126)}, /* .98TB or smaller disk. 
4250 /* 4251 * Function: sd_convert_geometry 4252 * 4253 * Description: Convert physical geometry into a dk_geom structure. In 4254 * other words, make sure we don't wrap 16-bit values. 4255 * e.g., converting from geom_cache to dk_geom 4256 * 4257 * Context: Kernel thread only 4258 */ 4259 static void 4260 sd_convert_geometry(uint64_t capacity, struct dk_geom *un_g) 4261 { 4262 int i; 4263 static const struct chs_values { 4264 uint_t max_cap; /* Max Capacity for this HS. */ 4265 uint_t nhead; /* Heads to use. */ 4266 uint_t nsect; /* SPT to use. */ 4267 } CHS_values[] = { 4268 {0x00200000, 64, 32}, /* 1GB or smaller disk. */ 4269 {0x01000000, 128, 32}, /* 8GB or smaller disk. */ 4270 {MAX_BLKS(255, 63)}, /* 502.02GB or smaller disk. */ 4271 {MAX_BLKS(255, 126)}, /* .98TB or smaller disk. */ 4272 {DK_MAX_BLOCKS, 255, 189} /* Max size is just under 1TB */ 4273 }; 4274 4275 /* Unlabeled SCSI floppy device */ 4276 if (capacity <= 0x1000) { 4277 un_g->dkg_nhead = 2; 4278 un_g->dkg_ncyl = 80; 4279 un_g->dkg_nsect = capacity / (un_g->dkg_nhead * un_g->dkg_ncyl); 4280 return; 4281 } 4282 4283 /* 4284 * For all devices we calculate cylinders using the 4285 * heads and sectors we assign based on capacity of the 4286 * device. The table is designed to be compatible with the 4287 * way other operating systems lay out fdisk tables for x86 4288 * and to ensure that the cylinders never exceed 65535 to 4289 * prevent problems with x86 ioctls that report geometry. 4290 * We use SPT values that are multiples of 63, since other OSes 4291 * that are not limited to 16-bit cylinder counts stop at 63 SPT; 4292 * to cover larger disks we make do with multiples of 63 SPT. 4293 * 4294 * Note that capacities greater than or equal to 1TB will simply 4295 * get the largest geometry from the table. This should be okay 4296 * since disks this large shouldn't be using CHS values anyway. 4297 */ 4298 for (i = 0; CHS_values[i].max_cap < capacity && 4299 CHS_values[i].max_cap != DK_MAX_BLOCKS; i++) 4300 ; 4301 4302 un_g->dkg_nhead = CHS_values[i].nhead; 4303 un_g->dkg_nsect = CHS_values[i].nsect; 4304 } 4305 #endif 4306 4307 4308 /* 4309 * Function: sd_resync_geom_caches 4310 * 4311 * Description: (Re)initialize both geometry caches: the virtual geometry 4312 * information is extracted from the HBA (the "geometry" 4313 * capability), and the physical geometry cache data is 4314 * generated by issuing MODE SENSE commands. 4315 * 4316 * Arguments: un - driver soft state (unit) structure 4317 * capacity - disk capacity in #blocks 4318 * lbasize - disk block size in bytes 4319 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4320 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4321 * to use the USCSI "direct" chain and bypass the normal 4322 * command waitq. 4323 * 4324 * Context: Kernel thread only (can sleep). 4325 */ 4326 4327 static void 4328 sd_resync_geom_caches(struct sd_lun *un, int capacity, int lbasize, 4329 int path_flag) 4330 { 4331 struct geom_cache pgeom; 4332 struct geom_cache *pgeom_p = &pgeom; 4333 int spc; 4334 unsigned short nhead; 4335 unsigned short nsect; 4336 4337 ASSERT(un != NULL); 4338 ASSERT(mutex_owned(SD_MUTEX(un))); 4339 4340 /* 4341 * Ask the controller for its logical geometry. 4342 * Note: if the HBA does not support scsi_ifgetcap("geometry"), 4343 * then the lgeom cache will be invalid. 4344 */ 4345 sd_get_virtual_geometry(un, capacity, lbasize); 4346 4347 /* 4348 * Initialize the pgeom cache from lgeom, so that if MODE SENSE 4349 * doesn't work, DKIOCG_PHYSGEOM can return reasonable values. 4350 */ 4351 if (un->un_lgeom.g_nsect == 0 || un->un_lgeom.g_nhead == 0) { 4352 /* 4353 * Note: Perhaps this needs to be more adaptive? The rationale 4354 * is that, if there's no HBA geometry from the HBA driver, any 4355 * guess is good, since this is the physical geometry.
If MODE 4356 * SENSE fails, this gives a max cylinder size for non-LBA access 4357 */ 4358 nhead = 255; 4359 nsect = 63; 4360 } else { 4361 nhead = un->un_lgeom.g_nhead; 4362 nsect = un->un_lgeom.g_nsect; 4363 } 4364 4365 if (ISCD(un)) { 4366 pgeom_p->g_nhead = 1; 4367 pgeom_p->g_nsect = nsect * nhead; 4368 } else { 4369 pgeom_p->g_nhead = nhead; 4370 pgeom_p->g_nsect = nsect; 4371 } 4372 4373 spc = pgeom_p->g_nhead * pgeom_p->g_nsect; 4374 pgeom_p->g_capacity = capacity; 4375 pgeom_p->g_ncyl = pgeom_p->g_capacity / spc; 4376 pgeom_p->g_acyl = 0; 4377 4378 /* 4379 * Retrieve fresh geometry data from the hardware and stash it 4380 * here temporarily before we rebuild the incore label. 4381 * 4382 * We want to use the MODE SENSE commands to derive the 4383 * physical geometry of the device, but if either command 4384 * fails, the logical geometry is used as the fallback for 4385 * disk label geometry. 4386 */ 4387 mutex_exit(SD_MUTEX(un)); 4388 sd_get_physical_geometry(un, pgeom_p, capacity, lbasize, path_flag); 4389 mutex_enter(SD_MUTEX(un)); 4390 4391 /* 4392 * Now update the real copy while holding the mutex. This 4393 * way the global copy is never in an inconsistent state. 4394 */ 4395 bcopy(pgeom_p, &un->un_pgeom, sizeof (un->un_pgeom)); 4396 4397 SD_INFO(SD_LOG_COMMON, un, "sd_resync_geom_caches: " 4398 "(cached from lgeom)\n"); 4399 SD_INFO(SD_LOG_COMMON, un, 4400 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4401 un->un_pgeom.g_ncyl, un->un_pgeom.g_acyl, 4402 un->un_pgeom.g_nhead, un->un_pgeom.g_nsect); 4403 SD_INFO(SD_LOG_COMMON, un, " lbasize: %d; capacity: %ld; " 4404 "intrlv: %d; rpm: %d\n", un->un_pgeom.g_secsize, 4405 un->un_pgeom.g_capacity, un->un_pgeom.g_intrlv, 4406 un->un_pgeom.g_rpm); 4407 } 4408 4409 4410 /* 4411 * Function: sd_read_fdisk 4412 * 4413 * Description: Utility routine to read the fdisk table. 4414 * 4415 * Arguments: un - driver soft state (unit) structure * capacity - disk capacity in #blocks * lbasize - disk block size in bytes 4416 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4417 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4418 * to use the USCSI "direct" chain and bypass the normal 4419 * command waitq. 4420 * 4421 * Return Code: SD_CMD_SUCCESS 4422 * SD_CMD_FAILURE 4423 * 4424 * Context: Kernel thread only (can sleep). 4425 */ 4426 /* ARGSUSED */ 4427 static int 4428 sd_read_fdisk(struct sd_lun *un, uint_t capacity, int lbasize, int path_flag) 4429 { 4430 #if defined(_NO_FDISK_PRESENT) 4431 4432 un->un_solaris_offset = 0; 4433 un->un_solaris_size = capacity; 4434 bzero(un->un_fmap, sizeof (struct fmap) * FD_NUMPART); 4435 return (SD_CMD_SUCCESS); 4436 4437 #elif defined(_FIRMWARE_NEEDS_FDISK) 4438 4439 struct ipart *fdp; 4440 struct mboot *mbp; 4441 struct ipart fdisk[FD_NUMPART]; 4442 int i; 4443 char sigbuf[2]; 4444 caddr_t bufp; 4445 int uidx; 4446 int rval; 4447 int lba = 0; 4448 uint_t solaris_offset; /* offset to the Solaris partition
*/ 4449 daddr_t solaris_size; /* size of the Solaris partition */ 4450 uint32_t blocksize; 4451 4452 ASSERT(un != NULL); 4453 ASSERT(mutex_owned(SD_MUTEX(un))); 4454 ASSERT(un->un_f_tgt_blocksize_is_valid == TRUE); 4455 4456 blocksize = un->un_tgt_blocksize; 4457 4458 /* 4459 * Start off assuming no fdisk table 4460 */ 4461 solaris_offset = 0; 4462 solaris_size = capacity; 4463 4464 mutex_exit(SD_MUTEX(un)); 4465 bufp = kmem_zalloc(blocksize, KM_SLEEP); 4466 rval = sd_send_scsi_READ(un, bufp, blocksize, 0, path_flag); 4467 mutex_enter(SD_MUTEX(un)); 4468 4469 if (rval != 0) { 4470 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 4471 "sd_read_fdisk: fdisk read err\n"); 4472 kmem_free(bufp, blocksize); 4473 return (SD_CMD_FAILURE); 4474 } 4475 4476 mbp = (struct mboot *)bufp; 4477 4478 /* 4479 * The fdisk table does not begin on a 4-byte boundary within the 4480 * master boot record, so we copy it to an aligned structure to avoid 4481 * alignment exceptions on some processors. 4482 */ 4483 bcopy(&mbp->parts[0], fdisk, sizeof (fdisk)); 4484 4485 /* 4486 * Check for lba support before verifying sig; sig might not be 4487 * there, say on a blank disk, but the max_chs mark may still 4488 * be present. 4489 * 4490 * Note: LBA support and BEFs are an x86-only concept but this 4491 * code should work OK on SPARC as well. 4492 */ 4493 4494 /* 4495 * First, check for lba-access-ok on the root node (or prom root node); 4496 * if it is present there, we don't need to search the fdisk table. 4497 */ 4498 if (ddi_getprop(DDI_DEV_T_ANY, ddi_root_node(), 0, 4499 "lba-access-ok", 0) != 0) { 4500 /* All drives do LBA; don't search fdisk table */ 4501 lba = 1; 4502 } else { 4503 /* Okay, look for mark in fdisk table */ 4504 for (fdp = fdisk, i = 0; i < FD_NUMPART; i++, fdp++) { 4505 /* accumulate "lba" value from all partitions */ 4506 lba = (lba || sd_has_max_chs_vals(fdp)); 4507 } 4508 } 4509 4510 /* 4511 * Next, look for 'no-bef-lba-access' prop on parent. 4512 * Its presence means the realmode driver doesn't support 4513 * LBA, so the target driver shouldn't advertise it as ok. 4514 * This should be a temporary condition; one day all 4515 * BEFs should support the LBA access functions.
4516 */ 4517 if ((lba != 0) && (ddi_getprop(DDI_DEV_T_ANY, 4518 ddi_get_parent(SD_DEVINFO(un)), DDI_PROP_DONTPASS, 4519 "no-bef-lba-access", 0) != 0)) { 4520 /* BEF doesn't support LBA; don't advertise it as ok */ 4521 lba = 0; 4522 } 4523 4524 if (lba != 0) { 4525 dev_t dev = sd_make_device(SD_DEVINFO(un)); 4526 4527 if (ddi_getprop(dev, SD_DEVINFO(un), DDI_PROP_DONTPASS, 4528 "lba-access-ok", 0) == 0) { 4529 /* not found; create it */ 4530 if (ddi_prop_create(dev, SD_DEVINFO(un), 0, 4531 "lba-access-ok", (caddr_t)NULL, 0) != 4532 DDI_PROP_SUCCESS) { 4533 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 4534 "sd_read_fdisk: Can't create lba property " 4535 "for instance %d\n", 4536 ddi_get_instance(SD_DEVINFO(un))); 4537 } 4538 } 4539 } 4540 4541 bcopy(&mbp->signature, sigbuf, sizeof (sigbuf)); 4542 4543 /* 4544 * Endian-independent signature check 4545 */ 4546 if (((sigbuf[1] & 0xFF) != ((MBB_MAGIC >> 8) & 0xFF)) || 4547 (sigbuf[0] != (MBB_MAGIC & 0xFF))) { 4548 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 4549 "sd_read_fdisk: no fdisk\n"); 4550 bzero(un->un_fmap, sizeof (struct fmap) * FD_NUMPART); 4551 rval = SD_CMD_SUCCESS; 4552 goto done; 4553 } 4554 4555 #ifdef SDDEBUG 4556 if (sd_level_mask & SD_LOGMASK_INFO) { 4557 fdp = fdisk; 4558 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_read_fdisk:\n"); 4559 SD_INFO(SD_LOG_ATTACH_DETACH, un, " relsect " 4560 "numsect sysid bootid\n"); 4561 for (i = 0; i < FD_NUMPART; i++, fdp++) { 4562 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4563 " %d: %8d %8d 0x%08x 0x%08x\n", 4564 i, fdp->relsect, fdp->numsect, 4565 fdp->systid, fdp->bootid); 4566 } 4567 } 4568 #endif 4569 4570 /* 4571 * Try to find the unix partition 4572 */ 4573 uidx = -1; 4574 solaris_offset = 0; 4575 solaris_size = 0; 4576 4577 for (fdp = fdisk, i = 0; i < FD_NUMPART; i++, fdp++) { 4578 int relsect; 4579 int numsect; 4580 4581 if (fdp->numsect == 0) { 4582 un->un_fmap[i].fmap_start = 0; 4583 un->un_fmap[i].fmap_nblk = 0; 4584 continue; 4585 } 4586 4587 /* 4588 * Data in the fdisk table is little-endian. 4589 */ 4590 relsect = LE_32(fdp->relsect); 4591 numsect = LE_32(fdp->numsect); 4592 4593 un->un_fmap[i].fmap_start = relsect; 4594 un->un_fmap[i].fmap_nblk = numsect; 4595 4596 if (fdp->systid != SUNIXOS && 4597 fdp->systid != SUNIXOS2 && 4598 fdp->systid != EFI_PMBR) { 4599 continue; 4600 } 4601 4602 /* 4603 * Use the last active Solaris partition id found 4604 * (there should be only one active partition id). 4605 * 4606 * If there is no active Solaris partition id, 4607 * then use the first inactive Solaris partition id. 4608 */ 4609 if ((uidx == -1) || (fdp->bootid == ACTIVE)) { 4610 uidx = i; 4611 solaris_offset = relsect; 4612 solaris_size = numsect; 4613 } 4614 } 4615 4616 SD_INFO(SD_LOG_ATTACH_DETACH, un, "fdisk 0x%x 0x%lx", 4617 un->un_solaris_offset, un->un_solaris_size); 4618 4619 rval = SD_CMD_SUCCESS; 4620 4621 done: 4622 4623 /* 4624 * Clear the VTOC info only if the Solaris partition entry 4625 * has moved, changed size, been deleted, or if the size of 4626 * the partition is too small to even fit the label sector.
4627 */ 4628 if ((un->un_solaris_offset != solaris_offset) || 4629 (un->un_solaris_size != solaris_size) || 4630 solaris_size <= DK_LABEL_LOC) { 4631 SD_INFO(SD_LOG_ATTACH_DETACH, un, "fdisk moved 0x%x 0x%lx", 4632 solaris_offset, solaris_size); 4633 bzero(&un->un_g, sizeof (struct dk_geom)); 4634 bzero(&un->un_vtoc, sizeof (struct dk_vtoc)); 4635 bzero(&un->un_map, NDKMAP * (sizeof (struct dk_map))); 4636 un->un_f_geometry_is_valid = FALSE; 4637 } 4638 un->un_solaris_offset = solaris_offset; 4639 un->un_solaris_size = solaris_size; 4640 kmem_free(bufp, blocksize); 4641 return (rval); 4642 4643 #else /* #elif defined(_FIRMWARE_NEEDS_FDISK) */ 4644 #error "fdisk table presence undetermined for this platform." 4645 #endif /* #if defined(_NO_FDISK_PRESENT) */ 4646 } 4647 4648 4649 /* 4650 * Function: sd_get_physical_geometry 4651 * 4652 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4653 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4654 * target, and use this information to initialize the physical 4655 * geometry cache specified by pgeom_p. 4656 * 4657 * MODE SENSE is an optional command, so failure in this case 4658 * does not necessarily denote an error. We want to use the 4659 * MODE SENSE commands to derive the physical geometry of the 4660 * device, but if either command fails, the logical geometry is 4661 * used as the fallback for disk label geometry. 4662 * 4663 * This requires that un->un_blockcount and un->un_tgt_blocksize 4664 * have already been initialized for the current target and 4665 * that the current values be passed as args so that we don't 4666 * end up ever trying to use -1 as a valid value. This could 4667 * happen if either value is reset while we're not holding 4668 * the mutex. 4669 * 4670 * Arguments: un - driver soft state (unit) structure * pgeom_p - pointer to the physical geometry cache to initialize * capacity - disk capacity in #blocks * lbasize - disk block size in bytes 4671 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4672 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4673 * to use the USCSI "direct" chain and bypass the normal 4674 * command waitq. 4675 * 4676 * Context: Kernel thread only (can sleep). 4677 */ 4678 4679 static void 4680 sd_get_physical_geometry(struct sd_lun *un, struct geom_cache *pgeom_p, 4681 int capacity, int lbasize, int path_flag) 4682 { 4683 struct mode_format *page3p; 4684 struct mode_geometry *page4p; 4685 struct mode_header *headerp; 4686 int sector_size; 4687 int nsect; 4688 int nhead; 4689 int ncyl; 4690 int intrlv; 4691 int spc; 4692 int modesense_capacity; 4693 int rpm; 4694 int bd_len; 4695 int mode_header_length; 4696 uchar_t *p3bufp; 4697 uchar_t *p4bufp; 4698 int cdbsize; 4699 4700 ASSERT(un != NULL); 4701 ASSERT(!(mutex_owned(SD_MUTEX(un)))); 4702 4703 if (un->un_f_blockcount_is_valid != TRUE) { 4704 return; 4705 } 4706 4707 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 4708 return; 4709 } 4710 4711 if (lbasize == 0) { 4712 if (ISCD(un)) { 4713 lbasize = 2048; 4714 } else { 4715 lbasize = un->un_sys_blocksize; 4716 } 4717 } 4718 pgeom_p->g_secsize = (unsigned short)lbasize; 4719 4720 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ?
CDB_GROUP2 : CDB_GROUP0; 4721 4722 /* 4723 * Retrieve MODE SENSE page 3 - Format Device Page 4724 */ 4725 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4726 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4727 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4728 != 0) { 4729 SD_ERROR(SD_LOG_COMMON, un, 4730 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4731 goto page3_exit; 4732 } 4733 4734 /* 4735 * Determine size of Block Descriptors in order to locate the mode 4736 * page data. ATAPI devices return 0, SCSI devices should return 4737 * MODE_BLK_DESC_LENGTH. 4738 */ 4739 headerp = (struct mode_header *)p3bufp; 4740 if (un->un_f_cfg_is_atapi == TRUE) { 4741 struct mode_header_grp2 *mhp = 4742 (struct mode_header_grp2 *)headerp; 4743 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4744 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4745 } else { 4746 mode_header_length = MODE_HEADER_LENGTH; 4747 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4748 } 4749 4750 if (bd_len > MODE_BLK_DESC_LENGTH) { 4751 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4752 "received unexpected bd_len of %d, page3\n", bd_len); 4753 goto page3_exit; 4754 } 4755 4756 page3p = (struct mode_format *) 4757 ((caddr_t)headerp + mode_header_length + bd_len); 4758 4759 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4760 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4761 "mode sense pg3 code mismatch %d\n", 4762 page3p->mode_page.code); 4763 goto page3_exit; 4764 } 4765 4766 /* 4767 * Use this physical geometry data only if BOTH MODE SENSE commands 4768 * complete successfully; otherwise, revert to the logical geometry. 4769 * So, we need to save everything in temporary variables. 4770 */ 4771 sector_size = BE_16(page3p->data_bytes_sect); 4772 4773 /* 4774 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4775 */ 4776 if (sector_size == 0) { 4777 sector_size = (ISCD(un)) ? 2048 : un->un_sys_blocksize; 4778 } else { 4779 sector_size &= ~(un->un_sys_blocksize - 1); 4780 } 4781 4782 nsect = BE_16(page3p->sect_track); 4783 intrlv = BE_16(page3p->interleave); 4784 4785 SD_INFO(SD_LOG_COMMON, un, 4786 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4787 SD_INFO(SD_LOG_COMMON, un, 4788 " mode page: %d; nsect: %d; sector size: %d;\n", 4789 page3p->mode_page.code, nsect, sector_size); 4790 SD_INFO(SD_LOG_COMMON, un, 4791 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4792 BE_16(page3p->track_skew), 4793 BE_16(page3p->cylinder_skew)); 4794 4795 4796 /* 4797 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4798 */ 4799 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4800 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4801 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4802 != 0) { 4803 SD_ERROR(SD_LOG_COMMON, un, 4804 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4805 goto page4_exit; 4806 } 4807 4808 /* 4809 * Determine size of Block Descriptors in order to locate the mode 4810 * page data. ATAPI devices return 0, SCSI devices should return 4811 * MODE_BLK_DESC_LENGTH. 
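 * (A group-0 MODE SENSE reply begins with the 4-byte mode parameter
 * header, while the group-2 reply used for ATAPI begins with an
 * 8-byte header whose block descriptor length field is two bytes
 * wide; hence the two branches below.)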
4812 */ 4813 headerp = (struct mode_header *)p4bufp; 4814 if (un->un_f_cfg_is_atapi == TRUE) { 4815 struct mode_header_grp2 *mhp = 4816 (struct mode_header_grp2 *)headerp; 4817 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4818 } else { 4819 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4820 } 4821 4822 if (bd_len > MODE_BLK_DESC_LENGTH) { 4823 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4824 "received unexpected bd_len of %d, page4\n", bd_len); 4825 goto page4_exit; 4826 } 4827 4828 page4p = (struct mode_geometry *) 4829 ((caddr_t)headerp + mode_header_length + bd_len); 4830 4831 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4832 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4833 "mode sense pg4 code mismatch %d\n", 4834 page4p->mode_page.code); 4835 goto page4_exit; 4836 } 4837 4838 /* 4839 * Stash the data now, after we know that both commands completed. 4840 */ 4841 4842 mutex_enter(SD_MUTEX(un)); 4843 4844 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4845 spc = nhead * nsect; 4846 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4847 rpm = BE_16(page4p->rpm); 4848 4849 modesense_capacity = spc * ncyl; 4850 4851 SD_INFO(SD_LOG_COMMON, un, 4852 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4853 SD_INFO(SD_LOG_COMMON, un, 4854 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4855 SD_INFO(SD_LOG_COMMON, un, 4856 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4857 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4858 (void *)pgeom_p, capacity); 4859 4860 /* 4861 * Compensate if the drive's geometry is not rectangular, i.e., 4862 * the product of C * H * S returned by MODE SENSE >= that returned 4863 * by read capacity. This is an idiosyncrasy of the original x86 4864 * disk subsystem. 
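 * As an illustration (with made-up numbers): if MODE SENSE reports
 * 255 heads, 63 sectors and 1000 cylinders (spc = 16065, computed
 * capacity 16065000) while READ CAPACITY reports 16000000 blocks,
 * the adjustment below yields g_acyl = 5 and g_ncyl = 995.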
4865 */ 4866 if (modesense_capacity >= capacity) { 4867 SD_INFO(SD_LOG_COMMON, un, 4868 "sd_get_physical_geometry: adjusting acyl; " 4869 "old: %d; new: %d\n", pgeom_p->g_acyl, 4870 (modesense_capacity - capacity + spc - 1) / spc); 4871 if (sector_size != 0) { 4872 /* 1243403: NEC D38x7 drives don't support sec size */ 4873 pgeom_p->g_secsize = (unsigned short)sector_size; 4874 } 4875 pgeom_p->g_nsect = (unsigned short)nsect; 4876 pgeom_p->g_nhead = (unsigned short)nhead; 4877 pgeom_p->g_capacity = capacity; 4878 pgeom_p->g_acyl = 4879 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4880 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4881 } 4882 4883 pgeom_p->g_rpm = (unsigned short)rpm; 4884 pgeom_p->g_intrlv = (unsigned short)intrlv; 4885 4886 SD_INFO(SD_LOG_COMMON, un, 4887 "sd_get_physical_geometry: mode sense geometry:\n"); 4888 SD_INFO(SD_LOG_COMMON, un, 4889 " nsect: %d; sector size: %d; interlv: %d\n", 4890 nsect, sector_size, intrlv); 4891 SD_INFO(SD_LOG_COMMON, un, 4892 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4893 nhead, ncyl, rpm, modesense_capacity); 4894 SD_INFO(SD_LOG_COMMON, un, 4895 "sd_get_physical_geometry: (cached)\n"); 4896 SD_INFO(SD_LOG_COMMON, un, 4897 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4898 un->un_pgeom.g_ncyl, un->un_pgeom.g_acyl, 4899 un->un_pgeom.g_nhead, un->un_pgeom.g_nsect); 4900 SD_INFO(SD_LOG_COMMON, un, 4901 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4902 un->un_pgeom.g_secsize, un->un_pgeom.g_capacity, 4903 un->un_pgeom.g_intrlv, un->un_pgeom.g_rpm); 4904 4905 mutex_exit(SD_MUTEX(un)); 4906 4907 page4_exit: 4908 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4909 page3_exit: 4910 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4911 } 4912 4913 4914 /* 4915 * Function: sd_get_virtual_geometry 4916 * 4917 * Description: Ask the controller to tell us about the target device. 4918 * 4919 * Arguments: un - pointer to softstate 4920 * capacity - disk capacity in #blocks 4921 * lbasize - disk block size in bytes 4922 * 4923 * Context: Kernel thread only 4924 */ 4925 4926 static void 4927 sd_get_virtual_geometry(struct sd_lun *un, int capacity, int lbasize) 4928 { 4929 struct geom_cache *lgeom_p = &un->un_lgeom; 4930 uint_t geombuf; 4931 int spc; 4932 4933 ASSERT(un != NULL); 4934 ASSERT(mutex_owned(SD_MUTEX(un))); 4935 4936 mutex_exit(SD_MUTEX(un)); 4937 4938 /* Set sector size, and total number of sectors */ 4939 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4940 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4941 4942 /* Let the HBA tell us its geometry */ 4943 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4944 4945 mutex_enter(SD_MUTEX(un)); 4946 4947 /* A value of -1 indicates an undefined "geometry" property */ 4948 if (geombuf == (-1)) { 4949 return; 4950 } 4951 4952 /* Initialize the logical geometry cache. */ 4953 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 4954 lgeom_p->g_nsect = geombuf & 0xffff; 4955 lgeom_p->g_secsize = un->un_sys_blocksize; 4956 4957 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 4958 4959 /* 4960 * Note: The driver originally converted the capacity value from 4961 * target blocks to system blocks. However, the capacity value passed 4962 * to this routine is already in terms of system blocks (this scaling 4963 * is done when the READ CAPACITY command is issued and processed). 
4964 * This 'error' may have gone undetected because the usage of g_ncyl 4965 * (which is based upon g_capacity) is very limited within the driver 4966 */ 4967 lgeom_p->g_capacity = capacity; 4968 4969 /* 4970 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The 4971 * hba may return zero values if the device has been removed. 4972 */ 4973 if (spc == 0) { 4974 lgeom_p->g_ncyl = 0; 4975 } else { 4976 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 4977 } 4978 lgeom_p->g_acyl = 0; 4979 4980 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 4981 SD_INFO(SD_LOG_COMMON, un, 4982 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4983 un->un_lgeom.g_ncyl, un->un_lgeom.g_acyl, 4984 un->un_lgeom.g_nhead, un->un_lgeom.g_nsect); 4985 SD_INFO(SD_LOG_COMMON, un, " lbasize: %d; capacity: %ld; " 4986 "intrlv: %d; rpm: %d\n", un->un_lgeom.g_secsize, 4987 un->un_lgeom.g_capacity, un->un_lgeom.g_intrlv, un->un_lgeom.g_rpm); 4988 } 4989 4990 4991 /* 4992 * Function: sd_update_block_info 4993 * 4994 * Description: Calculate a byte count to sector count bitshift value 4995 * from sector size. 4996 * 4997 * Arguments: un: unit struct. 4998 * lbasize: new target sector size 4999 * capacity: new target capacity, ie. block count 5000 * 5001 * Context: Kernel thread context 5002 */ 5003 5004 static void 5005 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 5006 { 5007 if (lbasize != 0) { 5008 un->un_tgt_blocksize = lbasize; 5009 un->un_f_tgt_blocksize_is_valid = TRUE; 5010 } 5011 5012 if (capacity != 0) { 5013 un->un_blockcount = capacity; 5014 un->un_f_blockcount_is_valid = TRUE; 5015 } 5016 } 5017 5018 5019 static void 5020 sd_swap_efi_gpt(efi_gpt_t *e) 5021 { 5022 _NOTE(ASSUMING_PROTECTED(*e)) 5023 e->efi_gpt_Signature = LE_64(e->efi_gpt_Signature); 5024 e->efi_gpt_Revision = LE_32(e->efi_gpt_Revision); 5025 e->efi_gpt_HeaderSize = LE_32(e->efi_gpt_HeaderSize); 5026 e->efi_gpt_HeaderCRC32 = LE_32(e->efi_gpt_HeaderCRC32); 5027 e->efi_gpt_MyLBA = LE_64(e->efi_gpt_MyLBA); 5028 e->efi_gpt_AlternateLBA = LE_64(e->efi_gpt_AlternateLBA); 5029 e->efi_gpt_FirstUsableLBA = LE_64(e->efi_gpt_FirstUsableLBA); 5030 e->efi_gpt_LastUsableLBA = LE_64(e->efi_gpt_LastUsableLBA); 5031 UUID_LE_CONVERT(e->efi_gpt_DiskGUID, e->efi_gpt_DiskGUID); 5032 e->efi_gpt_PartitionEntryLBA = LE_64(e->efi_gpt_PartitionEntryLBA); 5033 e->efi_gpt_NumberOfPartitionEntries = 5034 LE_32(e->efi_gpt_NumberOfPartitionEntries); 5035 e->efi_gpt_SizeOfPartitionEntry = 5036 LE_32(e->efi_gpt_SizeOfPartitionEntry); 5037 e->efi_gpt_PartitionEntryArrayCRC32 = 5038 LE_32(e->efi_gpt_PartitionEntryArrayCRC32); 5039 } 5040 5041 static void 5042 sd_swap_efi_gpe(int nparts, efi_gpe_t *p) 5043 { 5044 int i; 5045 5046 _NOTE(ASSUMING_PROTECTED(*p)) 5047 for (i = 0; i < nparts; i++) { 5048 UUID_LE_CONVERT(p[i].efi_gpe_PartitionTypeGUID, 5049 p[i].efi_gpe_PartitionTypeGUID); 5050 p[i].efi_gpe_StartingLBA = LE_64(p[i].efi_gpe_StartingLBA); 5051 p[i].efi_gpe_EndingLBA = LE_64(p[i].efi_gpe_EndingLBA); 5052 /* PartitionAttrs */ 5053 } 5054 } 5055 5056 static int 5057 sd_validate_efi(efi_gpt_t *labp) 5058 { 5059 if (labp->efi_gpt_Signature != EFI_SIGNATURE) 5060 return (EINVAL); 5061 /* at least 96 bytes in this version of the spec. 
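 * Editorially restated: the check below requires
 *
 *	sizeof (efi_gpt_t) - sizeof (labp->efi_gpt_Reserved2)
 *	    <= labp->efi_gpt_HeaderSize
 *
 * i.e. every header field this driver reads must fit inside the
 * header size the label itself claims; efi_gpt_Reserved2 is only
 * trailing padding and is therefore excluded from the comparison.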
*/ 5062 if (sizeof (efi_gpt_t) - sizeof (labp->efi_gpt_Reserved2) > 5063 labp->efi_gpt_HeaderSize) 5064 return (EINVAL); 5065 /* this should be 128 bytes */ 5066 if (labp->efi_gpt_SizeOfPartitionEntry != sizeof (efi_gpe_t)) 5067 return (EINVAL); 5068 return (0); 5069 } 5070 5071 static int 5072 sd_use_efi(struct sd_lun *un, int path_flag) 5073 { 5074 int i; 5075 int rval = 0; 5076 efi_gpe_t *partitions; 5077 uchar_t *buf; 5078 uint_t lbasize; 5079 uint64_t cap; 5080 uint_t nparts; 5081 diskaddr_t gpe_lba; 5082 5083 ASSERT(mutex_owned(SD_MUTEX(un))); 5084 lbasize = un->un_tgt_blocksize; 5085 5086 mutex_exit(SD_MUTEX(un)); 5087 5088 buf = kmem_zalloc(EFI_MIN_ARRAY_SIZE, KM_SLEEP); 5089 5090 if (un->un_tgt_blocksize != un->un_sys_blocksize) { 5091 rval = EINVAL; 5092 goto done_err; 5093 } 5094 5095 rval = sd_send_scsi_READ(un, buf, lbasize, 0, path_flag); 5096 if (rval) { 5097 goto done_err; 5098 } 5099 if (((struct dk_label *)buf)->dkl_magic == DKL_MAGIC) { 5100 /* not ours */ 5101 rval = ESRCH; 5102 goto done_err; 5103 } 5104 5105 rval = sd_send_scsi_READ(un, buf, lbasize, 1, path_flag); 5106 if (rval) { 5107 goto done_err; 5108 } 5109 sd_swap_efi_gpt((efi_gpt_t *)buf); 5110 5111 if ((rval = sd_validate_efi((efi_gpt_t *)buf)) != 0) { 5112 /* 5113 * Couldn't read the primary, try the backup. Our 5114 * capacity at this point could be based on CHS, so 5115 * check what the device reports. 5116 */ 5117 rval = sd_send_scsi_READ_CAPACITY(un, &cap, &lbasize, 5118 path_flag); 5119 if (rval) { 5120 goto done_err; 5121 } 5122 if ((rval = sd_send_scsi_READ(un, buf, lbasize, 5123 cap - 1, path_flag)) != 0) { 5124 goto done_err; 5125 } 5126 sd_swap_efi_gpt((efi_gpt_t *)buf); 5127 if ((rval = sd_validate_efi((efi_gpt_t *)buf)) != 0) 5128 goto done_err; 5129 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5130 "primary label corrupt; using backup\n"); 5131 } 5132 5133 nparts = ((efi_gpt_t *)buf)->efi_gpt_NumberOfPartitionEntries; 5134 gpe_lba = ((efi_gpt_t *)buf)->efi_gpt_PartitionEntryLBA; 5135 5136 rval = sd_send_scsi_READ(un, buf, EFI_MIN_ARRAY_SIZE, gpe_lba, 5137 path_flag); 5138 if (rval) { 5139 goto done_err; 5140 } 5141 partitions = (efi_gpe_t *)buf; 5142 5143 if (nparts > MAXPART) { 5144 nparts = MAXPART; 5145 } 5146 sd_swap_efi_gpe(nparts, partitions); 5147 5148 mutex_enter(SD_MUTEX(un)); 5149 5150 /* Fill in partition table. */ 5151 for (i = 0; i < nparts; i++) { 5152 if (partitions->efi_gpe_StartingLBA != 0 || 5153 partitions->efi_gpe_EndingLBA != 0) { 5154 un->un_map[i].dkl_cylno = 5155 partitions->efi_gpe_StartingLBA; 5156 un->un_map[i].dkl_nblk = 5157 partitions->efi_gpe_EndingLBA - 5158 partitions->efi_gpe_StartingLBA + 1; 5159 un->un_offset[i] = 5160 partitions->efi_gpe_StartingLBA; 5161 } 5162 if (i == WD_NODE) { 5163 /* 5164 * minor number 7 corresponds to the whole disk 5165 */ 5166 un->un_map[i].dkl_cylno = 0; 5167 un->un_map[i].dkl_nblk = un->un_blockcount; 5168 un->un_offset[i] = 0; 5169 } 5170 partitions++; 5171 } 5172 un->un_solaris_offset = 0; 5173 un->un_solaris_size = cap; 5174 un->un_f_geometry_is_valid = TRUE; 5175 kmem_free(buf, EFI_MIN_ARRAY_SIZE); 5176 return (0); 5177 5178 done_err: 5179 kmem_free(buf, EFI_MIN_ARRAY_SIZE); 5180 mutex_enter(SD_MUTEX(un)); 5181 /* 5182 * if we didn't find something that could look like a VTOC 5183 * and the disk is over 1TB, we know there isn't a valid label. 5184 * Otherwise let sd_uselabel decide what to do. 
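 *
 * Editorial summary of these cases:
 *
 *	rval == ESRCH (VTOC magic seen)	-> leave the geometry flag
 *					   alone; sd_uselabel decides
 *	rval != ESRCH and un_blockcount > DK_MAX_BLOCKS (> 1TB)
 *					-> mark the geometry invalid
 *	otherwise			-> leave the geometry flag alone
 *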
We only 5185 * want to invalidate this if we're certain the label isn't 5186 * valid because sd_prop_op will now fail, which in turn 5187 * causes things like opens and stats on the partition to fail. 5188 */ 5189 if ((un->un_blockcount > DK_MAX_BLOCKS) && (rval != ESRCH)) { 5190 un->un_f_geometry_is_valid = FALSE; 5191 } 5192 return (rval); 5193 } 5194 5195 5196 /* 5197 * Function: sd_uselabel 5198 * 5199 * Description: Validate the disk label and update the relevant data (geometry, 5200 * partition, vtoc, and capacity data) in the sd_lun struct. 5201 * Marks the geometry of the unit as being valid. 5202 * 5203 * Arguments: un: unit struct. 5204 * dk_label: disk label 5205 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 5206 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 5207 * to use the USCSI "direct" chain and bypass the normal 5208 * command waitq. 5209 * 5210 * Return Code: SD_LABEL_IS_VALID: Label read from disk is OK; geometry, 5211 * partition, vtoc, and capacity data are good. 5212 * 5213 * SD_LABEL_IS_INVALID: Magic number or checksum error in the 5214 * label; or computed capacity does not jibe with capacity 5215 * reported from the READ CAPACITY command. 5216 * 5217 * Context: Kernel thread only (can sleep). 5218 */ 5219 5220 static int 5221 sd_uselabel(struct sd_lun *un, struct dk_label *labp, int path_flag) 5222 { 5223 short *sp; 5224 short sum; 5225 short count; 5226 int label_error = SD_LABEL_IS_VALID; 5227 int i; 5228 int capacity; 5229 int part_end; 5230 int track_capacity; 5231 int err; 5232 #if defined(_SUNOS_VTOC_16) 5233 struct dkl_partition *vpartp; 5234 #endif 5235 ASSERT(un != NULL); 5236 ASSERT(mutex_owned(SD_MUTEX(un))); 5237 5238 /* Validate the magic number of the label. */ 5239 if (labp->dkl_magic != DKL_MAGIC) { 5240 #if defined(__sparc) 5241 if ((un->un_state == SD_STATE_NORMAL) && 5242 !ISREMOVABLE(un)) { 5243 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5244 "Corrupt label; wrong magic number\n"); 5245 } 5246 #endif 5247 return (SD_LABEL_IS_INVALID); 5248 } 5249 5250 /* Validate the checksum of the label. */ 5251 sp = (short *)labp; 5252 sum = 0; 5253 count = sizeof (struct dk_label) / sizeof (short); 5254 while (count--) { 5255 sum ^= *sp++; 5256 } 5257 5258 if (sum != 0) { 5259 #if defined(_SUNOS_VTOC_16) 5260 if (un->un_state == SD_STATE_NORMAL && !ISCD(un)) { 5261 #elif defined(_SUNOS_VTOC_8) 5262 if (un->un_state == SD_STATE_NORMAL && !ISREMOVABLE(un)) { 5263 #endif 5264 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5265 "Corrupt label - label checksum failed\n"); 5266 } 5267 return (SD_LABEL_IS_INVALID); 5268 } 5269 5270 5271 /* 5272 * Fill in geometry structure with data from label. 5273 */ 5274 bzero(&un->un_g, sizeof (struct dk_geom)); 5275 un->un_g.dkg_ncyl = labp->dkl_ncyl; 5276 un->un_g.dkg_acyl = labp->dkl_acyl; 5277 un->un_g.dkg_bcyl = 0; 5278 un->un_g.dkg_nhead = labp->dkl_nhead; 5279 un->un_g.dkg_nsect = labp->dkl_nsect; 5280 un->un_g.dkg_intrlv = labp->dkl_intrlv; 5281 5282 #if defined(_SUNOS_VTOC_8) 5283 un->un_g.dkg_gap1 = labp->dkl_gap1; 5284 un->un_g.dkg_gap2 = labp->dkl_gap2; 5285 un->un_g.dkg_bhead = labp->dkl_bhead; 5286 #endif 5287 #if defined(_SUNOS_VTOC_16) 5288 un->un_dkg_skew = labp->dkl_skew; 5289 #endif 5290 5291 #if defined(__i386) || defined(__amd64) 5292 un->un_g.dkg_apc = labp->dkl_apc; 5293 #endif 5294 5295 /* 5296 * Currently we rely on the values in the label being accurate. If 5297 * dkl_rpm or dkl_pcyl are zero in the label, use a default value.
5298 * 5299 * Note: In the future a MODE SENSE may be used to retrieve this data, 5300 * although this command is optional in SCSI-2. 5301 */ 5302 un->un_g.dkg_rpm = (labp->dkl_rpm != 0) ? labp->dkl_rpm : 3600; 5303 un->un_g.dkg_pcyl = (labp->dkl_pcyl != 0) ? labp->dkl_pcyl : 5304 (un->un_g.dkg_ncyl + un->un_g.dkg_acyl); 5305 5306 /* 5307 * The Read and Write reinstruct values may not be valid 5308 * for older disks. 5309 */ 5310 un->un_g.dkg_read_reinstruct = labp->dkl_read_reinstruct; 5311 un->un_g.dkg_write_reinstruct = labp->dkl_write_reinstruct; 5312 5313 /* Fill in partition table. */ 5314 #if defined(_SUNOS_VTOC_8) 5315 for (i = 0; i < NDKMAP; i++) { 5316 un->un_map[i].dkl_cylno = labp->dkl_map[i].dkl_cylno; 5317 un->un_map[i].dkl_nblk = labp->dkl_map[i].dkl_nblk; 5318 } 5319 #endif 5320 #if defined(_SUNOS_VTOC_16) 5321 vpartp = labp->dkl_vtoc.v_part; 5322 track_capacity = labp->dkl_nhead * labp->dkl_nsect; 5323 5324 for (i = 0; i < NDKMAP; i++, vpartp++) { 5325 un->un_map[i].dkl_cylno = vpartp->p_start / track_capacity; 5326 un->un_map[i].dkl_nblk = vpartp->p_size; 5327 } 5328 #endif 5329 5330 /* Fill in VTOC Structure. */ 5331 bcopy(&labp->dkl_vtoc, &un->un_vtoc, sizeof (struct dk_vtoc)); 5332 #if defined(_SUNOS_VTOC_8) 5333 /* 5334 * The 8-slice vtoc does not include the ascii label; save it into 5335 * the device's soft state structure here. 5336 */ 5337 bcopy(labp->dkl_asciilabel, un->un_asciilabel, LEN_DKL_ASCII); 5338 #endif 5339 5340 /* Mark the geometry as valid. */ 5341 un->un_f_geometry_is_valid = TRUE; 5342 5343 /* Now look for a valid capacity. */ 5344 track_capacity = (un->un_g.dkg_nhead * un->un_g.dkg_nsect); 5345 capacity = (un->un_g.dkg_ncyl * track_capacity); 5346 5347 if (un->un_g.dkg_acyl) { 5348 #if defined(__i386) || defined(__amd64) 5349 /* we may have > 1 alts cylinder */ 5350 capacity += (track_capacity * un->un_g.dkg_acyl); 5351 #else 5352 capacity += track_capacity; 5353 #endif 5354 } 5355 5356 /* 5357 * At this point, un->un_blockcount should contain valid data from 5358 * the READ CAPACITY command. 5359 */ 5360 if (un->un_f_blockcount_is_valid != TRUE) { 5361 /* 5362 * We have a situation where the target didn't give us a good 5363 * READ CAPACITY value, yet there appears to be a valid label. 5364 * In this case, we'll fake the capacity. 5365 */ 5366 un->un_blockcount = capacity; 5367 un->un_f_blockcount_is_valid = TRUE; 5368 goto done; 5369 } 5370 5371 5372 if ((capacity <= un->un_blockcount) || 5373 (un->un_state != SD_STATE_NORMAL)) { 5374 #if defined(_SUNOS_VTOC_8) 5375 /* 5376 * We can't let this happen on drives that are subdivided 5377 * into logical disks (i.e., that have an fdisk table). 5378 * The un_blockcount field should always hold the full media 5379 * size in sectors, period. This code would overwrite 5380 * un_blockcount with the size of the Solaris fdisk partition. 5381 */ 5382 SD_ERROR(SD_LOG_COMMON, un, 5383 "sd_uselabel: Label %d blocks; Drive %d blocks\n", 5384 capacity, un->un_blockcount); 5385 un->un_blockcount = capacity; 5386 un->un_f_blockcount_is_valid = TRUE; 5387 #endif /* defined(_SUNOS_VTOC_8) */ 5388 goto done; 5389 } 5390 5391 if (ISCD(un)) { 5392 /* For CDROMs, we trust that the data in the label is OK. 
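 * Each slice is checked so that its last block still falls on the
 * media. Editorial example with hypothetical numbers (nhead = 64,
 * nsect = 32, so spc = 2048):
 *
 *	dkl_cylno = 10, dkl_nblk = 4096
 *	part_end  = 2048 * 10 + 4096 - 1 = 24575
 *	the slice is acceptable iff part_end <= un_blockcount
 *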
*/ 5393 #if defined(_SUNOS_VTOC_8) 5394 for (i = 0; i < NDKMAP; i++) { 5395 part_end = labp->dkl_nhead * labp->dkl_nsect * 5396 labp->dkl_map[i].dkl_cylno + 5397 labp->dkl_map[i].dkl_nblk - 1; 5398 5399 if ((labp->dkl_map[i].dkl_nblk) && 5400 (part_end > un->un_blockcount)) { 5401 un->un_f_geometry_is_valid = FALSE; 5402 break; 5403 } 5404 } 5405 #endif 5406 #if defined(_SUNOS_VTOC_16) 5407 vpartp = &(labp->dkl_vtoc.v_part[0]); 5408 for (i = 0; i < NDKMAP; i++, vpartp++) { 5409 part_end = vpartp->p_start + vpartp->p_size; 5410 if ((vpartp->p_size > 0) && 5411 (part_end > un->un_blockcount)) { 5412 un->un_f_geometry_is_valid = FALSE; 5413 break; 5414 } 5415 } 5416 #endif 5417 } else { 5418 uint64_t t_capacity; 5419 uint32_t t_lbasize; 5420 5421 mutex_exit(SD_MUTEX(un)); 5422 err = sd_send_scsi_READ_CAPACITY(un, &t_capacity, &t_lbasize, 5423 path_flag); 5424 ASSERT(t_capacity <= DK_MAX_BLOCKS); 5425 mutex_enter(SD_MUTEX(un)); 5426 5427 if (err == 0) { 5428 sd_update_block_info(un, t_lbasize, t_capacity); 5429 } 5430 5431 if (capacity > un->un_blockcount) { 5432 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5433 "Corrupt label - bad geometry\n"); 5434 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 5435 "Label says %u blocks; Drive says %llu blocks\n", 5436 capacity, (unsigned long long)un->un_blockcount); 5437 un->un_f_geometry_is_valid = FALSE; 5438 label_error = SD_LABEL_IS_INVALID; 5439 } 5440 } 5441 5442 done: 5443 5444 SD_INFO(SD_LOG_COMMON, un, "sd_uselabel: (label geometry)\n"); 5445 SD_INFO(SD_LOG_COMMON, un, 5446 " ncyl: %d; acyl: %d; nhead: %d; nsect: %d\n", 5447 un->un_g.dkg_ncyl, un->un_g.dkg_acyl, 5448 un->un_g.dkg_nhead, un->un_g.dkg_nsect); 5449 SD_INFO(SD_LOG_COMMON, un, 5450 " lbasize: %d; capacity: %d; intrlv: %d; rpm: %d\n", 5451 un->un_tgt_blocksize, un->un_blockcount, 5452 un->un_g.dkg_intrlv, un->un_g.dkg_rpm); 5453 SD_INFO(SD_LOG_COMMON, un, " wrt_reinstr: %d; rd_reinstr: %d\n", 5454 un->un_g.dkg_write_reinstruct, un->un_g.dkg_read_reinstruct); 5455 5456 ASSERT(mutex_owned(SD_MUTEX(un))); 5457 5458 return (label_error); 5459 } 5460 5461 5462 /* 5463 * Function: sd_build_default_label 5464 * 5465 * Description: Generate a default label for those devices that do not have 5466 * one, e.g., new media, removable cartridges, etc. 5467 * 5468 * Context: Kernel thread only 5469 */ 5470 5471 static void 5472 sd_build_default_label(struct sd_lun *un) 5473 { 5474 #if defined(_SUNOS_VTOC_16) 5475 uint_t phys_spc; 5476 uint_t disksize; 5477 struct dk_geom un_g; 5478 #endif 5479 5480 ASSERT(un != NULL); 5481 ASSERT(mutex_owned(SD_MUTEX(un))); 5482 5483 #if defined(_SUNOS_VTOC_8) 5484 /* 5485 * Note: This is a legacy check for non-removable devices on VTOC_8 5486 * only. This may be a valid check for VTOC_16 as well. 5487 */ 5488 if (!ISREMOVABLE(un)) { 5489 return; 5490 } 5491 #endif 5492 5493 bzero(&un->un_g, sizeof (struct dk_geom)); 5494 bzero(&un->un_vtoc, sizeof (struct dk_vtoc)); 5495 bzero(&un->un_map, NDKMAP * (sizeof (struct dk_map))); 5496 5497 #if defined(_SUNOS_VTOC_8) 5498 5499 /* 5500 * It's REMOVABLE media, therefore no label (on sparc, anyway). 5501 * But it is still necessary to set up various geometry information, 5502 * and we are doing this here. 5503 */ 5504 5505 /* 5506 * For the rpm, we use the minimum for the disk. For the heads, 5507 * cylinders, and number of sectors per track: if the capacity is 5508 * <= 1GB, head = 64 and sect = 32; else head = 255 and sect = 63. 5509 * Note: the capacity should be equal to the C*H*S product.
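 *
 * Editorial worked example of the round-off described below
 * (hypothetical numbers): with un_blockcount = 1048575 and a
 * 64 x 32 geometry (spc = 2048):
 *
 *	ncyl          = 1048575 / 2048 = 511	(integer division)
 *	un_blockcount = 511 * 2048     = 1046528
 *
 * i.e. 2047 blocks are dropped when the capacity is recomputed.
 *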
This will cause some truncation of size due 5510 * to round-off errors. For CD-ROMs, this truncation can have adverse 5511 * side effects, so we return ncyl and nhead as 1. The nsect value will 5512 * overflow for most CD-ROMs, as nsect is of type ushort. (4190569) 5513 */ 5514 if (ISCD(un)) { 5515 /* 5516 * Preserve the old behavior for non-writable 5517 * media. Since dkg_nsect is a ushort, it 5518 * will lose bits, as CD-ROMs have more than 5519 * 65536 sectors. So if we recalculate 5520 * capacity, it will become much shorter. 5521 * But the dkg_* information is not 5522 * used for CD-ROMs, so it is OK. For 5523 * writable CDs we need this information 5524 * to be valid (for newfs, say). So we 5525 * make nsect and nhead > 1; that way 5526 * nsect can still stay within the ushort limit 5527 * without losing any bits. 5528 */ 5529 if (un->un_f_mmc_writable_media == TRUE) { 5530 un->un_g.dkg_nhead = 64; 5531 un->un_g.dkg_nsect = 32; 5532 un->un_g.dkg_ncyl = un->un_blockcount / (64 * 32); 5533 un->un_blockcount = un->un_g.dkg_ncyl * 5534 un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5535 } else { 5536 un->un_g.dkg_ncyl = 1; 5537 un->un_g.dkg_nhead = 1; 5538 un->un_g.dkg_nsect = un->un_blockcount; 5539 } 5540 } else { 5541 if (un->un_blockcount <= 0x1000) { 5542 /* unlabeled SCSI floppy device */ 5543 un->un_g.dkg_nhead = 2; 5544 un->un_g.dkg_ncyl = 80; 5545 un->un_g.dkg_nsect = un->un_blockcount / (2 * 80); 5546 } else if (un->un_blockcount <= 0x200000) { 5547 un->un_g.dkg_nhead = 64; 5548 un->un_g.dkg_nsect = 32; 5549 un->un_g.dkg_ncyl = un->un_blockcount / (64 * 32); 5550 } else { 5551 un->un_g.dkg_nhead = 255; 5552 un->un_g.dkg_nsect = 63; 5553 un->un_g.dkg_ncyl = un->un_blockcount / (255 * 63); 5554 } 5555 un->un_blockcount = 5556 un->un_g.dkg_ncyl * un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5557 } 5558 5559 un->un_g.dkg_acyl = 0; 5560 un->un_g.dkg_bcyl = 0; 5561 un->un_g.dkg_rpm = 200; 5562 un->un_asciilabel[0] = '\0'; 5563 un->un_g.dkg_pcyl = un->un_g.dkg_ncyl; 5564 5565 un->un_map[0].dkl_cylno = 0; 5566 un->un_map[0].dkl_nblk = un->un_blockcount; 5567 un->un_map[2].dkl_cylno = 0; 5568 un->un_map[2].dkl_nblk = un->un_blockcount; 5569 5570 #elif defined(_SUNOS_VTOC_16) 5571 5572 if (un->un_solaris_size == 0) { 5573 /* 5574 * Got an fdisk table but no Solaris entry, so 5575 * don't create a default label. 5576 */ 5577 un->un_f_geometry_is_valid = TRUE; 5578 return; 5579 } 5580 5581 /* 5582 * For CDs we continue to use the physical geometry to calculate 5583 * number of cylinders. All other devices must convert the 5584 * physical geometry (geom_cache) to values that will fit 5585 * in a dk_geom structure. 5586 */ 5587 if (ISCD(un)) { 5588 phys_spc = un->un_pgeom.g_nhead * un->un_pgeom.g_nsect; 5589 } else { 5590 /* Convert physical geometry to disk geometry */ 5591 bzero(&un_g, sizeof (struct dk_geom)); 5592 sd_convert_geometry(un->un_blockcount, &un_g); 5593 bcopy(&un_g, &un->un_g, sizeof (un->un_g)); 5594 phys_spc = un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5595 } 5596 5597 un->un_g.dkg_pcyl = un->un_solaris_size / phys_spc; 5598 un->un_g.dkg_acyl = DK_ACYL; 5599 un->un_g.dkg_ncyl = un->un_g.dkg_pcyl - DK_ACYL; 5600 disksize = un->un_g.dkg_ncyl * phys_spc; 5601 5602 if (ISCD(un)) { 5603 /* 5604 * CDs don't use the "heads * sectors * cyls"-type of 5605 * geometry, but instead use the entire capacity of the media. 5606 */ 5607 disksize = un->un_solaris_size; 5608 un->un_g.dkg_nhead = 1; 5609 un->un_g.dkg_nsect = 1; 5610 un->un_g.dkg_rpm = 5611 (un->un_pgeom.g_rpm == 0) ?
200 : un->un_pgeom.g_rpm; 5612 5613 un->un_vtoc.v_part[0].p_start = 0; 5614 un->un_vtoc.v_part[0].p_size = disksize; 5615 un->un_vtoc.v_part[0].p_tag = V_BACKUP; 5616 un->un_vtoc.v_part[0].p_flag = V_UNMNT; 5617 5618 un->un_map[0].dkl_cylno = 0; 5619 un->un_map[0].dkl_nblk = disksize; 5620 un->un_offset[0] = 0; 5621 5622 } else { 5623 /* 5624 * Hard disks and removable media cartridges 5625 */ 5626 un->un_g.dkg_rpm = 5627 (un->un_pgeom.g_rpm == 0) ? 3600: un->un_pgeom.g_rpm; 5628 un->un_vtoc.v_sectorsz = un->un_sys_blocksize; 5629 5630 /* Add boot slice */ 5631 un->un_vtoc.v_part[8].p_start = 0; 5632 un->un_vtoc.v_part[8].p_size = phys_spc; 5633 un->un_vtoc.v_part[8].p_tag = V_BOOT; 5634 un->un_vtoc.v_part[8].p_flag = V_UNMNT; 5635 5636 un->un_map[8].dkl_cylno = 0; 5637 un->un_map[8].dkl_nblk = phys_spc; 5638 un->un_offset[8] = 0; 5639 } 5640 5641 un->un_g.dkg_apc = 0; 5642 un->un_vtoc.v_nparts = V_NUMPAR; 5643 un->un_vtoc.v_version = V_VERSION; 5644 5645 /* Add backup slice */ 5646 un->un_vtoc.v_part[2].p_start = 0; 5647 un->un_vtoc.v_part[2].p_size = disksize; 5648 un->un_vtoc.v_part[2].p_tag = V_BACKUP; 5649 un->un_vtoc.v_part[2].p_flag = V_UNMNT; 5650 5651 un->un_map[2].dkl_cylno = 0; 5652 un->un_map[2].dkl_nblk = disksize; 5653 un->un_offset[2] = 0; 5654 5655 (void) sprintf(un->un_vtoc.v_asciilabel, "DEFAULT cyl %d alt %d" 5656 " hd %d sec %d", un->un_g.dkg_ncyl, un->un_g.dkg_acyl, 5657 un->un_g.dkg_nhead, un->un_g.dkg_nsect); 5658 5659 #else 5660 #error "No VTOC format defined." 5661 #endif 5662 5663 un->un_g.dkg_read_reinstruct = 0; 5664 un->un_g.dkg_write_reinstruct = 0; 5665 5666 un->un_g.dkg_intrlv = 1; 5667 5668 un->un_vtoc.v_sanity = VTOC_SANE; 5669 5670 un->un_f_geometry_is_valid = TRUE; 5671 5672 SD_INFO(SD_LOG_COMMON, un, 5673 "sd_build_default_label: Default label created: " 5674 "cyl: %d\tacyl: %d\tnhead: %d\tnsect: %d\tcap: %d\n", 5675 un->un_g.dkg_ncyl, un->un_g.dkg_acyl, un->un_g.dkg_nhead, 5676 un->un_g.dkg_nsect, un->un_blockcount); 5677 } 5678 5679 5680 #if defined(_FIRMWARE_NEEDS_FDISK) 5681 /* 5682 * Max CHS values, as they are encoded into bytes, for 1022/254/63 5683 */ 5684 #define LBA_MAX_SECT (63 | ((1022 & 0x300) >> 2)) 5685 #define LBA_MAX_CYL (1022 & 0xFF) 5686 #define LBA_MAX_HEAD (254) 5687 5688 5689 /* 5690 * Function: sd_has_max_chs_vals 5691 * 5692 * Description: Return TRUE if Cylinder-Head-Sector values are all at maximum. 5693 * 5694 * Arguments: fdp - ptr to CHS info 5695 * 5696 * Return Code: True or false 5697 * 5698 * Context: Any. 5699 */ 5700 5701 static int 5702 sd_has_max_chs_vals(struct ipart *fdp) 5703 { 5704 return ((fdp->begcyl == LBA_MAX_CYL) && 5705 (fdp->beghead == LBA_MAX_HEAD) && 5706 (fdp->begsect == LBA_MAX_SECT) && 5707 (fdp->endcyl == LBA_MAX_CYL) && 5708 (fdp->endhead == LBA_MAX_HEAD) && 5709 (fdp->endsect == LBA_MAX_SECT)); 5710 } 5711 #endif 5712 5713 5714 /* 5715 * Function: sd_inq_fill 5716 * 5717 * Description: Print a piece of inquiry data, cleaned up for non-printable 5718 * characters and stopping at the first space character after 5719 * the beginning of the passed string; 5720 * 5721 * Arguments: p - source string 5722 * l - maximum length to copy 5723 * s - destination string 5724 * 5725 * Context: Any. 
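 *
 * Editorial example (hypothetical names inqp, inq_vid and buf):
 * given the 8-byte INQUIRY vendor id field "SEAGATE " (seven
 * characters plus a trailing space),
 *
 *	sd_inq_fill(inqp->inq_vid, 8, buf);
 *
 * leaves buf = "SEAGATE"; the copy stops at the trailing space, and
 * any non-printable bytes would have been replaced with '*'.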
5726 */ 5727 5728 static void 5729 sd_inq_fill(char *p, int l, char *s) 5730 { 5731 unsigned i = 0; 5732 char c; 5733 5734 while (i++ < l) { 5735 if ((c = *p++) < ' ' || c >= 0x7F) { 5736 c = '*'; 5737 } else if (i != 1 && c == ' ') { 5738 break; 5739 } 5740 *s++ = c; 5741 } 5742 *s++ = 0; 5743 } 5744 5745 5746 /* 5747 * Function: sd_register_devid 5748 * 5749 * Description: This routine will obtain the device id information from the 5750 * target, obtain the serial number, and register the device 5751 * id with the ddi framework. 5752 * 5753 * Arguments: devi - the system's dev_info_t for the device. 5754 * un - driver soft state (unit) structure 5755 * reservation_flag - indicates if a reservation conflict 5756 * occurred during attach 5757 * 5758 * Context: Kernel Thread 5759 */ 5760 static void 5761 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag) 5762 { 5763 int rval = 0; 5764 uchar_t *inq80 = NULL; 5765 size_t inq80_len = MAX_INQUIRY_SIZE; 5766 size_t inq80_resid = 0; 5767 uchar_t *inq83 = NULL; 5768 size_t inq83_len = MAX_INQUIRY_SIZE; 5769 size_t inq83_resid = 0; 5770 5771 ASSERT(un != NULL); 5772 ASSERT(mutex_owned(SD_MUTEX(un))); 5773 ASSERT((SD_DEVINFO(un)) == devi); 5774 5775 /* 5776 * This is the case of antiquated Sun disk drives that have the 5777 * FAB_DEVID property set in the disk_table. These drives 5778 * manage their devids by storing them in the last 2 available sectors 5779 * on the drive and have them fabricated by the ddi layer by calling 5780 * ddi_devid_init and passing the DEVID_FAB flag. 5781 */ 5782 if (un->un_f_opt_fab_devid == TRUE) { 5783 /* 5784 * Relying on EINVAL alone isn't reliable, since a reserved disk 5785 * may result in invalid geometry, so check to make sure a 5786 * reservation conflict did not occur during attach. 5787 */ 5788 if ((sd_get_devid(un) == EINVAL) && 5789 (reservation_flag != SD_TARGET_IS_RESERVED)) { 5790 /* 5791 * The devid is invalid AND there is no reservation 5792 * conflict. Fabricate a new devid. 5793 */ 5794 (void) sd_create_devid(un); 5795 } 5796 5797 /* Register the devid if it exists */ 5798 if (un->un_devid != NULL) { 5799 (void) ddi_devid_register(SD_DEVINFO(un), 5800 un->un_devid); 5801 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5802 "sd_register_devid: Devid Fabricated\n"); 5803 } 5804 return; 5805 } 5806 5807 /* 5808 * We check the availability of the World Wide Name (0x83) and Unit 5809 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 5810 * un_vpd_page_mask from them, we decide which way to get the WWN. If 5811 * 0x83 is available, that is the best choice. Our next choice is 5812 * 0x80. If neither is available, we munge the devid from the device 5813 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 5814 * to fabricate a devid for non-Sun qualified disks.
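 *
 * Editorial summary of the preference order just described:
 *
 *	1. INQUIRY VPD page 0x83 (device identification / WWN)
 *	2. INQUIRY VPD page 0x80 (unit serial number)
 *	3. vid/pid/serial # encoding (Sun qualified disks)
 *	4. ddi_devid_init(..., DEVID_FAB, ...) fabrication (the rest)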
5815 */ 5816 if (sd_check_vpd_page_support(un) == 0) { 5817 /* collect page 80 data if available */ 5818 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 5819 5820 mutex_exit(SD_MUTEX(un)); 5821 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 5822 rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len, 5823 0x01, 0x80, &inq80_resid); 5824 5825 if (rval != 0) { 5826 kmem_free(inq80, inq80_len); 5827 inq80 = NULL; 5828 inq80_len = 0; 5829 } 5830 mutex_enter(SD_MUTEX(un)); 5831 } 5832 5833 /* collect page 83 data if available */ 5834 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 5835 5836 mutex_exit(SD_MUTEX(un)); 5837 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 5838 rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len, 5839 0x01, 0x83, &inq83_resid); 5840 5841 if (rval != 0) { 5842 kmem_free(inq83, inq83_len); 5843 inq83 = NULL; 5844 inq83_len = 0; 5845 } 5846 mutex_enter(SD_MUTEX(un)); 5847 } 5848 } 5849 5850 /* encode best devid possible based on data available */ 5851 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 5852 (char *)ddi_driver_name(SD_DEVINFO(un)), 5853 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 5854 inq80, inq80_len - inq80_resid, inq83, inq83_len - 5855 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 5856 5857 /* devid successfully encoded, register devid */ 5858 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 5859 5860 } else { 5861 /* 5862 * Unable to encode a devid based on data available. 5863 * This is not a Sun qualified disk. Older Sun disk 5864 * drives that have the SD_FAB_DEVID property 5865 * set in the disk_table and non-Sun qualified 5866 * disks are treated in the same manner. These 5867 * drives manage their devids by storing them in 5868 * the last 2 available sectors on the drive and 5869 * have them fabricated by the ddi layer by 5870 * calling ddi_devid_init and passing the 5871 * DEVID_FAB flag. 5872 * Only create a fabricated devid if one 5873 * does not already exist. 5874 */ 5875 if (sd_get_devid(un) == EINVAL) { 5876 (void) sd_create_devid(un); 5877 un->un_f_opt_fab_devid = TRUE; 5878 } 5879 5880 /* Register the devid if it exists */ 5881 if (un->un_devid != NULL) { 5882 (void) ddi_devid_register(SD_DEVINFO(un), 5883 un->un_devid); 5884 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5885 "sd_register_devid: devid fabricated using " 5886 "ddi framework\n"); 5887 } 5888 } 5889 5890 /* clean up resources */ 5891 if (inq80 != NULL) { 5892 kmem_free(inq80, inq80_len); 5893 } 5894 if (inq83 != NULL) { 5895 kmem_free(inq83, inq83_len); 5896 } 5897 } 5898 5899 static daddr_t 5900 sd_get_devid_block(struct sd_lun *un) 5901 { 5902 daddr_t spc, blk, head, cyl; 5903 5904 if (un->un_blockcount <= DK_MAX_BLOCKS) { 5905 /* this geometry doesn't allow us to write a devid */ 5906 if (un->un_g.dkg_acyl < 2) { 5907 return (-1); 5908 } 5909 5910 /* 5911 * Subtracting 2 guarantees that the next to last cylinder 5912 * is used 5913 */ 5914 cyl = un->un_g.dkg_ncyl + un->un_g.dkg_acyl - 2; 5915 spc = un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5916 head = un->un_g.dkg_nhead - 1; 5917 blk = (cyl * (spc - un->un_g.dkg_apc)) + 5918 (head * un->un_g.dkg_nsect) + 1; 5919 } else { 5920 if (un->un_reserved != -1) { 5921 blk = un->un_map[un->un_reserved].dkl_cylno + 1; 5922 } else { 5923 return (-1); 5924 } 5925 } 5926 return (blk); 5927 } 5928 5929 /* 5930 * Function: sd_get_devid 5931 * 5932 * Description: This routine will return 0 if a valid device id has been 5933 * obtained from the target and stored in the soft state.
If a 5934 * valid device id has not been previously read and stored, a 5935 * read attempt will be made. 5936 * 5937 * Arguments: un - driver soft state (unit) structure 5938 * 5939 * Return Code: 0 if we successfully get the device id 5940 * 5941 * Context: Kernel Thread 5942 */ 5943 5944 static int 5945 sd_get_devid(struct sd_lun *un) 5946 { 5947 struct dk_devid *dkdevid; 5948 ddi_devid_t tmpid; 5949 uint_t *ip; 5950 size_t sz; 5951 daddr_t blk; 5952 int status; 5953 int chksum; 5954 int i; 5955 size_t buffer_size; 5956 5957 ASSERT(un != NULL); 5958 ASSERT(mutex_owned(SD_MUTEX(un))); 5959 5960 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 5961 un); 5962 5963 if (un->un_devid != NULL) { 5964 return (0); 5965 } 5966 5967 blk = sd_get_devid_block(un); 5968 if (blk < 0) 5969 return (EINVAL); 5970 5971 /* 5972 * Read and verify device id, stored in the reserved cylinders at the 5973 * end of the disk. Backup label is on the odd sectors of the last 5974 * track of the last cylinder. Device id will be on track of the next 5975 * to last cylinder. 5976 */ 5977 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 5978 mutex_exit(SD_MUTEX(un)); 5979 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 5980 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 5981 SD_PATH_DIRECT); 5982 if (status != 0) { 5983 goto error; 5984 } 5985 5986 /* Validate the revision */ 5987 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 5988 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 5989 status = EINVAL; 5990 goto error; 5991 } 5992 5993 /* Calculate the checksum */ 5994 chksum = 0; 5995 ip = (uint_t *)dkdevid; 5996 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 5997 i++) { 5998 chksum ^= ip[i]; 5999 } 6000 6001 /* Compare the checksums */ 6002 if (DKD_GETCHKSUM(dkdevid) != chksum) { 6003 status = EINVAL; 6004 goto error; 6005 } 6006 6007 /* Validate the device id */ 6008 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 6009 status = EINVAL; 6010 goto error; 6011 } 6012 6013 /* 6014 * Store the device id in the driver soft state 6015 */ 6016 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 6017 tmpid = kmem_alloc(sz, KM_SLEEP); 6018 6019 mutex_enter(SD_MUTEX(un)); 6020 6021 un->un_devid = tmpid; 6022 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 6023 6024 kmem_free(dkdevid, buffer_size); 6025 6026 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 6027 6028 return (status); 6029 error: 6030 mutex_enter(SD_MUTEX(un)); 6031 kmem_free(dkdevid, buffer_size); 6032 return (status); 6033 } 6034 6035 6036 /* 6037 * Function: sd_create_devid 6038 * 6039 * Description: This routine will fabricate the device id and write it 6040 * to the disk. 6041 * 6042 * Arguments: un - driver soft state (unit) structure 6043 * 6044 * Return Code: value of the fabricated device id 6045 * 6046 * Context: Kernel Thread 6047 */ 6048 6049 static ddi_devid_t 6050 sd_create_devid(struct sd_lun *un) 6051 { 6052 ASSERT(un != NULL); 6053 6054 /* Fabricate the devid */ 6055 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 6056 == DDI_FAILURE) { 6057 return (NULL); 6058 } 6059 6060 /* Write the devid to disk */ 6061 if (sd_write_deviceid(un) != 0) { 6062 ddi_devid_free(un->un_devid); 6063 un->un_devid = NULL; 6064 } 6065 6066 return (un->un_devid); 6067 } 6068 6069 6070 /* 6071 * Function: sd_write_deviceid 6072 * 6073 * Description: This routine will write the device id to the disk 6074 * reserved sector. 
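 * For disks at or below DK_MAX_BLOCKS, sd_get_devid_block() above
 * places that sector on the last head of the next-to-last cylinder.
 * Editorial example with hypothetical geometry (ncyl = 1000,
 * acyl = 2, nhead = 16, nsect = 63, apc = 0):
 *
 *	cyl = 1000 + 2 - 2 = 1000
 *	blk = 1000 * (16 * 63) + 15 * 63 + 1 = 1008946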
6075 * 6076 * Arguments: un - driver soft state (unit) structure 6077 * 6078 * Return Code: EINVAL 6079 * value returned by sd_send_scsi_cmd 6080 * 6081 * Context: Kernel Thread 6082 */ 6083 6084 static int 6085 sd_write_deviceid(struct sd_lun *un) 6086 { 6087 struct dk_devid *dkdevid; 6088 daddr_t blk; 6089 uint_t *ip, chksum; 6090 int status; 6091 int i; 6092 6093 ASSERT(mutex_owned(SD_MUTEX(un))); 6094 6095 blk = sd_get_devid_block(un); 6096 if (blk < 0) 6097 return (-1); 6098 mutex_exit(SD_MUTEX(un)); 6099 6100 /* Allocate the buffer */ 6101 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 6102 6103 /* Fill in the revision */ 6104 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 6105 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 6106 6107 /* Copy in the device id */ 6108 mutex_enter(SD_MUTEX(un)); 6109 bcopy(un->un_devid, &dkdevid->dkd_devid, 6110 ddi_devid_sizeof(un->un_devid)); 6111 mutex_exit(SD_MUTEX(un)); 6112 6113 /* Calculate the checksum */ 6114 chksum = 0; 6115 ip = (uint_t *)dkdevid; 6116 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 6117 i++) { 6118 chksum ^= ip[i]; 6119 } 6120 6121 /* Fill-in checksum */ 6122 DKD_FORMCHKSUM(chksum, dkdevid); 6123 6124 /* Write the reserved sector */ 6125 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk, 6126 SD_PATH_DIRECT); 6127 6128 kmem_free(dkdevid, un->un_sys_blocksize); 6129 6130 mutex_enter(SD_MUTEX(un)); 6131 return (status); 6132 } 6133 6134 6135 /* 6136 * Function: sd_check_vpd_page_support 6137 * 6138 * Description: This routine sends an inquiry command with the EVPD bit set and 6139 * a page code of 0x00 to the device. It is used to determine which 6140 * vital product pages are available to find the devid. We are 6141 * looking for pages 0x83 or 0x80. If we return -1, the 6142 * device does not support that command. 6143 * 6144 * Arguments: un - driver soft state (unit) structure 6145 * 6146 * Return Code: 0 - success 6147 * 1 - check condition 6148 * 6149 * Context: This routine can sleep. 6150 */ 6151 6152 static int 6153 sd_check_vpd_page_support(struct sd_lun *un) 6154 { 6155 uchar_t *page_list = NULL; 6156 uchar_t page_length = 0xff; /* Use max possible length */ 6157 uchar_t evpd = 0x01; /* Set the EVPD bit */ 6158 uchar_t page_code = 0x00; /* Supported VPD Pages */ 6159 int rval = 0; 6160 int counter; 6161 6162 ASSERT(un != NULL); 6163 ASSERT(mutex_owned(SD_MUTEX(un))); 6164 6165 mutex_exit(SD_MUTEX(un)); 6166 6167 /* 6168 * We'll set the page length to the maximum to save figuring it out 6169 * with an additional call. 6170 */ 6171 page_list = kmem_zalloc(page_length, KM_SLEEP); 6172 6173 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 6174 page_code, NULL); 6175 6176 mutex_enter(SD_MUTEX(un)); 6177 6178 /* 6179 * Now we must validate that the device accepted the command, as some 6180 * drives do not support it. If the drive does support it, we will 6181 * return 0, and the supported pages will be in un_vpd_page_mask. If 6182 * not, we return -1. 6183 */ 6184 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 6185 /* Loop to find one of the 2 pages we need */ 6186 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 6187 6188 /* 6189 * Pages are returned in ascending order, and 0x83 is what we 6190 * are hoping for.
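 *
 * Editorial example (hypothetical response): a page-0x00 reply of
 *
 *	{ 0x00, 0x00, 0x00, 0x03, 0x00, 0x80, 0x83 }
 *
 * declares a three-byte page list starting at byte 4, so the loop
 * below would set SD_VPD_SUPPORTED_PG, SD_VPD_UNIT_SERIAL_PG and
 * SD_VPD_DEVID_WWN_PG in un_vpd_page_mask.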
6191 */ 6192 while ((page_list[counter] <= 0x83) && 6193 (counter <= (page_list[VPD_PAGE_LENGTH] + 6194 VPD_HEAD_OFFSET))) { 6195 /* 6196 * Add 3 because page_list[3] is the number of 6197 * pages minus 3 6198 */ 6199 6200 switch (page_list[counter]) { 6201 case 0x00: 6202 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 6203 break; 6204 case 0x80: 6205 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 6206 break; 6207 case 0x81: 6208 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 6209 break; 6210 case 0x82: 6211 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 6212 break; 6213 case 0x83: 6214 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 6215 break; 6216 } 6217 counter++; 6218 } 6219 6220 } else { 6221 rval = -1; 6222 6223 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6224 "sd_check_vpd_page_support: This drive does not implement " 6225 "VPD pages.\n"); 6226 } 6227 6228 kmem_free(page_list, page_length); 6229 6230 return (rval); 6231 } 6232 6233 6234 /* 6235 * Function: sd_setup_pm 6236 * 6237 * Description: Initialize Power Management on the device 6238 * 6239 * Context: Kernel Thread 6240 */ 6241 6242 static void 6243 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 6244 { 6245 uint_t log_page_size; 6246 uchar_t *log_page_data; 6247 int rval; 6248 6249 /* 6250 * Since we are called from attach, holding a mutex for 6251 * un is unnecessary. Because some of the routines called 6252 * from here require SD_MUTEX to not be held, assert this 6253 * right up front. 6254 */ 6255 ASSERT(!mutex_owned(SD_MUTEX(un))); 6256 /* 6257 * Since the sd device does not have the 'reg' property, 6258 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 6259 * The following code is to tell cpr that this device 6260 * DOES need to be suspended and resumed. 6261 */ 6262 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 6263 "pm-hardware-state", "needs-suspend-resume"); 6264 6265 /* 6266 * Check if HBA has set the "pm-capable" property. 6267 * If "pm-capable" exists and is non-zero then we can 6268 * power manage the device without checking the start/stop 6269 * cycle count log sense page. 6270 * 6271 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0) 6272 * then we should not power manage the device. 6273 * 6274 * If "pm-capable" doesn't exist then un->un_pm_capable_prop will 6275 * be set to SD_PM_CAPABLE_UNDEFINED (-1). In this case, sd will 6276 * check the start/stop cycle count log sense page and power manage 6277 * the device if the cycle count limit has not been exceeded. 6278 */ 6279 un->un_pm_capable_prop = 6280 ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6281 "pm-capable", SD_PM_CAPABLE_UNDEFINED); 6282 if (un->un_pm_capable_prop != SD_PM_CAPABLE_UNDEFINED) { 6283 /* 6284 * pm-capable property exists. 6285 * 6286 * Convert "TRUE" values for un_pm_capable_prop to 6287 * SD_PM_CAPABLE_TRUE (1) to make it easier to check later. 6288 * "TRUE" values are any values except SD_PM_CAPABLE_FALSE (0) 6289 * and SD_PM_CAPABLE_UNDEFINED (-1) 6290 */ 6291 if (un->un_pm_capable_prop != SD_PM_CAPABLE_FALSE) { 6292 un->un_pm_capable_prop = SD_PM_CAPABLE_TRUE; 6293 } 6294 6295 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6296 "sd_unit_attach: un:0x%p pm-capable " 6297 "property set to %d.\n", un, un->un_pm_capable_prop); 6298 } 6299 6300 /* 6301 * This complies with the new power management framework 6302 * for certain desktop machines. Create the pm_components 6303 * property as a string array property. 
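 * Editorial note: the property created by sd_create_pm_components()
 * below is the three-string array
 *
 *	pm-components = "NAME=spindle-motor", "0=off", "1=on";
 *
 * i.e. a single component (the spindle motor) whose power level 0
 * means off and whose level 1 means on.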
6304 * 6305 * If this is a removable device or if the pm-capable property 6306 * is SD_PM_CAPABLE_TRUE (1) then we should create the 6307 * pm_components property without checking for the existence of 6308 * the start-stop cycle counter log page. 6309 */ 6310 if (ISREMOVABLE(un) || 6311 un->un_pm_capable_prop == SD_PM_CAPABLE_TRUE) { 6312 /* 6313 * Not all devices have a motor, so try it first. 6314 * Some devices may return ILLEGAL REQUEST; some 6315 * will hang. 6316 */ 6317 un->un_f_start_stop_supported = TRUE; 6318 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 6319 SD_PATH_DIRECT) != 0) { 6320 un->un_f_start_stop_supported = FALSE; 6321 } 6322 6323 /* 6324 * Create the pm properties anyway, otherwise the parent can't 6325 * go to sleep. 6326 */ 6327 (void) sd_create_pm_components(devi, un); 6328 un->un_f_pm_is_enabled = TRUE; 6329 6330 /* 6331 * Need to create a zero length (Boolean) property 6332 * removable-media for the removable media devices. 6333 * Note that the return value of the property is not being 6334 * checked, since if unable to create the property 6335 * then do not want the attach to fail altogether. Consistent 6336 * with other property creation in attach. 6337 */ 6338 if (ISREMOVABLE(un)) { 6339 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 6340 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 6341 } 6342 return; 6343 } 6344 6345 rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE); 6346 6347 #ifdef SDDEBUG 6348 if (sd_force_pm_supported) { 6349 /* Force a successful result */ 6350 rval = 1; 6351 } 6352 #endif 6353 6354 /* 6355 * If the start-stop cycle counter log page is not supported 6356 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 6357 * then we should not create the pm_components property. 6358 */ 6359 if (rval == -1 || un->un_pm_capable_prop == SD_PM_CAPABLE_FALSE) { 6360 /* 6361 * Error. 6362 * Reading log sense failed, most likely this is 6363 * an older drive that does not support log sense. 6364 * If this fails auto-pm is not supported. 6365 */ 6366 un->un_power_level = SD_SPINDLE_ON; 6367 un->un_f_pm_is_enabled = FALSE; 6368 6369 } else if (rval == 0) { 6370 /* 6371 * Page not found. 6372 * The start stop cycle counter is implemented as page 6373 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For 6374 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 6375 */ 6376 if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) { 6377 /* 6378 * Page found, use this one. 6379 */ 6380 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 6381 un->un_f_pm_is_enabled = TRUE; 6382 } else { 6383 /* 6384 * Error or page not found. 6385 * auto-pm is not supported for this device. 6386 */ 6387 un->un_power_level = SD_SPINDLE_ON; 6388 un->un_f_pm_is_enabled = FALSE; 6389 } 6390 } else { 6391 /* 6392 * Page found, use it. 6393 */ 6394 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 6395 un->un_f_pm_is_enabled = TRUE; 6396 } 6397 6398 6399 if (un->un_f_pm_is_enabled == TRUE) { 6400 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 6401 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 6402 6403 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 6404 log_page_size, un->un_start_stop_cycle_page, 6405 0x01, 0, SD_PATH_DIRECT); 6406 #ifdef SDDEBUG 6407 if (sd_force_pm_supported) { 6408 /* Force a successful result */ 6409 rval = 0; 6410 } 6411 #endif 6412 6413 /* 6414 * If the Log Sense for the start/stop cycle counter page 6415 * succeeds, then power management is supported and we can 6416 * enable auto-pm.
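 *
 * Editorial summary of the page selection above, keyed on the
 * sd_log_page_supported() return value:
 *
 *	-1 -> LOG SENSE unsupported; auto-pm stays disabled
 *	 0 -> page 0xE absent; fall back to VU page 0x31
 *	 1 -> page 0xE (START_STOP_CYCLE_PAGE) is used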
6417 */ 6418 if (rval == 0) { 6419 (void) sd_create_pm_components(devi, un); 6420 } else { 6421 un->un_power_level = SD_SPINDLE_ON; 6422 un->un_f_pm_is_enabled = FALSE; 6423 } 6424 6425 kmem_free(log_page_data, log_page_size); 6426 } 6427 } 6428 6429 6430 /* 6431 * Function: sd_create_pm_components 6432 * 6433 * Description: Initialize PM property. 6434 * 6435 * Context: Kernel thread context 6436 */ 6437 6438 static void 6439 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 6440 { 6441 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 6442 6443 ASSERT(!mutex_owned(SD_MUTEX(un))); 6444 6445 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 6446 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 6447 /* 6448 * When components are initially created they are idle; 6449 * power up any non-removables. 6450 * Note: the return value of pm_raise_power can't be used 6451 * for determining if PM should be enabled for this device. 6452 * Even if you check the return values and remove this 6453 * property created above, the PM framework will not honor the 6454 * change after the first call to pm_raise_power. Hence, 6455 * removal of that property does not help if pm_raise_power 6456 * fails. In the case of removable media, the start/stop 6457 * will fail if the media is not present. 6458 */ 6459 if ((!ISREMOVABLE(un)) && (pm_raise_power(SD_DEVINFO(un), 0, 6460 SD_SPINDLE_ON) == DDI_SUCCESS)) { 6461 mutex_enter(SD_MUTEX(un)); 6462 un->un_power_level = SD_SPINDLE_ON; 6463 mutex_enter(&un->un_pm_mutex); 6464 /* Set to on and not busy. */ 6465 un->un_pm_count = 0; 6466 } else { 6467 mutex_enter(SD_MUTEX(un)); 6468 un->un_power_level = SD_SPINDLE_OFF; 6469 mutex_enter(&un->un_pm_mutex); 6470 /* Set to off. */ 6471 un->un_pm_count = -1; 6472 } 6473 mutex_exit(&un->un_pm_mutex); 6474 mutex_exit(SD_MUTEX(un)); 6475 } else { 6476 un->un_power_level = SD_SPINDLE_ON; 6477 un->un_f_pm_is_enabled = FALSE; 6478 } 6479 } 6480 6481 6482 /* 6483 * Function: sd_ddi_suspend 6484 * 6485 * Description: Performs system power-down operations. This includes 6486 * setting the drive state to indicate it is suspended so 6487 * that no new commands will be accepted. Also, wait for 6488 * all commands that are in transport or queued to a timer 6489 * for retry to complete. All timeout threads are cancelled. 6490 * 6491 * Return Code: DDI_FAILURE or DDI_SUCCESS 6492 * 6493 * Context: Kernel thread context 6494 */ 6495 6496 static int 6497 sd_ddi_suspend(dev_info_t *devi) 6498 { 6499 struct sd_lun *un; 6500 clock_t wait_cmds_complete; 6501 6502 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6503 if (un == NULL) { 6504 return (DDI_FAILURE); 6505 } 6506 6507 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 6508 6509 mutex_enter(SD_MUTEX(un)); 6510 6511 /* Return success if the device is already suspended. */ 6512 if (un->un_state == SD_STATE_SUSPENDED) { 6513 mutex_exit(SD_MUTEX(un)); 6514 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6515 "device already suspended, exiting\n"); 6516 return (DDI_SUCCESS); 6517 } 6518 6519 /* Return failure if the device is being used by HA */ 6520 if (un->un_resvd_status & 6521 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 6522 mutex_exit(SD_MUTEX(un)); 6523 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6524 "device in use by HA, exiting\n"); 6525 return (DDI_FAILURE); 6526 } 6527 6528 /* 6529 * Return failure if the device is in a resource wait 6530 * or power changing state.
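 *
 * Editorial summary of the sd_ddi_suspend() entry checks so far:
 *
 *	already SD_STATE_SUSPENDED		-> DDI_SUCCESS
 *	HA reservation (SD_RESERVE | SD_WANT_RESERVE |
 *	    SD_LOST_RESERVE)			-> DDI_FAILURE
 *	SD_STATE_RWAIT or SD_STATE_PM_CHANGING	-> DDI_FAILURE
 *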
6531 */ 6532 if ((un->un_state == SD_STATE_RWAIT) || 6533 (un->un_state == SD_STATE_PM_CHANGING)) { 6534 mutex_exit(SD_MUTEX(un)); 6535 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6536 "device in resource wait state, exiting\n"); 6537 return (DDI_FAILURE); 6538 } 6539 6540 6541 un->un_save_state = un->un_last_state; 6542 New_state(un, SD_STATE_SUSPENDED); 6543 6544 /* 6545 * Wait for all commands that are in transport or queued to a timer 6546 * for retry to complete. 6547 * 6548 * While waiting, no new commands will be accepted or sent because of 6549 * the new state we set above. 6550 * 6551 * Wait till current operation has completed. If we are in the resource 6552 * wait state (with an intr outstanding) then we need to wait till the 6553 * intr completes and starts the next cmd. We want to wait for 6554 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 6555 */ 6556 wait_cmds_complete = ddi_get_lbolt() + 6557 (sd_wait_cmds_complete * drv_usectohz(1000000)); 6558 6559 while (un->un_ncmds_in_transport != 0) { 6560 /* 6561 * Fail if commands do not finish in the specified time. 6562 */ 6563 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 6564 wait_cmds_complete) == -1) { 6565 /* 6566 * Undo the state changes made above. Everything 6567 * must go back to its original value. 6568 */ 6569 Restore_state(un); 6570 un->un_last_state = un->un_save_state; 6571 /* Wake up any threads that might be waiting. */ 6572 cv_broadcast(&un->un_suspend_cv); 6573 mutex_exit(SD_MUTEX(un)); 6574 SD_ERROR(SD_LOG_IO_PM, un, 6575 "sd_ddi_suspend: failed due to outstanding cmds\n"); 6576 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 6577 return (DDI_FAILURE); 6578 } 6579 } 6580 6581 /* 6582 * Cancel SCSI watch thread and timeouts, if any are active 6583 */ 6584 6585 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 6586 opaque_t temp_token = un->un_swr_token; 6587 mutex_exit(SD_MUTEX(un)); 6588 scsi_watch_suspend(temp_token); 6589 mutex_enter(SD_MUTEX(un)); 6590 } 6591 6592 if (un->un_reset_throttle_timeid != NULL) { 6593 timeout_id_t temp_id = un->un_reset_throttle_timeid; 6594 un->un_reset_throttle_timeid = NULL; 6595 mutex_exit(SD_MUTEX(un)); 6596 (void) untimeout(temp_id); 6597 mutex_enter(SD_MUTEX(un)); 6598 } 6599 6600 if (un->un_dcvb_timeid != NULL) { 6601 timeout_id_t temp_id = un->un_dcvb_timeid; 6602 un->un_dcvb_timeid = NULL; 6603 mutex_exit(SD_MUTEX(un)); 6604 (void) untimeout(temp_id); 6605 mutex_enter(SD_MUTEX(un)); 6606 } 6607 6608 mutex_enter(&un->un_pm_mutex); 6609 if (un->un_pm_timeid != NULL) { 6610 timeout_id_t temp_id = un->un_pm_timeid; 6611 un->un_pm_timeid = NULL; 6612 mutex_exit(&un->un_pm_mutex); 6613 mutex_exit(SD_MUTEX(un)); 6614 (void) untimeout(temp_id); 6615 mutex_enter(SD_MUTEX(un)); 6616 } else { 6617 mutex_exit(&un->un_pm_mutex); 6618 } 6619 6620 if (un->un_retry_timeid != NULL) { 6621 timeout_id_t temp_id = un->un_retry_timeid; 6622 un->un_retry_timeid = NULL; 6623 mutex_exit(SD_MUTEX(un)); 6624 (void) untimeout(temp_id); 6625 mutex_enter(SD_MUTEX(un)); 6626 } 6627 6628 if (un->un_direct_priority_timeid != NULL) { 6629 timeout_id_t temp_id = un->un_direct_priority_timeid; 6630 un->un_direct_priority_timeid = NULL; 6631 mutex_exit(SD_MUTEX(un)); 6632 (void) untimeout(temp_id); 6633 mutex_enter(SD_MUTEX(un)); 6634 } 6635 6636 if (un->un_f_is_fibre == TRUE) { 6637 /* 6638 * Remove callbacks for insert and remove events 6639 */ 6640 if (un->un_insert_event != NULL) { 6641 mutex_exit(SD_MUTEX(un)); 6642 (void) ddi_remove_event_handler(un->un_insert_cb_id); 6643
mutex_enter(SD_MUTEX(un)); 6644 un->un_insert_event = NULL; 6645 } 6646 6647 if (un->un_remove_event != NULL) { 6648 mutex_exit(SD_MUTEX(un)); 6649 (void) ddi_remove_event_handler(un->un_remove_cb_id); 6650 mutex_enter(SD_MUTEX(un)); 6651 un->un_remove_event = NULL; 6652 } 6653 } 6654 6655 mutex_exit(SD_MUTEX(un)); 6656 6657 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 6658 6659 return (DDI_SUCCESS); 6660 } 6661 6662 6663 /* 6664 * Function: sd_ddi_pm_suspend 6665 * 6666 * Description: Set the drive state to low power. 6667 * Someone else is required to actually change the drive 6668 * power level. 6669 * 6670 * Arguments: un - driver soft state (unit) structure 6671 * 6672 * Return Code: DDI_FAILURE or DDI_SUCCESS 6673 * 6674 * Context: Kernel thread context 6675 */ 6676 6677 static int 6678 sd_ddi_pm_suspend(struct sd_lun *un) 6679 { 6680 ASSERT(un != NULL); 6681 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n"); 6682 6683 ASSERT(!mutex_owned(SD_MUTEX(un))); 6684 mutex_enter(SD_MUTEX(un)); 6685 6686 /* 6687 * Exit if power management is not enabled for this device, or if 6688 * the device is being used by HA. 6689 */ 6690 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 6691 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 6692 mutex_exit(SD_MUTEX(un)); 6693 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n"); 6694 return (DDI_SUCCESS); 6695 } 6696 6697 SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n", 6698 un->un_ncmds_in_driver); 6699 6700 /* 6701 * See if the device is not busy, ie.: 6702 * - we have no commands in the driver for this device 6703 * - not waiting for resources 6704 */ 6705 if ((un->un_ncmds_in_driver == 0) && 6706 (un->un_state != SD_STATE_RWAIT)) { 6707 /* 6708 * The device is not busy, so it is OK to go to low power state. 6709 * Indicate low power, but rely on someone else to actually 6710 * change it. 6711 */ 6712 mutex_enter(&un->un_pm_mutex); 6713 un->un_pm_count = -1; 6714 mutex_exit(&un->un_pm_mutex); 6715 un->un_power_level = SD_SPINDLE_OFF; 6716 } 6717 6718 mutex_exit(SD_MUTEX(un)); 6719 6720 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n"); 6721 6722 return (DDI_SUCCESS); 6723 } 6724 6725 6726 /* 6727 * Function: sd_ddi_resume 6728 * 6729 * Description: Performs system power-up operations. 6730 * 6731 * Return Code: DDI_SUCCESS 6732 * DDI_FAILURE 6733 * 6734 * Context: Kernel thread context 6735 */ 6736 6737 static int 6738 sd_ddi_resume(dev_info_t *devi) 6739 { 6740 struct sd_lun *un; 6741 6742 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6743 if (un == NULL) { 6744 return (DDI_FAILURE); 6745 } 6746 6747 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 6748 6749 mutex_enter(SD_MUTEX(un)); 6750 Restore_state(un); 6751 6752 /* 6753 * Restore the state which was saved to give 6754 * the right state in un_last_state 6755 */ 6756 un->un_last_state = un->un_save_state; 6757 /* 6758 * Note: throttle comes back at full. 6759 * Also note: this MUST be done before calling pm_raise_power 6760 * otherwise the system can get hung in biowait. The scenario where 6761 * this'll happen is under cpr suspend. Writing of the system 6762 * state goes through sddump, which writes 0 to un_throttle. If 6763 * writing the system state then fails, for example if the partition is 6764 * too small, then cpr attempts a resume.
If throttle isn't restored 6765 * from the saved value until after calling pm_raise_power then 6766 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 6767 * in biowait. 6768 */ 6769 un->un_throttle = un->un_saved_throttle; 6770 6771 /* 6772 * The chance of failure is very rare as the only command done in power 6773 * entry point is START command when you transition from 0->1 or 6774 * unknown->1. Put it to SPINDLE ON state irrespective of the state at 6775 * which suspend was done. Ignore the return value as the resume should 6776 * not be failed. In the case of removable media the media need not be 6777 * inserted and hence there is a chance that raise power will fail with 6778 * media not present. 6779 */ 6780 if (!ISREMOVABLE(un)) { 6781 mutex_exit(SD_MUTEX(un)); 6782 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 6783 mutex_enter(SD_MUTEX(un)); 6784 } 6785 6786 /* 6787 * Don't broadcast to the suspend cv and therefore possibly 6788 * start I/O until after power has been restored. 6789 */ 6790 cv_broadcast(&un->un_suspend_cv); 6791 cv_broadcast(&un->un_state_cv); 6792 6793 /* restart thread */ 6794 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 6795 scsi_watch_resume(un->un_swr_token); 6796 } 6797 6798 #if (defined(__fibre)) 6799 if (un->un_f_is_fibre == TRUE) { 6800 /* 6801 * Add callbacks for insert and remove events 6802 */ 6803 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 6804 sd_init_event_callbacks(un); 6805 } 6806 } 6807 #endif 6808 6809 /* 6810 * Transport any pending commands to the target. 6811 * 6812 * If this is a low-activity device commands in queue will have to wait 6813 * until new commands come in, which may take awhile. Also, we 6814 * specifically don't check un_ncmds_in_transport because we know that 6815 * there really are no commands in progress after the unit was 6816 * suspended and we could have reached the throttle level, been 6817 * suspended, and have no new commands coming in for awhile. Highly 6818 * unlikely, but so is the low-activity disk scenario. 6819 */ 6820 ddi_xbuf_dispatch(un->un_xbuf_attr); 6821 6822 sd_start_cmds(un, NULL); 6823 mutex_exit(SD_MUTEX(un)); 6824 6825 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 6826 6827 return (DDI_SUCCESS); 6828 } 6829 6830 6831 /* 6832 * Function: sd_ddi_pm_resume 6833 * 6834 * Description: Set the drive state to powered on. 6835 * Someone else is required to actually change the drive 6836 * power level. 6837 * 6838 * Arguments: un - driver soft state (unit) structure 6839 * 6840 * Return Code: DDI_SUCCESS 6841 * 6842 * Context: Kernel thread context 6843 */ 6844 6845 static int 6846 sd_ddi_pm_resume(struct sd_lun *un) 6847 { 6848 ASSERT(un != NULL); 6849 6850 ASSERT(!mutex_owned(SD_MUTEX(un))); 6851 mutex_enter(SD_MUTEX(un)); 6852 un->un_power_level = SD_SPINDLE_ON; 6853 6854 ASSERT(!mutex_owned(&un->un_pm_mutex)); 6855 mutex_enter(&un->un_pm_mutex); 6856 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 6857 un->un_pm_count++; 6858 ASSERT(un->un_pm_count == 0); 6859 /* 6860 * Note: no longer do the cv_broadcast on un_suspend_cv. The 6861 * un_suspend_cv is for a system resume, not a power management 6862 * device resume. (4297749) 6863 * cv_broadcast(&un->un_suspend_cv); 6864 */ 6865 } 6866 mutex_exit(&un->un_pm_mutex); 6867 mutex_exit(SD_MUTEX(un)); 6868 6869 return (DDI_SUCCESS); 6870 } 6871 6872 6873 /* 6874 * Function: sd_pm_idletimeout_handler 6875 * 6876 * Description: A timer routine that's active only while a device is busy. 
6877 * The purpose is to extend slightly the pm framework's busy 6878 * view of the device to prevent busy/idle thrashing for 6879 * back-to-back commands. Do this by comparing the current time 6880 * to the time at which the last command completed and when the 6881 * difference is greater than sd_pm_idletime, call 6882 * pm_idle_component. In addition to indicating idle to the pm 6883 * framework, update the chain type to again use the internal pm 6884 * layers of the driver. 6885 * 6886 * Arguments: arg - driver soft state (unit) structure 6887 * 6888 * Context: Executes in a timeout(9F) thread context 6889 */ 6890 6891 static void 6892 sd_pm_idletimeout_handler(void *arg) 6893 { 6894 struct sd_lun *un = arg; 6895 6896 time_t now; 6897 6898 mutex_enter(&sd_detach_mutex); 6899 if (un->un_detach_count != 0) { 6900 /* Abort if the instance is detaching */ 6901 mutex_exit(&sd_detach_mutex); 6902 return; 6903 } 6904 mutex_exit(&sd_detach_mutex); 6905 6906 now = ddi_get_time(); 6907 /* 6908 * Grab both mutexes, in the proper order, since we're accessing 6909 * both PM and softstate variables. 6910 */ 6911 mutex_enter(SD_MUTEX(un)); 6912 mutex_enter(&un->un_pm_mutex); 6913 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 6914 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 6915 /* 6916 * Update the chain types. 6917 * This takes affect on the next new command received. 6918 */ 6919 if (ISREMOVABLE(un)) { 6920 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 6921 } else { 6922 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 6923 } 6924 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 6925 6926 SD_TRACE(SD_LOG_IO_PM, un, 6927 "sd_pm_idletimeout_handler: idling device\n"); 6928 (void) pm_idle_component(SD_DEVINFO(un), 0); 6929 un->un_pm_idle_timeid = NULL; 6930 } else { 6931 un->un_pm_idle_timeid = 6932 timeout(sd_pm_idletimeout_handler, un, 6933 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 6934 } 6935 mutex_exit(&un->un_pm_mutex); 6936 mutex_exit(SD_MUTEX(un)); 6937 } 6938 6939 6940 /* 6941 * Function: sd_pm_timeout_handler 6942 * 6943 * Description: Callback to tell framework we are idle. 6944 * 6945 * Context: timeout(9f) thread context. 6946 */ 6947 6948 static void 6949 sd_pm_timeout_handler(void *arg) 6950 { 6951 struct sd_lun *un = arg; 6952 6953 (void) pm_idle_component(SD_DEVINFO(un), 0); 6954 mutex_enter(&un->un_pm_mutex); 6955 un->un_pm_timeid = NULL; 6956 mutex_exit(&un->un_pm_mutex); 6957 } 6958 6959 6960 /* 6961 * Function: sdpower 6962 * 6963 * Description: PM entry point. 6964 * 6965 * Return Code: DDI_SUCCESS 6966 * DDI_FAILURE 6967 * 6968 * Context: Kernel thread context 6969 */ 6970 6971 static int 6972 sdpower(dev_info_t *devi, int component, int level) 6973 { 6974 struct sd_lun *un; 6975 int instance; 6976 int rval = DDI_SUCCESS; 6977 uint_t i, log_page_size, maxcycles, ncycles; 6978 uchar_t *log_page_data; 6979 int log_sense_page; 6980 int medium_present; 6981 time_t intvlp; 6982 dev_t dev; 6983 struct pm_trans_data sd_pm_tran_data; 6984 uchar_t save_state; 6985 int sval; 6986 uchar_t state_before_pm; 6987 int got_semaphore_here; 6988 6989 instance = ddi_get_instance(devi); 6990 6991 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 6992 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 6993 component != 0) { 6994 return (DDI_FAILURE); 6995 } 6996 6997 dev = sd_make_device(SD_DEVINFO(un)); 6998 6999 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 7000 7001 /* 7002 * Must synchronize power down with close. 
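 * (A minimal sketch of the pattern spelled out below, assuming nothing beyond the calls this routine already makes: "int got = sema_tryp(&un->un_semoclose);" acquires the semaphore only if that can be done without blocking, returning nonzero on success; on every exit path, "if (got != 0) sema_v(&un->un_semoclose);" releases it only if it was acquired here.)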
* Attempt to decrement/acquire the open/close semaphore, 7004 * but do NOT wait on it. If it's not greater than zero, 7005 * i.e., it can't be decremented without waiting, then 7006 * someone else, either open or close, already has it 7007 * and the try returns 0. Use that knowledge here to determine 7008 * if it's OK to change the device power level. 7009 * Also, only increment it on exit if it was decremented, i.e., acquired, 7010 * here. 7011 */ 7012 got_semaphore_here = sema_tryp(&un->un_semoclose); 7013 7014 mutex_enter(SD_MUTEX(un)); 7015 7016 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n", 7017 un->un_ncmds_in_driver); 7018 7019 /* 7020 * If un_ncmds_in_driver is non-zero, commands are already being 7021 * processed in the driver; if the semaphore was not obtained here, 7022 * an open or close is in progress. Either way the concurrent request 7023 * to go to low power cannot be honored, so we need to return 7024 * failure. 7025 */ 7026 if ((level == SD_SPINDLE_OFF) && 7027 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) { 7028 mutex_exit(SD_MUTEX(un)); 7029 7030 if (got_semaphore_here != 0) { 7031 sema_v(&un->un_semoclose); 7032 } 7033 SD_TRACE(SD_LOG_IO_PM, un, 7034 "sdpower: exit, device has queued cmds.\n"); 7035 return (DDI_FAILURE); 7036 } 7037 7038 /* 7039 * If the device is OFFLINE, the disk is effectively dead; changing 7040 * its power state means sending it commands, which would fail anyway, 7041 * so simply return failure here. 7042 * 7043 * Power changes to a device that's OFFLINE or SUSPENDED 7044 * are not allowed. 7045 */ 7046 if ((un->un_state == SD_STATE_OFFLINE) || 7047 (un->un_state == SD_STATE_SUSPENDED)) { 7048 mutex_exit(SD_MUTEX(un)); 7049 7050 if (got_semaphore_here != 0) { 7051 sema_v(&un->un_semoclose); 7052 } 7053 SD_TRACE(SD_LOG_IO_PM, un, 7054 "sdpower: exit, device is off-line.\n"); 7055 return (DDI_FAILURE); 7056 } 7057 7058 /* 7059 * Change the device's state to indicate its power level 7060 * is being changed. Do this to prevent a power off in the 7061 * middle of commands, which is especially bad on devices 7062 * that are really powered off instead of just spun down. 7063 */ 7064 state_before_pm = un->un_state; 7065 un->un_state = SD_STATE_PM_CHANGING; 7066 7067 mutex_exit(SD_MUTEX(un)); 7068 7069 /* 7070 * Bypass checking the log sense information for removables 7071 * and devices for which the HBA set the pm-capable property. 7072 * If un->un_pm_capable_prop is SD_PM_CAPABLE_UNDEFINED (-1) 7073 * then the HBA did not create the property. 7074 */ 7075 if ((level == SD_SPINDLE_OFF) && (!ISREMOVABLE(un)) && 7076 un->un_pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) { 7077 /* 7078 * Get the log sense information to understand whether 7079 * the power cycle counts have gone beyond the threshold.
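 * (Worked example of the decoding done below: maxcycles is rebuilt from four big-endian bytes, so log_page_data[0x1c..0x1f] of 0x00 0x01 0x86 0xA0 yields (0x00 << 24) | (0x01 << 16) | (0x86 << 8) | 0xA0 = 100000 lifetime cycles; ncycles at offsets 0x24..0x27 is decoded the same way. The byte values here are made up for illustration.)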
7080 */ 7081 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 7082 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 7083 7084 mutex_enter(SD_MUTEX(un)); 7085 log_sense_page = un->un_start_stop_cycle_page; 7086 mutex_exit(SD_MUTEX(un)); 7087 7088 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 7089 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT); 7090 #ifdef SDDEBUG 7091 if (sd_force_pm_supported) { 7092 /* Force a successful result */ 7093 rval = 0; 7094 } 7095 #endif 7096 if (rval != 0) { 7097 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 7098 "Log Sense Failed\n"); 7099 kmem_free(log_page_data, log_page_size); 7100 /* Cannot support power management on those drives */ 7101 7102 if (got_semaphore_here != 0) { 7103 sema_v(&un->un_semoclose); 7104 } 7105 /* 7106 * On exit put the state back to it's original value 7107 * and broadcast to anyone waiting for the power 7108 * change completion. 7109 */ 7110 mutex_enter(SD_MUTEX(un)); 7111 un->un_state = state_before_pm; 7112 cv_broadcast(&un->un_suspend_cv); 7113 mutex_exit(SD_MUTEX(un)); 7114 SD_TRACE(SD_LOG_IO_PM, un, 7115 "sdpower: exit, Log Sense Failed.\n"); 7116 return (DDI_FAILURE); 7117 } 7118 7119 /* 7120 * From the page data - Convert the essential information to 7121 * pm_trans_data 7122 */ 7123 maxcycles = 7124 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) | 7125 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 7126 7127 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 7128 7129 ncycles = 7130 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 7131 (log_page_data[0x26] << 8) | log_page_data[0x27]; 7132 7133 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 7134 7135 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 7136 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 7137 log_page_data[8+i]; 7138 } 7139 7140 kmem_free(log_page_data, log_page_size); 7141 7142 /* 7143 * Call pm_trans_check routine to get the Ok from 7144 * the global policy 7145 */ 7146 7147 sd_pm_tran_data.format = DC_SCSI_FORMAT; 7148 sd_pm_tran_data.un.scsi_cycles.flag = 0; 7149 7150 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 7151 #ifdef SDDEBUG 7152 if (sd_force_pm_supported) { 7153 /* Force a successful result */ 7154 rval = 1; 7155 } 7156 #endif 7157 switch (rval) { 7158 case 0: 7159 /* 7160 * Not Ok to Power cycle or error in parameters passed 7161 * Would have given the advised time to consider power 7162 * cycle. Based on the new intvlp parameter we are 7163 * supposed to pretend we are busy so that pm framework 7164 * will never call our power entry point. Because of 7165 * that install a timeout handler and wait for the 7166 * recommended time to elapse so that power management 7167 * can be effective again. 7168 * 7169 * To effect this behavior, call pm_busy_component to 7170 * indicate to the framework this device is busy. 7171 * By not adjusting un_pm_count the rest of PM in 7172 * the driver will function normally, and independant 7173 * of this but because the framework is told the device 7174 * is busy it won't attempt powering down until it gets 7175 * a matching idle. The timeout handler sends this. 7176 * Note: sd_pm_entry can't be called here to do this 7177 * because sdpower may have been called as a result 7178 * of a call to pm_raise_power from within sd_pm_entry. 7179 * 7180 * If a timeout handler is already active then 7181 * don't install another. 
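 * (Worked example for the conversion below, assuming hz = 100: drv_usectohz(1000000) returns one second's worth of clock ticks, i.e. 100, so an advised interval of intvlp = 3600 seconds arms the timeout for 3600 * 100 = 360000 ticks, about one hour.)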
*/ 7183 mutex_enter(&un->un_pm_mutex); 7184 if (un->un_pm_timeid == NULL) { 7185 un->un_pm_timeid = 7186 timeout(sd_pm_timeout_handler, 7187 un, intvlp * drv_usectohz(1000000)); 7188 mutex_exit(&un->un_pm_mutex); 7189 (void) pm_busy_component(SD_DEVINFO(un), 0); 7190 } else { 7191 mutex_exit(&un->un_pm_mutex); 7192 } 7193 if (got_semaphore_here != 0) { 7194 sema_v(&un->un_semoclose); 7195 } 7196 /* 7197 * On exit put the state back to its original value 7198 * and broadcast to anyone waiting for the power 7199 * change completion. 7200 */ 7201 mutex_enter(SD_MUTEX(un)); 7202 un->un_state = state_before_pm; 7203 cv_broadcast(&un->un_suspend_cv); 7204 mutex_exit(SD_MUTEX(un)); 7205 7206 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, " 7207 "trans check Failed, not ok to power cycle.\n"); 7208 return (DDI_FAILURE); 7209 7210 case -1: 7211 if (got_semaphore_here != 0) { 7212 sema_v(&un->un_semoclose); 7213 } 7214 /* 7215 * On exit put the state back to its original value 7216 * and broadcast to anyone waiting for the power 7217 * change completion. 7218 */ 7219 mutex_enter(SD_MUTEX(un)); 7220 un->un_state = state_before_pm; 7221 cv_broadcast(&un->un_suspend_cv); 7222 mutex_exit(SD_MUTEX(un)); 7223 SD_TRACE(SD_LOG_IO_PM, un, 7224 "sdpower: exit, trans check command Failed.\n"); 7225 return (DDI_FAILURE); 7226 } 7227 } 7228 7229 if (level == SD_SPINDLE_OFF) { 7230 /* 7231 * Save the last state; if the STOP fails we need it 7232 * for restoring. 7233 */ 7234 mutex_enter(SD_MUTEX(un)); 7235 save_state = un->un_last_state; 7236 /* 7237 * There must not be any cmds getting processed 7238 * in the driver when we get here. Power to the 7239 * device is potentially going off. 7240 */ 7241 ASSERT(un->un_ncmds_in_driver == 0); 7242 mutex_exit(SD_MUTEX(un)); 7243 7244 /* 7245 * For now, suspend the device completely before the spindle is 7246 * turned off. 7247 */ 7248 if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) { 7249 if (got_semaphore_here != 0) { 7250 sema_v(&un->un_semoclose); 7251 } 7252 /* 7253 * On exit put the state back to its original value 7254 * and broadcast to anyone waiting for the power 7255 * change completion. 7256 */ 7257 mutex_enter(SD_MUTEX(un)); 7258 un->un_state = state_before_pm; 7259 cv_broadcast(&un->un_suspend_cv); 7260 mutex_exit(SD_MUTEX(un)); 7261 SD_TRACE(SD_LOG_IO_PM, un, 7262 "sdpower: exit, PM suspend Failed.\n"); 7263 return (DDI_FAILURE); 7264 } 7265 } 7266 7267 /* 7268 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open, 7269 * close, or strategy. Dump no longer uses this routine; it uses its 7270 * own code so it can be done in polled mode. 7271 */ 7272 7273 medium_present = TRUE; 7274 7275 /* 7276 * When powering up, issue a TUR in case the device is at unit 7277 * attention. Don't do retries. Bypass the PM layer, otherwise 7278 * a deadlock on un_pm_busy_cv will occur. 7279 */ 7280 if (level == SD_SPINDLE_ON) { 7281 (void) sd_send_scsi_TEST_UNIT_READY(un, 7282 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 7283 } 7284 7285 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 7286 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 7287 7288 sval = sd_send_scsi_START_STOP_UNIT(un, 7289 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP), 7290 SD_PATH_DIRECT); 7291 /* Command failed, check for media present.
*/ 7292 if ((sval == ENXIO) && ISREMOVABLE(un)) { 7293 medium_present = FALSE; 7294 } 7295 7296 /* 7297 * The conditions of interest here are: 7298 * if a spindle off with media present fails, 7299 * then restore the state and return an error. 7300 * else if a spindle on fails, 7301 * then return an error (there's no state to restore). 7302 * In all other cases we setup for the new state 7303 * and return success. 7304 */ 7305 switch (level) { 7306 case SD_SPINDLE_OFF: 7307 if ((medium_present == TRUE) && (sval != 0)) { 7308 /* The stop command from above failed */ 7309 rval = DDI_FAILURE; 7310 /* 7311 * The stop command failed, and we have media 7312 * present. Put the level back by calling the 7313 * sd_pm_resume() and set the state back to 7314 * it's previous value. 7315 */ 7316 (void) sd_ddi_pm_resume(un); 7317 mutex_enter(SD_MUTEX(un)); 7318 un->un_last_state = save_state; 7319 mutex_exit(SD_MUTEX(un)); 7320 break; 7321 } 7322 /* 7323 * The stop command from above succeeded. 7324 */ 7325 if (ISREMOVABLE(un)) { 7326 /* 7327 * Terminate watch thread in case of removable media 7328 * devices going into low power state. This is as per 7329 * the requirements of pm framework, otherwise commands 7330 * will be generated for the device (through watch 7331 * thread), even when the device is in low power state. 7332 */ 7333 mutex_enter(SD_MUTEX(un)); 7334 un->un_f_watcht_stopped = FALSE; 7335 if (un->un_swr_token != NULL) { 7336 opaque_t temp_token = un->un_swr_token; 7337 un->un_f_watcht_stopped = TRUE; 7338 un->un_swr_token = NULL; 7339 mutex_exit(SD_MUTEX(un)); 7340 (void) scsi_watch_request_terminate(temp_token, 7341 SCSI_WATCH_TERMINATE_WAIT); 7342 } else { 7343 mutex_exit(SD_MUTEX(un)); 7344 } 7345 } 7346 break; 7347 7348 default: /* The level requested is spindle on... */ 7349 /* 7350 * Legacy behavior: return success on a failed spinup 7351 * if there is no media in the drive. 7352 * Do this by looking at medium_present here. 7353 */ 7354 if ((sval != 0) && medium_present) { 7355 /* The start command from above failed */ 7356 rval = DDI_FAILURE; 7357 break; 7358 } 7359 /* 7360 * The start command from above succeeded 7361 * Resume the devices now that we have 7362 * started the disks 7363 */ 7364 (void) sd_ddi_pm_resume(un); 7365 7366 /* 7367 * Resume the watch thread since it was suspended 7368 * when the device went into low power mode. 7369 */ 7370 if (ISREMOVABLE(un)) { 7371 mutex_enter(SD_MUTEX(un)); 7372 if (un->un_f_watcht_stopped == TRUE) { 7373 opaque_t temp_token; 7374 7375 un->un_f_watcht_stopped = FALSE; 7376 mutex_exit(SD_MUTEX(un)); 7377 temp_token = scsi_watch_request_submit( 7378 SD_SCSI_DEVP(un), 7379 sd_check_media_time, 7380 SENSE_LENGTH, sd_media_watch_cb, 7381 (caddr_t)dev); 7382 mutex_enter(SD_MUTEX(un)); 7383 un->un_swr_token = temp_token; 7384 } 7385 mutex_exit(SD_MUTEX(un)); 7386 } 7387 } 7388 if (got_semaphore_here != 0) { 7389 sema_v(&un->un_semoclose); 7390 } 7391 /* 7392 * On exit put the state back to it's original value 7393 * and broadcast to anyone waiting for the power 7394 * change completion. 7395 */ 7396 mutex_enter(SD_MUTEX(un)); 7397 un->un_state = state_before_pm; 7398 cv_broadcast(&un->un_suspend_cv); 7399 mutex_exit(SD_MUTEX(un)); 7400 7401 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 7402 7403 return (rval); 7404 } 7405 7406 7407 7408 /* 7409 * Function: sdattach 7410 * 7411 * Description: Driver's attach(9e) entry point function. 
7412 * 7413 * Arguments: devi - opaque device info handle 7414 * cmd - attach type 7415 * 7416 * Return Code: DDI_SUCCESS 7417 * DDI_FAILURE 7418 * 7419 * Context: Kernel thread context 7420 */ 7421 7422 static int 7423 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 7424 { 7425 switch (cmd) { 7426 case DDI_ATTACH: 7427 return (sd_unit_attach(devi)); 7428 case DDI_RESUME: 7429 return (sd_ddi_resume(devi)); 7430 default: 7431 break; 7432 } 7433 return (DDI_FAILURE); 7434 } 7435 7436 7437 /* 7438 * Function: sddetach 7439 * 7440 * Description: Driver's detach(9E) entry point function. 7441 * 7442 * Arguments: devi - opaque device info handle 7443 * cmd - detach type 7444 * 7445 * Return Code: DDI_SUCCESS 7446 * DDI_FAILURE 7447 * 7448 * Context: Kernel thread context 7449 */ 7450 7451 static int 7452 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 7453 { 7454 switch (cmd) { 7455 case DDI_DETACH: 7456 return (sd_unit_detach(devi)); 7457 case DDI_SUSPEND: 7458 return (sd_ddi_suspend(devi)); 7459 default: 7460 break; 7461 } 7462 return (DDI_FAILURE); 7463 } 7464 7465 7466 /* 7467 * Function: sd_sync_with_callback 7468 * 7469 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 7470 * state while the callback routine is active. 7471 * 7472 * Arguments: un: softstate structure for the instance 7473 * 7474 * Context: Kernel thread context 7475 */ 7476 7477 static void 7478 sd_sync_with_callback(struct sd_lun *un) 7479 { 7480 ASSERT(un != NULL); 7481 7482 mutex_enter(SD_MUTEX(un)); 7483 7484 ASSERT(un->un_in_callback >= 0); 7485 7486 while (un->un_in_callback > 0) { 7487 mutex_exit(SD_MUTEX(un)); 7488 delay(2); 7489 mutex_enter(SD_MUTEX(un)); 7490 } 7491 7492 mutex_exit(SD_MUTEX(un)); 7493 } 7494 7495 /* 7496 * Function: sd_unit_attach 7497 * 7498 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 7499 * the soft state structure for the device and performs 7500 * all necessary structure and device initializations. 7501 * 7502 * Arguments: devi: the system's dev_info_t for the device. 7503 * 7504 * Return Code: DDI_SUCCESS if attach is successful. 7505 * DDI_FAILURE if any part of the attach fails. 7506 * 7507 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 7508 * Kernel thread context only. Can sleep. 7509 */ 7510 7511 static int 7512 sd_unit_attach(dev_info_t *devi) 7513 { 7514 struct scsi_device *devp; 7515 struct sd_lun *un; 7516 char *variantp; 7517 int reservation_flag = SD_TARGET_IS_UNRESERVED; 7518 int instance; 7519 int rval; 7520 uint64_t capacity; 7521 uint_t lbasize; 7522 7523 /* 7524 * Retrieve the target driver's private data area. This was set 7525 * up by the HBA. 7526 */ 7527 devp = ddi_get_driver_private(devi); 7528 7529 /* 7530 * Since we have no idea what state things were left in by the last 7531 * user of the device, set up some 'default' settings, ie. turn 'em 7532 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 7533 * Do this before the scsi_probe, which sends an inquiry. 7534 * This is a fix for bug (4430280). 7535 * Of special importance is wide-xfer. The drive could have been left 7536 * in wide transfer mode by the last driver to communicate with it, 7537 * this includes us. If that's the case, and if the following is not 7538 * setup properly or we don't re-negotiate with the drive prior to 7539 * transferring data to/from the drive, it causes bus parity errors, 7540 * data overruns, and unexpected interrupts. This first occurred when 7541 * the fix for bug (4378686) was made. 
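 * (For reference, scsi_ifsetcap(9F) is declared as int scsi_ifsetcap(struct scsi_address *ap, char *cap, int value, int whom); in the calls below, a value of 0 requests that the capability be disabled, whom == 1 applies the change to this target only, and a return value of 1 indicates the HBA accepted the change.)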
7542 */ 7543 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 7544 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 7545 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 7546 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 7547 7548 /* 7549 * Use scsi_probe() to issue an INQUIRY command to the device. 7550 * This call will allocate and fill in the scsi_inquiry structure 7551 * and point the sd_inq member of the scsi_device structure to it. 7552 * If the attach succeeds, then this memory will not be de-allocated 7553 * (via scsi_unprobe()) until the instance is detached. 7554 */ 7555 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 7556 goto probe_failed; 7557 } 7558 7559 /* 7560 * Check the device type as specified in the inquiry data and 7561 * claim it if it is of a type that we support. 7562 */ 7563 switch (devp->sd_inq->inq_dtype) { 7564 case DTYPE_DIRECT: 7565 break; 7566 case DTYPE_RODIRECT: 7567 break; 7568 case DTYPE_OPTICAL: 7569 break; 7570 case DTYPE_NOTPRESENT: 7571 default: 7572 /* Unsupported device type; fail the attach. */ 7573 goto probe_failed; 7574 } 7575 7576 /* 7577 * Allocate the soft state structure for this unit. 7578 * 7579 * We rely upon this memory being set to all zeroes by 7580 * ddi_soft_state_zalloc(). We assume that any member of the 7581 * soft state structure that is not explicitly initialized by 7582 * this routine will have a value of zero. 7583 */ 7584 instance = ddi_get_instance(devp->sd_dev); 7585 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 7586 goto probe_failed; 7587 } 7588 7589 /* 7590 * Retrieve a pointer to the newly-allocated soft state. 7591 * 7592 * This should NEVER fail if the ddi_soft_state_zalloc() call above 7593 * was successful, unless something has gone horribly wrong and the 7594 * ddi's soft state internals are corrupt (in which case it is 7595 * probably better to halt here than just fail the attach....) 7596 */ 7597 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 7598 panic("sd_unit_attach: NULL soft state on instance:0x%x", 7599 instance); 7600 /*NOTREACHED*/ 7601 } 7602 7603 /* 7604 * Link the back ptr of the driver soft state to the scsi_device 7605 * struct for this lun. 7606 * Save a pointer to the softstate in the driver-private area of 7607 * the scsi_device struct. 7608 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 7609 * we first set un->un_sd below. 7610 */ 7611 un->un_sd = devp; 7612 devp->sd_private = (opaque_t)un; 7613 7614 /* 7615 * The following must be after devp is stored in the soft state struct. 7616 */ 7617 #ifdef SDDEBUG 7618 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7619 "%s_unit_attach: un:0x%p instance:%d\n", 7620 ddi_driver_name(devi), un, instance); 7621 #endif 7622 7623 /* 7624 * Set up the device type and node type (for the minor nodes). 7625 * By default we assume that the device can at least support the 7626 * Common Command Set. Call it a CD-ROM if it reports itself 7627 * as a RODIRECT device. 7628 */ 7629 switch (devp->sd_inq->inq_dtype) { 7630 case DTYPE_RODIRECT: 7631 un->un_node_type = DDI_NT_CD_CHAN; 7632 un->un_ctype = CTYPE_CDROM; 7633 break; 7634 case DTYPE_OPTICAL: 7635 un->un_node_type = DDI_NT_BLOCK_CHAN; 7636 un->un_ctype = CTYPE_ROD; 7637 break; 7638 default: 7639 un->un_node_type = DDI_NT_BLOCK_CHAN; 7640 un->un_ctype = CTYPE_CCS; 7641 break; 7642 } 7643 7644 /* 7645 * Try to read the interconnect type from the HBA. 
* 7647 * Note: This driver is currently compiled as two binaries, a parallel 7648 * scsi version (sd) and a fibre channel version (ssd). All functional 7649 * differences are determined at compile time. In the future a single 7650 * binary will be provided and the interconnect type will be used to 7651 * differentiate between fibre and parallel scsi behaviors. At that time 7652 * it will be necessary for all fibre channel HBAs to support this 7653 * property. 7654 * 7655 * Set un_f_is_fibre to TRUE (default to fibre). 7656 */ 7657 un->un_f_is_fibre = TRUE; 7658 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 7659 case INTERCONNECT_SSA: 7660 un->un_interconnect_type = SD_INTERCONNECT_SSA; 7661 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7662 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 7663 break; 7664 case INTERCONNECT_PARALLEL: 7665 un->un_f_is_fibre = FALSE; 7666 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7667 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7668 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 7669 break; 7670 case INTERCONNECT_FIBRE: 7671 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 7672 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7673 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 7674 break; 7675 case INTERCONNECT_FABRIC: 7676 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 7677 un->un_node_type = DDI_NT_BLOCK_FABRIC; 7678 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7679 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 7680 break; 7681 default: 7682 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 7683 /* 7684 * The HBA does not support the "interconnect-type" property 7685 * (or did not provide a recognized type). 7686 * 7687 * Note: This will be obsoleted when a single fibre channel 7688 * and parallel scsi driver is delivered. In the meantime the 7689 * interconnect type will be set to the platform default. If that 7690 * type is not parallel SCSI, it means that we should be 7691 * assuming "ssd" semantics. However, here this also means that 7692 * the FC HBA is not supporting the "interconnect-type" property 7693 * like we expect it to, so log this occurrence. 7694 */ 7695 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 7696 if (!SD_IS_PARALLEL_SCSI(un)) { 7697 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7698 "sd_unit_attach: un:0x%p Assuming " 7699 "INTERCONNECT_FIBRE\n", un); 7700 } else { 7701 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7702 "sd_unit_attach: un:0x%p Assuming " 7703 "INTERCONNECT_PARALLEL\n", un); 7704 un->un_f_is_fibre = FALSE; 7705 } 7706 #else 7707 /* 7708 * Note: This source will be implemented when a single fibre 7709 * channel and parallel scsi driver is delivered. The default 7710 * will be to assume that if a device does not support the 7711 * "interconnect-type" property it is a parallel SCSI HBA and 7712 * we will set the interconnect type for parallel scsi.
*/ 7714 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7715 un->un_f_is_fibre = FALSE; 7716 #endif 7717 break; 7718 } 7719 7720 if (un->un_f_is_fibre == TRUE) { 7721 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7722 SCSI_VERSION_3) { 7723 switch (un->un_interconnect_type) { 7724 case SD_INTERCONNECT_FIBRE: 7725 case SD_INTERCONNECT_SSA: 7726 un->un_node_type = DDI_NT_BLOCK_WWN; 7727 break; 7728 default: 7729 break; 7730 } 7731 } 7732 } 7733 7734 /* 7735 * Initialize the Request Sense command for the target 7736 */ 7737 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7738 goto alloc_rqs_failed; 7739 } 7740 7741 /* 7742 * Set un_retry_count to SD_RETRY_COUNT; this is fine for SPARC, 7743 * which has separate binaries for sd and ssd. 7744 * 7745 * x86 has one binary, so un_retry_count is set based on the 7746 * interconnect type. The hardcoded values will go away when SPARC 7747 * also uses one binary for sd and ssd. These hardcoded values need 7748 * to match SD_RETRY_COUNT in sddef.h. 7749 * The value used is based on the interconnect type: 7750 * fibre = 3, parallel = 5. 7751 */ 7752 #if defined(__i386) || defined(__amd64) 7753 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 7754 #else 7755 un->un_retry_count = SD_RETRY_COUNT; 7756 #endif 7757 7758 /* 7759 * Set the per disk retry count to the default number of retries 7760 * for disks and CDROMs. This value can be overridden by the 7761 * disk property list or an entry in sd.conf. 7762 */ 7763 un->un_notready_retry_count = 7764 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 7765 : DISK_NOT_READY_RETRY_COUNT(un); 7766 7767 /* 7768 * Set the busy retry count to the default value of un_retry_count. 7769 * This can be overridden by entries in sd.conf or the device 7770 * config table. 7771 */ 7772 un->un_busy_retry_count = un->un_retry_count; 7773 7774 /* 7775 * Init the reset threshold for retries. This number determines 7776 * how many retries must be performed before a reset can be issued 7777 * (for certain error conditions). This can be overridden by entries 7778 * in sd.conf or the device config table. 7779 */ 7780 un->un_reset_retry_count = (un->un_retry_count / 2); 7781 7782 /* 7783 * Set the victim_retry_count to twice the un_retry_count default. 7784 */ 7785 un->un_victim_retry_count = (2 * un->un_retry_count); 7786 7787 /* 7788 * Set the reservation release timeout to the default value of 7789 * 5 seconds. This can be overridden by entries in ssd.conf or the 7790 * device config table. 7791 */ 7792 un->un_reserve_release_time = 5; 7793 7794 /* 7795 * Set up the default maximum transfer size. Note that this may 7796 * get updated later in the attach, when setting up default wide 7797 * operations for disks. 7798 */ 7799 #if defined(__i386) || defined(__amd64) 7800 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7801 #else 7802 un->un_max_xfer_size = (uint_t)maxphys; 7803 #endif 7804 7805 /* 7806 * Get "allow bus device reset" property (defaults to "enabled" if 7807 * the property was not defined). This is to disable bus resets for 7808 * certain kinds of error recovery. Note: In the future when a run-time 7809 * fibre check is available the soft state flag should default to 7810 * enabled.
7811 */ 7812 if (un->un_f_is_fibre == TRUE) { 7813 un->un_f_allow_bus_device_reset = TRUE; 7814 } else { 7815 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7816 "allow-bus-device-reset", 1) != 0) { 7817 un->un_f_allow_bus_device_reset = TRUE; 7818 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7819 "sd_unit_attach: un:0x%p Bus device reset enabled\n", 7820 un); 7821 } else { 7822 un->un_f_allow_bus_device_reset = FALSE; 7823 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7824 "sd_unit_attach: un:0x%p Bus device reset disabled\n", 7825 un); 7826 } 7827 } 7828 7829 /* 7830 * Check if this is an ATAPI device. ATAPI devices use Group 1 7831 * Read/Write commands and Group 2 Mode Sense/Select commands. 7832 * 7833 * Note: The "obsolete" way of doing this is to check for the "atapi" 7834 * property. The new "variant" property with a value of "atapi" has been 7835 * introduced so that future 'variants' of standard SCSI behavior (like 7836 * atapi) could be specified by the underlying HBA drivers by supplying 7837 * a new value for the "variant" property, instead of having to define a 7838 * new property. 7839 */ 7840 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7841 un->un_f_cfg_is_atapi = TRUE; 7842 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7843 "sd_unit_attach: un:0x%p Atapi device\n", un); 7844 } 7845 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7846 &variantp) == DDI_PROP_SUCCESS) { 7847 if (strcmp(variantp, "atapi") == 0) { 7848 un->un_f_cfg_is_atapi = TRUE; 7849 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7850 "sd_unit_attach: un:0x%p Atapi device\n", un); 7851 } 7852 ddi_prop_free(variantp); 7853 } 7854 7855 /* 7856 * Assume doorlock commands are supported. If not, the first 7857 * call to sd_send_scsi_DOORLOCK() will set to FALSE 7858 */ 7859 un->un_f_doorlock_supported = TRUE; 7860 7861 un->un_cmd_timeout = SD_IO_TIME; 7862 7863 /* Info on current states, statuses, etc. (Updated frequently) */ 7864 un->un_state = SD_STATE_NORMAL; 7865 un->un_last_state = SD_STATE_NORMAL; 7866 7867 /* Control & status info for command throttling */ 7868 un->un_throttle = sd_max_throttle; 7869 un->un_saved_throttle = sd_max_throttle; 7870 un->un_min_throttle = sd_min_throttle; 7871 7872 if (un->un_f_is_fibre == TRUE) { 7873 un->un_f_use_adaptive_throttle = TRUE; 7874 } else { 7875 un->un_f_use_adaptive_throttle = FALSE; 7876 } 7877 7878 /* Removable media support. */ 7879 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7880 un->un_mediastate = DKIO_NONE; 7881 un->un_specified_mediastate = DKIO_NONE; 7882 7883 /* CVs for suspend/resume (PM or DR) */ 7884 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7885 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7886 7887 /* Power management support. */ 7888 un->un_power_level = SD_SPINDLE_UNINIT; 7889 7890 /* 7891 * The open/close semaphore is used to serialize threads executing 7892 * in the driver's open & close entry point routines for a given 7893 * instance. 7894 */ 7895 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7896 7897 /* 7898 * The conf file entry and softstate variable is a forceful override, 7899 * meaning a non-zero value must be entered to change the default. 7900 */ 7901 un->un_f_disksort_disabled = FALSE; 7902 7903 /* 7904 * Retrieve the properties from the static driver table or the driver 7905 * configuration file (.conf) for this unit and update the soft state 7906 * for the device as needed for the indicated properties. 
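 * (A hypothetical example of such a .conf entry, shown only to illustrate the mechanism; the exact tuple layout is defined by this driver's property parsing, not here: sd-config-list = "ACME Disk1", "acme-disk-data"; where the first string would match the device's INQUIRY vid/pid and the second names another property carrying the per-device settings.)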
* Note: the property configuration needs to occur here as some of the 7908 * following routines may have dependencies on soft state flags set 7909 * as part of the driver property configuration. 7910 */ 7911 sd_read_unit_properties(un); 7912 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7913 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7914 7915 /* 7916 * By default, we mark the capacity, lbasize, and geometry 7917 * as invalid. Only if we successfully read a valid capacity 7918 * will we update the un_blockcount and un_tgt_blocksize with the 7919 * valid values (the geometry will be validated later). 7920 */ 7921 un->un_f_blockcount_is_valid = FALSE; 7922 un->un_f_tgt_blocksize_is_valid = FALSE; 7923 un->un_f_geometry_is_valid = FALSE; 7924 7925 /* 7926 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7927 * otherwise. 7928 */ 7929 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7930 un->un_blockcount = 0; 7931 7932 /* 7933 * Set up the per-instance info needed to determine the correct 7934 * CDBs and other info for issuing commands to the target. 7935 */ 7936 sd_init_cdb_limits(un); 7937 7938 /* 7939 * Set up the IO chains to use, based upon the target type. 7940 */ 7941 if (ISREMOVABLE(un)) { 7942 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7943 } else { 7944 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7945 } 7946 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7947 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7948 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7949 7950 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7951 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7952 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 7953 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 7954 7955 7956 if (ISCD(un)) { 7957 un->un_additional_codes = sd_additional_codes; 7958 } else { 7959 un->un_additional_codes = NULL; 7960 } 7961 7962 /* 7963 * Create the kstats here so they can be available for attach-time 7964 * routines that send commands to the unit (either polled or via 7965 * sd_send_scsi_cmd). 7966 * 7967 * Note: This is a critical sequence that needs to be maintained: 7968 * 1) Instantiate the kstats here, before any routines using the 7969 * iopath (i.e. sd_send_scsi_cmd). 7970 * 2) Initialize the error stats (sd_set_errstats) and partition 7971 * stats (sd_set_pstats), following sd_validate_geometry(), 7972 * sd_register_devid(), and sd_disable_caching(). 7973 */ 7974 7975 un->un_stats = kstat_create(sd_label, instance, 7976 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 7977 if (un->un_stats != NULL) { 7978 un->un_stats->ks_lock = SD_MUTEX(un); 7979 kstat_install(un->un_stats); 7980 } 7981 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7982 "sd_unit_attach: un:0x%p un_stats created\n", un); 7983 7984 sd_create_errstats(un, instance); 7985 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7986 "sd_unit_attach: un:0x%p errstats created\n", un); 7987 7988 /* 7989 * The following if/else code was relocated here from below as part 7990 * of the fix for bug (4430280). However, with the default setup added 7991 * on entry to this routine, it's no longer absolutely necessary for 7992 * this to be before the call to sd_spin_up_unit. 7993 */ 7994 if (SD_IS_PARALLEL_SCSI(un)) { 7995 /* 7996 * If SCSI-2 tagged queueing is supported by the target 7997 * and by the host adapter then we will enable it.
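 * In outline (restating the checks coded below): an RDF_SCSI2 response format with inq_cmdque set and ARQ enabled means we try tagged queueing (FLAG_STAG); failing that, an HBA reporting "untagged-qing" gets internal queueing with the throttle capped at 3; otherwise queueing is disabled and the throttle drops to 1.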
7998 */ 7999 un->un_tagflags = 0; 8000 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && 8001 (devp->sd_inq->inq_cmdque) && 8002 (un->un_f_arq_enabled == TRUE)) { 8003 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 8004 1, 1) == 1) { 8005 un->un_tagflags = FLAG_STAG; 8006 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8007 "sd_unit_attach: un:0x%p tag queueing " 8008 "enabled\n", un); 8009 } else if (scsi_ifgetcap(SD_ADDRESS(un), 8010 "untagged-qing", 0) == 1) { 8011 un->un_f_opt_queueing = TRUE; 8012 un->un_saved_throttle = un->un_throttle = 8013 min(un->un_throttle, 3); 8014 } else { 8015 un->un_f_opt_queueing = FALSE; 8016 un->un_saved_throttle = un->un_throttle = 1; 8017 } 8018 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 8019 == 1) && (un->un_f_arq_enabled == TRUE)) { 8020 /* The Host Adapter supports internal queueing. */ 8021 un->un_f_opt_queueing = TRUE; 8022 un->un_saved_throttle = un->un_throttle = 8023 min(un->un_throttle, 3); 8024 } else { 8025 un->un_f_opt_queueing = FALSE; 8026 un->un_saved_throttle = un->un_throttle = 1; 8027 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8028 "sd_unit_attach: un:0x%p no tag queueing\n", un); 8029 } 8030 8031 8032 /* Setup or tear down default wide operations for disks */ 8033 8034 /* 8035 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 8036 * and "ssd_max_xfer_size" to exist simultaneously on the same 8037 * system and be set to different values. In the future this 8038 * code may need to be updated when the ssd module is 8039 * obsoleted and removed from the system. (4299588) 8040 */ 8041 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && 8042 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 8043 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 8044 1, 1) == 1) { 8045 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8046 "sd_unit_attach: un:0x%p Wide Transfer " 8047 "enabled\n", un); 8048 } 8049 8050 /* 8051 * If tagged queuing has also been enabled, then 8052 * enable large xfers 8053 */ 8054 if (un->un_saved_throttle == sd_max_throttle) { 8055 un->un_max_xfer_size = 8056 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8057 sd_max_xfer_size, SD_MAX_XFER_SIZE); 8058 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8059 "sd_unit_attach: un:0x%p max transfer " 8060 "size=0x%x\n", un, un->un_max_xfer_size); 8061 } 8062 } else { 8063 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 8064 0, 1) == 1) { 8065 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8066 "sd_unit_attach: un:0x%p " 8067 "Wide Transfer disabled\n", un); 8068 } 8069 } 8070 } else { 8071 un->un_tagflags = FLAG_STAG; 8072 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 8073 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 8074 } 8075 8076 /* 8077 * If this target supports LUN reset, try to enable it. 8078 */ 8079 if (un->un_f_lun_reset_enabled) { 8080 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 8081 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 8082 "un:0x%p lun_reset capability set\n", un); 8083 } else { 8084 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 8085 "un:0x%p lun-reset capability not set\n", un); 8086 } 8087 } 8088 8089 /* 8090 * At this point in the attach, we have enough info in the 8091 * soft state to be able to issue commands to the target. 8092 * 8093 * All command paths used below MUST issue their commands as 8094 * SD_PATH_DIRECT. This is important as intermediate layers 8095 * are not all initialized yet (such as PM). 8096 */ 8097 8098 /* 8099 * Send a TEST UNIT READY command to the device. This should clear 8100 * any outstanding UNIT ATTENTION that may be present. 
8101 * 8102 * Note: Don't check for success, just track if there is a reservation, 8103 * this is a throw away command to clear any unit attentions. 8104 * 8105 * Note: This MUST be the first command issued to the target during 8106 * attach to ensure power on UNIT ATTENTIONS are cleared. 8107 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 8108 * with attempts at spinning up a device with no media. 8109 */ 8110 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 8111 reservation_flag = SD_TARGET_IS_RESERVED; 8112 } 8113 8114 /* 8115 * If the device is NOT a removable media device, attempt to spin 8116 * it up (using the START_STOP_UNIT command) and read its capacity 8117 * (using the READ CAPACITY command). Note, however, that either 8118 * of these could fail and in some cases we would continue with 8119 * the attach despite the failure (see below). 8120 */ 8121 if (devp->sd_inq->inq_dtype == DTYPE_DIRECT && !ISREMOVABLE(un)) { 8122 switch (sd_spin_up_unit(un)) { 8123 case 0: 8124 /* 8125 * Spin-up was successful; now try to read the 8126 * capacity. If successful then save the results 8127 * and mark the capacity & lbasize as valid. 8128 */ 8129 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8130 "sd_unit_attach: un:0x%p spin-up successful\n", un); 8131 8132 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 8133 &lbasize, SD_PATH_DIRECT)) { 8134 case 0: { 8135 if (capacity > DK_MAX_BLOCKS) { 8136 #ifdef _LP64 8137 /* 8138 * Enable descriptor format sense data 8139 * so that we can get 64 bit sense 8140 * data fields. 8141 */ 8142 sd_enable_descr_sense(un); 8143 #else 8144 /* 32-bit kernels can't handle this */ 8145 scsi_log(SD_DEVINFO(un), 8146 sd_label, CE_WARN, 8147 "disk has %llu blocks, which " 8148 "is too large for a 32-bit " 8149 "kernel", capacity); 8150 goto spinup_failed; 8151 #endif 8152 } 8153 /* 8154 * The following relies on 8155 * sd_send_scsi_READ_CAPACITY never 8156 * returning 0 for capacity and/or lbasize. 8157 */ 8158 sd_update_block_info(un, lbasize, capacity); 8159 8160 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8161 "sd_unit_attach: un:0x%p capacity = %ld " 8162 "blocks; lbasize= %ld.\n", un, 8163 un->un_blockcount, un->un_tgt_blocksize); 8164 8165 break; 8166 } 8167 case EACCES: 8168 /* 8169 * Should never get here if the spin-up 8170 * succeeded, but code it in anyway. 8171 * From here, just continue with the attach... 8172 */ 8173 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8174 "sd_unit_attach: un:0x%p " 8175 "sd_send_scsi_READ_CAPACITY " 8176 "returned reservation conflict\n", un); 8177 reservation_flag = SD_TARGET_IS_RESERVED; 8178 break; 8179 default: 8180 /* 8181 * Likewise, should never get here if the 8182 * spin-up succeeded. Just continue with 8183 * the attach... 8184 */ 8185 break; 8186 } 8187 break; 8188 case EACCES: 8189 /* 8190 * Device is reserved by another host. In this case 8191 * we could not spin it up or read the capacity, but 8192 * we continue with the attach anyway. 8193 */ 8194 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8195 "sd_unit_attach: un:0x%p spin-up reservation " 8196 "conflict.\n", un); 8197 reservation_flag = SD_TARGET_IS_RESERVED; 8198 break; 8199 default: 8200 /* Fail the attach if the spin-up failed. */ 8201 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8202 "sd_unit_attach: un:0x%p spin-up failed.", un); 8203 goto spinup_failed; 8204 } 8205 } 8206 8207 /* 8208 * Check to see if this is a MMC drive 8209 */ 8210 if (ISCD(un)) { 8211 sd_set_mmc_caps(un); 8212 } 8213 8214 /* 8215 * Create the minor nodes for the device. 
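 * (For orientation, the minor numbers used by this routine encode both the instance and the partition: minor = (instance << SDUNIT_SHIFT) | partition, as in the "wd" node creation later in this function. With an assumed SDUNIT_SHIFT of 6, instance 2 partition 7 would give minor number 135; the shift value is illustrative only, the real one comes from the driver's headers.)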
8216 * Note: If we want to support fdisk on both sparc and intel, this will 8217 * have to separate out the notion that VTOC8 is always sparc, and 8218 * VTOC16 is always intel (tho these can be the defaults). The vtoc 8219 * type will have to be determined at run-time, and the fdisk 8220 * partitioning will have to have been read & set up before we 8221 * create the minor nodes. (any other inits (such as kstats) that 8222 * also ought to be done before creating the minor nodes?) (Doesn't 8223 * setting up the minor nodes kind of imply that we're ready to 8224 * handle an open from userland?) 8225 */ 8226 if (sd_create_minor_nodes(un, devi) != DDI_SUCCESS) { 8227 goto create_minor_nodes_failed; 8228 } 8229 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8230 "sd_unit_attach: un:0x%p minor nodes created\n", un); 8231 8232 /* 8233 * Add a zero-length attribute to tell the world we support 8234 * kernel ioctls (for layered drivers) 8235 */ 8236 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8237 DDI_KERNEL_IOCTL, NULL, 0); 8238 8239 /* 8240 * Add a boolean property to tell the world we support 8241 * the B_FAILFAST flag (for layered drivers) 8242 */ 8243 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8244 "ddi-failfast-supported", NULL, 0); 8245 8246 /* 8247 * Initialize power management 8248 */ 8249 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 8250 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 8251 sd_setup_pm(un, devi); 8252 if (un->un_f_pm_is_enabled == FALSE) { 8253 /* 8254 * For performance, point to a jump table that does 8255 * not include pm. 8256 * The direct and priority chains don't change with PM. 8257 * 8258 * Note: this is currently done based on individual device 8259 * capabilities. When an interface for determining system 8260 * power enabled state becomes available, or when additional 8261 * layers are added to the command chain, these values will 8262 * have to be re-evaluated for correctness. 8263 */ 8264 if (ISREMOVABLE(un)) { 8265 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 8266 } else { 8267 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 8268 } 8269 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8270 } 8271 8272 /* 8273 * This property is set to 0 by HA software to avoid retries 8274 * on a reserved disk. (The preferred property name is 8275 * "retry-on-reservation-conflict") (1189689) 8276 * 8277 * Note: The use of a global here can have unintended consequences. A 8278 * per instance variable is preferrable to match the capabilities of 8279 * different underlying hba's (4402600) 8280 */ 8281 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 8282 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 8283 sd_retry_on_reservation_conflict); 8284 if (sd_retry_on_reservation_conflict != 0) { 8285 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 8286 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 8287 sd_retry_on_reservation_conflict); 8288 } 8289 8290 /* Set up options for QFULL handling. */ 8291 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8292 "qfull-retries", -1)) != -1) { 8293 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 8294 rval, 1); 8295 } 8296 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8297 "qfull-retry-interval", -1)) != -1) { 8298 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 8299 rval, 1); 8300 } 8301 8302 /* 8303 * This just prints a message that announces the existence of the 8304 * device. 
The message is always printed in the system logfile, but 8305 * only appears on the console if the system is booted with the 8306 * -v (verbose) argument. 8307 */ 8308 ddi_report_dev(devi); 8309 8310 /* 8311 * The framework calls driver attach routines single-threaded 8312 * for a given instance. However we still acquire SD_MUTEX here 8313 * because this required for calling the sd_validate_geometry() 8314 * and sd_register_devid() functions. 8315 */ 8316 mutex_enter(SD_MUTEX(un)); 8317 un->un_f_geometry_is_valid = FALSE; 8318 un->un_mediastate = DKIO_NONE; 8319 un->un_reserved = -1; 8320 if (!ISREMOVABLE(un)) { 8321 /* 8322 * Read and validate the device's geometry (ie, disk label) 8323 * A new unformatted drive will not have a valid geometry, but 8324 * the driver needs to successfully attach to this device so 8325 * the drive can be formatted via ioctls. 8326 */ 8327 if (((sd_validate_geometry(un, SD_PATH_DIRECT) == 8328 ENOTSUP)) && 8329 (un->un_blockcount < DK_MAX_BLOCKS)) { 8330 /* 8331 * We found a small disk with an EFI label on it; 8332 * we need to fix up the minor nodes accordingly. 8333 */ 8334 ddi_remove_minor_node(devi, "h"); 8335 ddi_remove_minor_node(devi, "h,raw"); 8336 (void) ddi_create_minor_node(devi, "wd", 8337 S_IFBLK, 8338 (instance << SDUNIT_SHIFT) | WD_NODE, 8339 un->un_node_type, NULL); 8340 (void) ddi_create_minor_node(devi, "wd,raw", 8341 S_IFCHR, 8342 (instance << SDUNIT_SHIFT) | WD_NODE, 8343 un->un_node_type, NULL); 8344 } 8345 } 8346 8347 /* 8348 * Read and initialize the devid for the unit. 8349 */ 8350 ASSERT(un->un_errstats != NULL); 8351 if (!ISREMOVABLE(un)) { 8352 sd_register_devid(un, devi, reservation_flag); 8353 } 8354 mutex_exit(SD_MUTEX(un)); 8355 8356 #if (defined(__fibre)) 8357 /* 8358 * Register callbacks for fibre only. You can't do this soley 8359 * on the basis of the devid_type because this is hba specific. 8360 * We need to query our hba capabilities to find out whether to 8361 * register or not. 8362 */ 8363 if (un->un_f_is_fibre) { 8364 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 8365 sd_init_event_callbacks(un); 8366 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8367 "sd_unit_attach: un:0x%p event callbacks inserted", un); 8368 } 8369 } 8370 #endif 8371 8372 if (un->un_f_opt_disable_cache == TRUE) { 8373 if (sd_disable_caching(un) != 0) { 8374 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8375 "sd_unit_attach: un:0x%p Could not disable " 8376 "caching", un); 8377 goto devid_failed; 8378 } 8379 } 8380 8381 /* 8382 * Set the pstat and error stat values here, so data obtained during the 8383 * previous attach-time routines is available. 8384 * 8385 * Note: This is a critical sequence that needs to be maintained: 8386 * 1) Instantiate the kstats before any routines using the iopath 8387 * (i.e. sd_send_scsi_cmd). 8388 * 2) Initialize the error stats (sd_set_errstats) and partition 8389 * stats (sd_set_pstats)here, following sd_validate_geometry(), 8390 * sd_register_devid(), and sd_disable_caching(). 8391 */ 8392 if (!ISREMOVABLE(un) && (un->un_f_pkstats_enabled == TRUE)) { 8393 sd_set_pstats(un); 8394 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8395 "sd_unit_attach: un:0x%p pstats created and set\n", un); 8396 } 8397 8398 sd_set_errstats(un); 8399 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8400 "sd_unit_attach: un:0x%p errstats set\n", un); 8401 8402 /* 8403 * Find out what type of reservation this disk supports. 8404 */ 8405 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) { 8406 case 0: 8407 /* 8408 * SCSI-3 reservations are supported. 
8409 */ 8410 un->un_reservation_type = SD_SCSI3_RESERVATION; 8411 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8412 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 8413 break; 8414 case ENOTSUP: 8415 /* 8416 * The PERSISTENT RESERVE IN command would not be recognized by 8417 * a SCSI-2 device, so assume the reservation type is SCSI-2. 8418 */ 8419 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8420 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 8421 un->un_reservation_type = SD_SCSI2_RESERVATION; 8422 break; 8423 default: 8424 /* 8425 * default to SCSI-3 reservations 8426 */ 8427 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8428 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 8429 un->un_reservation_type = SD_SCSI3_RESERVATION; 8430 break; 8431 } 8432 8433 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8434 "sd_unit_attach: un:0x%p exit success\n", un); 8435 8436 return (DDI_SUCCESS); 8437 8438 /* 8439 * An error occurred during the attach; clean up & return failure. 8440 */ 8441 8442 devid_failed: 8443 8444 setup_pm_failed: 8445 ddi_remove_minor_node(devi, NULL); 8446 8447 create_minor_nodes_failed: 8448 /* 8449 * Cleanup from the scsi_ifsetcap() calls (437868) 8450 */ 8451 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8452 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8453 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8454 8455 if (un->un_f_is_fibre == FALSE) { 8456 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8457 } 8458 8459 spinup_failed: 8460 8461 mutex_enter(SD_MUTEX(un)); 8462 8463 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */ 8464 if (un->un_direct_priority_timeid != NULL) { 8465 timeout_id_t temp_id = un->un_direct_priority_timeid; 8466 un->un_direct_priority_timeid = NULL; 8467 mutex_exit(SD_MUTEX(un)); 8468 (void) untimeout(temp_id); 8469 mutex_enter(SD_MUTEX(un)); 8470 } 8471 8472 /* Cancel any pending start/stop timeouts */ 8473 if (un->un_startstop_timeid != NULL) { 8474 timeout_id_t temp_id = un->un_startstop_timeid; 8475 un->un_startstop_timeid = NULL; 8476 mutex_exit(SD_MUTEX(un)); 8477 (void) untimeout(temp_id); 8478 mutex_enter(SD_MUTEX(un)); 8479 } 8480 8481 mutex_exit(SD_MUTEX(un)); 8482 8483 /* There should not be any in-progress I/O so ASSERT this check */ 8484 ASSERT(un->un_ncmds_in_transport == 0); 8485 ASSERT(un->un_ncmds_in_driver == 0); 8486 8487 /* Do not free the softstate if the callback routine is active */ 8488 sd_sync_with_callback(un); 8489 8490 /* 8491 * Partition stats apparently are not used with removables. These would 8492 * not have been created during attach, so no need to clean them up... 8493 */ 8494 if (un->un_stats != NULL) { 8495 kstat_delete(un->un_stats); 8496 un->un_stats = NULL; 8497 } 8498 if (un->un_errstats != NULL) { 8499 kstat_delete(un->un_errstats); 8500 un->un_errstats = NULL; 8501 } 8502 8503 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8504 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8505 8506 ddi_prop_remove_all(devi); 8507 sema_destroy(&un->un_semoclose); 8508 cv_destroy(&un->un_state_cv); 8509 8510 getrbuf_failed: 8511 8512 sd_free_rqs(un); 8513 8514 alloc_rqs_failed: 8515 8516 devp->sd_private = NULL; 8517 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8518 8519 get_softstate_failed: 8520 /* 8521 * Note: the man pages are unclear as to whether or not doing a 8522 * ddi_soft_state_free(sd_state, instance) is the right way to 8523 * clean up after the ddi_soft_state_zalloc() if the subsequent 8524 * ddi_get_soft_state() fails. 
The implication seems to be 8525 * that the get_soft_state cannot fail if the zalloc succeeds. 8526 */ 8527 ddi_soft_state_free(sd_state, instance); 8528 8529 probe_failed: 8530 scsi_unprobe(devp); 8531 #ifdef SDDEBUG 8532 if ((sd_component_mask & SD_LOG_ATTACH_DETACH) && 8533 (sd_level_mask & SD_LOGMASK_TRACE)) { 8534 cmn_err(CE_CONT, "sd_unit_attach: un:0x%p exit failure\n", 8535 (void *)un); 8536 } 8537 #endif 8538 return (DDI_FAILURE); 8539 } 8540 8541 8542 /* 8543 * Function: sd_unit_detach 8544 * 8545 * Description: Performs DDI_DETACH processing for sddetach(). 8546 * 8547 * Return Code: DDI_SUCCESS 8548 * DDI_FAILURE 8549 * 8550 * Context: Kernel thread context 8551 */ 8552 8553 static int 8554 sd_unit_detach(dev_info_t *devi) 8555 { 8556 struct scsi_device *devp; 8557 struct sd_lun *un; 8558 int i; 8559 dev_t dev; 8560 #if !(defined(__i386) || defined(__amd64)) && !defined(__fibre) 8561 int reset_retval; 8562 #endif 8563 int instance = ddi_get_instance(devi); 8564 8565 mutex_enter(&sd_detach_mutex); 8566 8567 /* 8568 * Fail the detach for any of the following: 8569 * - Unable to get the sd_lun struct for the instance 8570 * - A layered driver has an outstanding open on the instance 8571 * - Another thread is already detaching this instance 8572 * - Another thread is currently performing an open 8573 */ 8574 devp = ddi_get_driver_private(devi); 8575 if ((devp == NULL) || 8576 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8577 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8578 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8579 mutex_exit(&sd_detach_mutex); 8580 return (DDI_FAILURE); 8581 } 8582 8583 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8584 8585 /* 8586 * Mark this instance as currently in a detach, to inhibit any 8587 * opens from a layered driver. 8588 */ 8589 un->un_detach_count++; 8590 mutex_exit(&sd_detach_mutex); 8591 8592 dev = sd_make_device(SD_DEVINFO(un)); 8593 8594 _NOTE(COMPETING_THREADS_NOW); 8595 8596 mutex_enter(SD_MUTEX(un)); 8597 8598 /* 8599 * Fail the detach if there are any outstanding layered 8600 * opens on this device. 8601 */ 8602 for (i = 0; i < NDKMAP; i++) { 8603 if (un->un_ocmap.lyropen[i] != 0) { 8604 goto err_notclosed; 8605 } 8606 } 8607 8608 /* 8609 * Verify there are NO outstanding commands issued to this device. 8610 * ie, un_ncmds_in_transport == 0. 8611 * It's possible to have outstanding commands through the physio 8612 * code path, even though everything's closed. 8613 */ 8614 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8615 (un->un_direct_priority_timeid != NULL) || 8616 (un->un_state == SD_STATE_RWAIT)) { 8617 mutex_exit(SD_MUTEX(un)); 8618 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8619 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8620 goto err_stillbusy; 8621 } 8622 8623 /* 8624 * If we have the device reserved, release the reservation. 8625 */ 8626 if ((un->un_resvd_status & SD_RESERVE) && 8627 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8628 mutex_exit(SD_MUTEX(un)); 8629 /* 8630 * Note: sd_reserve_release sends a command to the device 8631 * via the sd_ioctlcmd() path, and can sleep. 8632 */ 8633 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8634 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8635 "sd_dr_detach: Cannot release reservation \n"); 8636 } 8637 } else { 8638 mutex_exit(SD_MUTEX(un)); 8639 } 8640 8641 /* 8642 * Untimeout any reserve recover, throttle reset, restart unit 8643 * and delayed broadcast timeout threads. 
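 * (Each cancellation below follows one sketch, where un_xxx_timeid stands for whichever timeout field is being cleared: save "timeout_id_t tid = un->un_xxx_timeid;", set the field to NULL, drop SD_MUTEX, call "(void) untimeout(tid);", then re-take SD_MUTEX. Clearing the field while still holding the mutex is what keeps the callback from racing on a stale id.)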
Protect the timeout pointer 8644 * from getting nulled by their callback functions. 8645 */ 8646 mutex_enter(SD_MUTEX(un)); 8647 if (un->un_resvd_timeid != NULL) { 8648 timeout_id_t temp_id = un->un_resvd_timeid; 8649 un->un_resvd_timeid = NULL; 8650 mutex_exit(SD_MUTEX(un)); 8651 (void) untimeout(temp_id); 8652 mutex_enter(SD_MUTEX(un)); 8653 } 8654 8655 if (un->un_reset_throttle_timeid != NULL) { 8656 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8657 un->un_reset_throttle_timeid = NULL; 8658 mutex_exit(SD_MUTEX(un)); 8659 (void) untimeout(temp_id); 8660 mutex_enter(SD_MUTEX(un)); 8661 } 8662 8663 if (un->un_startstop_timeid != NULL) { 8664 timeout_id_t temp_id = un->un_startstop_timeid; 8665 un->un_startstop_timeid = NULL; 8666 mutex_exit(SD_MUTEX(un)); 8667 (void) untimeout(temp_id); 8668 mutex_enter(SD_MUTEX(un)); 8669 } 8670 8671 if (un->un_dcvb_timeid != NULL) { 8672 timeout_id_t temp_id = un->un_dcvb_timeid; 8673 un->un_dcvb_timeid = NULL; 8674 mutex_exit(SD_MUTEX(un)); 8675 (void) untimeout(temp_id); 8676 } else { 8677 mutex_exit(SD_MUTEX(un)); 8678 } 8679 8680 /* Remove any pending reservation reclaim requests for this device */ 8681 sd_rmv_resv_reclaim_req(dev); 8682 8683 mutex_enter(SD_MUTEX(un)); 8684 8685 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8686 if (un->un_direct_priority_timeid != NULL) { 8687 timeout_id_t temp_id = un->un_direct_priority_timeid; 8688 un->un_direct_priority_timeid = NULL; 8689 mutex_exit(SD_MUTEX(un)); 8690 (void) untimeout(temp_id); 8691 mutex_enter(SD_MUTEX(un)); 8692 } 8693 8694 /* Cancel any active multi-host disk watch thread requests */ 8695 if (un->un_mhd_token != NULL) { 8696 mutex_exit(SD_MUTEX(un)); 8697 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8698 if (scsi_watch_request_terminate(un->un_mhd_token, 8699 SCSI_WATCH_TERMINATE_NOWAIT)) { 8700 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8701 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8702 /* 8703 * Note: We are returning here after having removed 8704 * some driver timeouts above. This is consistent with 8705 * the legacy implementation but perhaps the watch 8706 * terminate call should be made with the wait flag set. 8707 */ 8708 goto err_stillbusy; 8709 } 8710 mutex_enter(SD_MUTEX(un)); 8711 un->un_mhd_token = NULL; 8712 } 8713 8714 if (un->un_swr_token != NULL) { 8715 mutex_exit(SD_MUTEX(un)); 8716 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8717 if (scsi_watch_request_terminate(un->un_swr_token, 8718 SCSI_WATCH_TERMINATE_NOWAIT)) { 8719 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8720 "sd_dr_detach: Cannot cancel swr watch request\n"); 8721 /* 8722 * Note: We are returning here after having removed 8723 * some driver timeouts above. This is consistent with 8724 * the legacy implementation but perhaps the watch 8725 * terminate call should be made with the wait flag set. 8726 */ 8727 goto err_stillbusy; 8728 } 8729 mutex_enter(SD_MUTEX(un)); 8730 un->un_swr_token = NULL; 8731 } 8732 8733 mutex_exit(SD_MUTEX(un)); 8734 8735 /* 8736 * Clear any scsi_reset_notifies. We clear the reset notifies 8737 * if we have not registered one. 8738 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 8739 */ 8740 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8741 sd_mhd_reset_notify_cb, (caddr_t)un); 8742 8743 8744 8745 #if defined(__i386) || defined(__amd64) 8746 /* 8747 * Gratuitous bus resets sometimes cause an otherwise 8748 * okay ATA/ATAPI bus to hang. 
This is due to the lack of 8749 * a clear spec of how resets should be implemented by ATA 8750 * disk drives. 8751 */ 8752 #elif !defined(__fibre) /* "#else if" does NOT work! */ 8753 /* 8754 * Reset target/bus. 8755 * 8756 * Note: This is a legacy workaround for Elite III dual-port drives that 8757 * will not come online after an aborted detach and subsequent re-attach. 8758 * It should be removed when the Elite III FW is fixed, or the drives 8759 * are no longer supported. 8760 */ 8761 if (un->un_f_cfg_is_atapi == FALSE) { 8762 reset_retval = 0; 8763 8764 /* If the device is in low power mode don't reset it */ 8765 8766 mutex_enter(&un->un_pm_mutex); 8767 if (!SD_DEVICE_IS_IN_LOW_POWER(un)) { 8768 /* 8769 * First try a LUN reset if we can, then move on to a 8770 * target reset if needed; swat the bus as a last 8771 * resort. 8772 */ 8773 mutex_exit(&un->un_pm_mutex); 8774 if (un->un_f_allow_bus_device_reset == TRUE) { 8775 if (un->un_f_lun_reset_enabled == TRUE) { 8776 reset_retval = 8777 scsi_reset(SD_ADDRESS(un), 8778 RESET_LUN); 8779 } 8780 if (reset_retval == 0) { 8781 reset_retval = 8782 scsi_reset(SD_ADDRESS(un), 8783 RESET_TARGET); 8784 } 8785 } 8786 if (reset_retval == 0) { 8787 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 8788 } 8789 } else { 8790 mutex_exit(&un->un_pm_mutex); 8791 } 8792 } 8793 #endif 8794 8795 /* 8796 * Protect the timeout pointers from getting nulled by 8797 * their callback functions during the cancellation process. 8798 * In such a scenario untimeout can be invoked with a null value. 8799 */ 8800 _NOTE(NO_COMPETING_THREADS_NOW); 8801 8802 mutex_enter(&un->un_pm_mutex); 8803 if (un->un_pm_idle_timeid != NULL) { 8804 timeout_id_t temp_id = un->un_pm_idle_timeid; 8805 un->un_pm_idle_timeid = NULL; 8806 mutex_exit(&un->un_pm_mutex); 8807 8808 /* 8809 * Timeout is active; cancel it. 8810 * Note that it'll never be active on a device 8811 * that does not support PM, therefore we don't 8812 * have to check before calling pm_idle_component. 8813 */ 8814 (void) untimeout(temp_id); 8815 (void) pm_idle_component(SD_DEVINFO(un), 0); 8816 mutex_enter(&un->un_pm_mutex); 8817 } 8818 8819 /* 8820 * Check whether there is already a timeout scheduled for power 8821 * management. If yes then don't lower the power here, that's 8822 * the timeout handler's job. 8823 */ 8824 if (un->un_pm_timeid != NULL) { 8825 timeout_id_t temp_id = un->un_pm_timeid; 8826 un->un_pm_timeid = NULL; 8827 mutex_exit(&un->un_pm_mutex); 8828 /* 8829 * Timeout is active; cancel it. 8830 * Note that it'll never be active on a device 8831 * that does not support PM, therefore we don't 8832 * have to check before calling pm_idle_component. 8833 */ 8834 (void) untimeout(temp_id); 8835 (void) pm_idle_component(SD_DEVINFO(un), 0); 8836 8837 } else { 8838 mutex_exit(&un->un_pm_mutex); 8839 if ((un->un_f_pm_is_enabled == TRUE) && 8840 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 8841 DDI_SUCCESS)) { 8842 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8843 "sd_dr_detach: Lower power request failed, ignoring.\n"); 8844 /* 8845 * Fix for bug: 4297749, item # 13 8846 * The above test now includes a check to see if PM is 8847 * supported by this device before calling 8848 * pm_lower_power(). 8849 * Note, the following is not dead code. The call to 8850 * pm_lower_power above will generate a call back into 8851 * our sdpower routine which might result in a timeout 8852 * handler getting activated. Therefore the following 8853 * code is valid and necessary.
8854 */ 8855 mutex_enter(&un->un_pm_mutex); 8856 if (un->un_pm_timeid != NULL) { 8857 timeout_id_t temp_id = un->un_pm_timeid; 8858 un->un_pm_timeid = NULL; 8859 mutex_exit(&un->un_pm_mutex); 8860 (void) untimeout(temp_id); 8861 (void) pm_idle_component(SD_DEVINFO(un), 0); 8862 } else { 8863 mutex_exit(&un->un_pm_mutex); 8864 } 8865 } 8866 } 8867 8868 /* 8869 * Cleanup from the scsi_ifsetcap() calls (437868) 8870 * Relocated here from above to be after the call to 8871 * pm_lower_power, which was getting errors. 8872 */ 8873 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8874 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8875 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8876 8877 if (un->un_f_is_fibre == FALSE) { 8878 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8879 } 8880 8881 /* 8882 * Remove any event callbacks, fibre only 8883 */ 8884 if (un->un_f_is_fibre == TRUE) { 8885 if ((un->un_insert_event != NULL) && 8886 (ddi_remove_event_handler(un->un_insert_cb_id) != 8887 DDI_SUCCESS)) { 8888 /* 8889 * Note: We are returning here after having done 8890 * substantial cleanup above. This is consistent 8891 * with the legacy implementation but this may not 8892 * be the right thing to do. 8893 */ 8894 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8895 "sd_dr_detach: Cannot cancel insert event\n"); 8896 goto err_remove_event; 8897 } 8898 un->un_insert_event = NULL; 8899 8900 if ((un->un_remove_event != NULL) && 8901 (ddi_remove_event_handler(un->un_remove_cb_id) != 8902 DDI_SUCCESS)) { 8903 /* 8904 * Note: We are returning here after having done 8905 * substantial cleanup above. This is consistent 8906 * with the legacy implementation but this may not 8907 * be the right thing to do. 8908 */ 8909 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8910 "sd_dr_detach: Cannot cancel remove event\n"); 8911 goto err_remove_event; 8912 } 8913 un->un_remove_event = NULL; 8914 } 8915 8916 /* Do not free the softstate if the callback routine is active */ 8917 sd_sync_with_callback(un); 8918 8919 /* 8920 * Hold the detach mutex here, to make sure that no other threads ever 8921 * can access a (partially) freed soft state structure. 8922 */ 8923 mutex_enter(&sd_detach_mutex); 8924 8925 /* 8926 * Clean up the soft state struct. 8927 * Cleanup is done in reverse order of allocs/inits. 8928 * At this point there should be no competing threads anymore. 8929 */ 8930 8931 /* Unregister and free device id. */ 8932 ddi_devid_unregister(devi); 8933 if (un->un_devid) { 8934 ddi_devid_free(un->un_devid); 8935 un->un_devid = NULL; 8936 } 8937 8938 /* 8939 * Destroy wmap cache if it exists. 8940 */ 8941 if (un->un_wm_cache != NULL) { 8942 kmem_cache_destroy(un->un_wm_cache); 8943 un->un_wm_cache = NULL; 8944 } 8945 8946 /* Remove minor nodes */ 8947 ddi_remove_minor_node(devi, NULL); 8948 8949 /* 8950 * kstat cleanup is done in detach for all device types (4363169). 8951 * We do not want to fail detach if the device kstats are not deleted 8952 * since there is a confusion about the devo_refcnt for the device. 8953 * We just delete the kstats and let detach complete successfully. 
8954 */ 8955 if (un->un_stats != NULL) { 8956 kstat_delete(un->un_stats); 8957 un->un_stats = NULL; 8958 } 8959 if (un->un_errstats != NULL) { 8960 kstat_delete(un->un_errstats); 8961 un->un_errstats = NULL; 8962 } 8963 8964 /* Remove partition stats (not created for removables) */ 8965 if (!ISREMOVABLE(un)) { 8966 for (i = 0; i < NSDMAP; i++) { 8967 if (un->un_pstats[i] != NULL) { 8968 kstat_delete(un->un_pstats[i]); 8969 un->un_pstats[i] = NULL; 8970 } 8971 } 8972 } 8973 8974 /* Remove xbuf registration */ 8975 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8976 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8977 8978 /* Remove driver properties */ 8979 ddi_prop_remove_all(devi); 8980 8981 mutex_destroy(&un->un_pm_mutex); 8982 cv_destroy(&un->un_pm_busy_cv); 8983 8984 /* Open/close semaphore */ 8985 sema_destroy(&un->un_semoclose); 8986 8987 /* Removable media condvar. */ 8988 cv_destroy(&un->un_state_cv); 8989 8990 /* Suspend/resume condvar. */ 8991 cv_destroy(&un->un_suspend_cv); 8992 cv_destroy(&un->un_disk_busy_cv); 8993 8994 sd_free_rqs(un); 8995 8996 /* Free up soft state */ 8997 devp->sd_private = NULL; 8998 bzero(un, sizeof (struct sd_lun)); 8999 ddi_soft_state_free(sd_state, instance); 9000 9001 mutex_exit(&sd_detach_mutex); 9002 9003 /* This frees up the INQUIRY data associated with the device. */ 9004 scsi_unprobe(devp); 9005 9006 return (DDI_SUCCESS); 9007 9008 err_notclosed: 9009 mutex_exit(SD_MUTEX(un)); 9010 9011 err_stillbusy: 9012 _NOTE(NO_COMPETING_THREADS_NOW); 9013 9014 err_remove_event: 9015 mutex_enter(&sd_detach_mutex); 9016 un->un_detach_count--; 9017 mutex_exit(&sd_detach_mutex); 9018 9019 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 9020 return (DDI_FAILURE); 9021 } 9022 9023 9024 /* 9025 * Driver minor node structure and data table 9026 */ 9027 struct driver_minor_data { 9028 char *name; 9029 minor_t minor; 9030 int type; 9031 }; 9032 9033 static struct driver_minor_data sd_minor_data[] = { 9034 {"a", 0, S_IFBLK}, 9035 {"b", 1, S_IFBLK}, 9036 {"c", 2, S_IFBLK}, 9037 {"d", 3, S_IFBLK}, 9038 {"e", 4, S_IFBLK}, 9039 {"f", 5, S_IFBLK}, 9040 {"g", 6, S_IFBLK}, 9041 {"h", 7, S_IFBLK}, 9042 #if defined(_SUNOS_VTOC_16) 9043 {"i", 8, S_IFBLK}, 9044 {"j", 9, S_IFBLK}, 9045 {"k", 10, S_IFBLK}, 9046 {"l", 11, S_IFBLK}, 9047 {"m", 12, S_IFBLK}, 9048 {"n", 13, S_IFBLK}, 9049 {"o", 14, S_IFBLK}, 9050 {"p", 15, S_IFBLK}, 9051 #endif /* defined(_SUNOS_VTOC_16) */ 9052 #if defined(_FIRMWARE_NEEDS_FDISK) 9053 {"q", 16, S_IFBLK}, 9054 {"r", 17, S_IFBLK}, 9055 {"s", 18, S_IFBLK}, 9056 {"t", 19, S_IFBLK}, 9057 {"u", 20, S_IFBLK}, 9058 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9059 {"a,raw", 0, S_IFCHR}, 9060 {"b,raw", 1, S_IFCHR}, 9061 {"c,raw", 2, S_IFCHR}, 9062 {"d,raw", 3, S_IFCHR}, 9063 {"e,raw", 4, S_IFCHR}, 9064 {"f,raw", 5, S_IFCHR}, 9065 {"g,raw", 6, S_IFCHR}, 9066 {"h,raw", 7, S_IFCHR}, 9067 #if defined(_SUNOS_VTOC_16) 9068 {"i,raw", 8, S_IFCHR}, 9069 {"j,raw", 9, S_IFCHR}, 9070 {"k,raw", 10, S_IFCHR}, 9071 {"l,raw", 11, S_IFCHR}, 9072 {"m,raw", 12, S_IFCHR}, 9073 {"n,raw", 13, S_IFCHR}, 9074 {"o,raw", 14, S_IFCHR}, 9075 {"p,raw", 15, S_IFCHR}, 9076 #endif /* defined(_SUNOS_VTOC_16) */ 9077 #if defined(_FIRMWARE_NEEDS_FDISK) 9078 {"q,raw", 16, S_IFCHR}, 9079 {"r,raw", 17, S_IFCHR}, 9080 {"s,raw", 18, S_IFCHR}, 9081 {"t,raw", 19, S_IFCHR}, 9082 {"u,raw", 20, S_IFCHR}, 9083 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9084 {0} 9085 }; 9086 9087 static struct driver_minor_data sd_minor_data_efi[] = { 9088 {"a", 0, S_IFBLK}, 9089 {"b", 1, 
S_IFBLK}, 9090 {"c", 2, S_IFBLK}, 9091 {"d", 3, S_IFBLK}, 9092 {"e", 4, S_IFBLK}, 9093 {"f", 5, S_IFBLK}, 9094 {"g", 6, S_IFBLK}, 9095 {"wd", 7, S_IFBLK}, 9096 #if defined(_FIRMWARE_NEEDS_FDISK) 9097 {"q", 16, S_IFBLK}, 9098 {"r", 17, S_IFBLK}, 9099 {"s", 18, S_IFBLK}, 9100 {"t", 19, S_IFBLK}, 9101 {"u", 20, S_IFBLK}, 9102 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9103 {"a,raw", 0, S_IFCHR}, 9104 {"b,raw", 1, S_IFCHR}, 9105 {"c,raw", 2, S_IFCHR}, 9106 {"d,raw", 3, S_IFCHR}, 9107 {"e,raw", 4, S_IFCHR}, 9108 {"f,raw", 5, S_IFCHR}, 9109 {"g,raw", 6, S_IFCHR}, 9110 {"wd,raw", 7, S_IFCHR}, 9111 #if defined(_FIRMWARE_NEEDS_FDISK) 9112 {"q,raw", 16, S_IFCHR}, 9113 {"r,raw", 17, S_IFCHR}, 9114 {"s,raw", 18, S_IFCHR}, 9115 {"t,raw", 19, S_IFCHR}, 9116 {"u,raw", 20, S_IFCHR}, 9117 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9118 {0} 9119 }; 9120 9121 9122 /* 9123 * Function: sd_create_minor_nodes 9124 * 9125 * Description: Create the minor device nodes for the instance. 9126 * 9127 * Arguments: un - driver soft state (unit) structure 9128 * devi - pointer to device info structure 9129 * 9130 * Return Code: DDI_SUCCESS 9131 * DDI_FAILURE 9132 * 9133 * Context: Kernel thread context 9134 */ 9135 9136 static int 9137 sd_create_minor_nodes(struct sd_lun *un, dev_info_t *devi) 9138 { 9139 struct driver_minor_data *dmdp; 9140 struct scsi_device *devp; 9141 int instance; 9142 char name[48]; 9143 9144 ASSERT(un != NULL); 9145 devp = ddi_get_driver_private(devi); 9146 instance = ddi_get_instance(devp->sd_dev); 9147 9148 /* 9149 * Create all the minor nodes for this target. 9150 */ 9151 if (un->un_blockcount > DK_MAX_BLOCKS) 9152 dmdp = sd_minor_data_efi; 9153 else 9154 dmdp = sd_minor_data; 9155 while (dmdp->name != NULL) { 9156 9157 (void) sprintf(name, "%s", dmdp->name); 9158 9159 if (ddi_create_minor_node(devi, name, dmdp->type, 9160 (instance << SDUNIT_SHIFT) | dmdp->minor, 9161 un->un_node_type, NULL) == DDI_FAILURE) { 9162 /* 9163 * Clean up any nodes that may have been created, in 9164 * case this fails in the middle of the loop. 9165 */ 9166 ddi_remove_minor_node(devi, NULL); 9167 return (DDI_FAILURE); 9168 } 9169 dmdp++; 9170 } 9171 9172 return (DDI_SUCCESS); 9173 } 9174 9175 9176 /* 9177 * Function: sd_create_errstats 9178 * 9179 * Description: This routine instantiates the device error stats. 9180 * 9181 * Note: During attach the stats are instantiated first so they are 9182 * available for attach-time routines that utilize the driver 9183 * iopath to send commands to the device. The stats are initialized 9184 * separately so data obtained during some attach-time routines is 9185 * available. 
(4362483) 9186 * 9187 * Arguments: un - driver soft state (unit) structure 9188 * instance - driver instance 9189 * 9190 * Context: Kernel thread context 9191 */ 9192 9193 static void 9194 sd_create_errstats(struct sd_lun *un, int instance) 9195 { 9196 struct sd_errstats *stp; 9197 char kstatmodule_err[KSTAT_STRLEN]; 9198 char kstatname[KSTAT_STRLEN]; 9199 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 9200 9201 ASSERT(un != NULL); 9202 9203 if (un->un_errstats != NULL) { 9204 return; 9205 } 9206 9207 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 9208 "%serr", sd_label); 9209 (void) snprintf(kstatname, sizeof (kstatname), 9210 "%s%d,err", sd_label, instance); 9211 9212 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 9213 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 9214 9215 if (un->un_errstats == NULL) { 9216 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 9217 "sd_create_errstats: Failed kstat_create\n"); 9218 return; 9219 } 9220 9221 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9222 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 9223 KSTAT_DATA_UINT32); 9224 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 9225 KSTAT_DATA_UINT32); 9226 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 9227 KSTAT_DATA_UINT32); 9228 kstat_named_init(&stp->sd_vid, "Vendor", 9229 KSTAT_DATA_CHAR); 9230 kstat_named_init(&stp->sd_pid, "Product", 9231 KSTAT_DATA_CHAR); 9232 kstat_named_init(&stp->sd_revision, "Revision", 9233 KSTAT_DATA_CHAR); 9234 kstat_named_init(&stp->sd_serial, "Serial No", 9235 KSTAT_DATA_CHAR); 9236 kstat_named_init(&stp->sd_capacity, "Size", 9237 KSTAT_DATA_ULONGLONG); 9238 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 9239 KSTAT_DATA_UINT32); 9240 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 9241 KSTAT_DATA_UINT32); 9242 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 9243 KSTAT_DATA_UINT32); 9244 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 9245 KSTAT_DATA_UINT32); 9246 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 9247 KSTAT_DATA_UINT32); 9248 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 9249 KSTAT_DATA_UINT32); 9250 9251 un->un_errstats->ks_private = un; 9252 un->un_errstats->ks_update = nulldev; 9253 9254 kstat_install(un->un_errstats); 9255 } 9256 9257 9258 /* 9259 * Function: sd_set_errstats 9260 * 9261 * Description: This routine sets the value of the vendor id, product id, 9262 * revision, serial number, and capacity device error stats. 9263 * 9264 * Note: During attach the stats are instantiated first so they are 9265 * available for attach-time routines that utilize the driver 9266 * iopath to send commands to the device. The stats are initialized 9267 * separately so data obtained during some attach-time routines is 9268 * available. 
(4362483) 9269 * 9270 * Arguments: un - driver soft state (unit) structure 9271 * 9272 * Context: Kernel thread context 9273 */ 9274 9275 static void 9276 sd_set_errstats(struct sd_lun *un) 9277 { 9278 struct sd_errstats *stp; 9279 9280 ASSERT(un != NULL); 9281 ASSERT(un->un_errstats != NULL); 9282 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9283 ASSERT(stp != NULL); 9284 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 9285 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 9286 (void) strncpy(stp->sd_revision.value.c, 9287 un->un_sd->sd_inq->inq_revision, 4); 9288 9289 /* 9290 * Set the "Serial No" kstat for Sun qualified drives (indicated by 9291 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 9292 * (4376302)) 9293 */ 9294 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 9295 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 9296 sizeof (SD_INQUIRY(un)->inq_serial)); 9297 } 9298 9299 if (un->un_f_blockcount_is_valid != TRUE) { 9300 /* 9301 * Set capacity error stat to 0 for no media. This ensures 9302 * a valid capacity is displayed in response to 'iostat -E' 9303 * when no media is present in the device. 9304 */ 9305 stp->sd_capacity.value.ui64 = 0; 9306 } else { 9307 /* 9308 * Multiply un_blockcount by un->un_sys_blocksize to get 9309 * capacity. 9310 * 9311 * Note: for non-512 blocksize devices "un_blockcount" has been 9312 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 9313 * (un_tgt_blocksize / un->un_sys_blocksize). 9314 */ 9315 stp->sd_capacity.value.ui64 = (uint64_t) 9316 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 9317 } 9318 } 9319 9320 9321 /* 9322 * Function: sd_set_pstats 9323 * 9324 * Description: This routine instantiates and initializes the partition 9325 * stats for each partition with more than zero blocks. 9326 * (4363169) 9327 * 9328 * Arguments: un - driver soft state (unit) structure 9329 * 9330 * Context: Kernel thread context 9331 */ 9332 9333 static void 9334 sd_set_pstats(struct sd_lun *un) 9335 { 9336 char kstatname[KSTAT_STRLEN]; 9337 int instance; 9338 int i; 9339 9340 ASSERT(un != NULL); 9341 9342 instance = ddi_get_instance(SD_DEVINFO(un)); 9343 9344 /* Note:x86: is this a VTOC8/VTOC16 difference? */ 9345 for (i = 0; i < NSDMAP; i++) { 9346 if ((un->un_pstats[i] == NULL) && 9347 (un->un_map[i].dkl_nblk != 0)) { 9348 (void) snprintf(kstatname, sizeof (kstatname), 9349 "%s%d,%s", sd_label, instance, 9350 sd_minor_data[i].name); 9351 un->un_pstats[i] = kstat_create(sd_label, 9352 instance, kstatname, "partition", KSTAT_TYPE_IO, 9353 1, KSTAT_FLAG_PERSISTENT); 9354 if (un->un_pstats[i] != NULL) { 9355 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 9356 kstat_install(un->un_pstats[i]); 9357 } 9358 } 9359 } 9360 } 9361 9362 9363 #if (defined(__fibre)) 9364 /* 9365 * Function: sd_init_event_callbacks 9366 * 9367 * Description: This routine initializes the insertion and removal event 9368 * callbacks. 
(fibre only) 9369 * 9370 * Arguments: un - driver soft state (unit) structure 9371 * 9372 * Context: Kernel thread context 9373 */ 9374 9375 static void 9376 sd_init_event_callbacks(struct sd_lun *un) 9377 { 9378 ASSERT(un != NULL); 9379 9380 if ((un->un_insert_event == NULL) && 9381 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 9382 &un->un_insert_event) == DDI_SUCCESS)) { 9383 /* 9384 * Add the callback for an insertion event 9385 */ 9386 (void) ddi_add_event_handler(SD_DEVINFO(un), 9387 un->un_insert_event, sd_event_callback, (void *)un, 9388 &(un->un_insert_cb_id)); 9389 } 9390 9391 if ((un->un_remove_event == NULL) && 9392 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 9393 &un->un_remove_event) == DDI_SUCCESS)) { 9394 /* 9395 * Add the callback for a removal event 9396 */ 9397 (void) ddi_add_event_handler(SD_DEVINFO(un), 9398 un->un_remove_event, sd_event_callback, (void *)un, 9399 &(un->un_remove_cb_id)); 9400 } 9401 } 9402 9403 9404 /* 9405 * Function: sd_event_callback 9406 * 9407 * Description: This routine handles insert/remove events (photon). The 9408 * state is changed to OFFLINE, which can be used to suppress 9409 * error messages. (fibre only) 9410 * 9411 * Arguments: un - driver soft state (unit) structure 9412 * 9413 * Context: Callout thread context 9414 */ 9415 /* ARGSUSED */ 9416 static void 9417 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 9418 void *bus_impldata) 9419 { 9420 struct sd_lun *un = (struct sd_lun *)arg; 9421 9422 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 9423 if (event == un->un_insert_event) { 9424 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 9425 mutex_enter(SD_MUTEX(un)); 9426 if (un->un_state == SD_STATE_OFFLINE) { 9427 if (un->un_last_state != SD_STATE_SUSPENDED) { 9428 un->un_state = un->un_last_state; 9429 } else { 9430 /* 9431 * We have gone through SUSPEND/RESUME while 9432 * we were offline. Restore the last state. 9433 */ 9434 un->un_state = un->un_save_state; 9435 } 9436 } 9437 mutex_exit(SD_MUTEX(un)); 9438 9439 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 9440 } else if (event == un->un_remove_event) { 9441 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 9442 mutex_enter(SD_MUTEX(un)); 9443 /* 9444 * We need to handle an event callback that occurs during 9445 * the suspend operation, since we don't prevent it. 9446 */ 9447 if (un->un_state != SD_STATE_OFFLINE) { 9448 if (un->un_state != SD_STATE_SUSPENDED) { 9449 New_state(un, SD_STATE_OFFLINE); 9450 } else { 9451 un->un_last_state = SD_STATE_OFFLINE; 9452 } 9453 } 9454 mutex_exit(SD_MUTEX(un)); 9455 } else { 9456 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 9457 "!Unknown event\n"); 9458 } 9459 9460 } 9461 #endif 9462 9463 9464 /* 9465 * Function: sd_disable_caching() 9466 * 9467 * Description: This routine is the driver entry point for disabling 9468 * read and write caching by modifying the WCE (write cache 9469 * enable) and RCD (read cache disable) bits of mode 9470 * page 8 (MODEPAGE_CACHING).
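 *
 * The MODE SENSE data parsed below is laid out as follows (a
 * sketch; hdrlen depends on ATAPI vs. SCSI, and bd_len is
 * reported by the device and may be 0):
 *
 *	+-------------+---------------------+---------------------+
 *	| mode header | block descriptor(s) | caching mode page   |
 *	| hdrlen      | bd_len              | struct mode_caching |
 *	+-------------+---------------------+---------------------+
 *
 * so the page itself begins at (header + hdrlen + bd_len).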
9471 * 9472 * Arguments: un - driver soft state (unit) structure 9473 * 9474 * Return Code: EIO 9475 * code returned by sd_send_scsi_MODE_SENSE and 9476 * sd_send_scsi_MODE_SELECT 9477 * 9478 * Context: Kernel Thread 9479 */ 9480 9481 static int 9482 sd_disable_caching(struct sd_lun *un) 9483 { 9484 struct mode_caching *mode_caching_page; 9485 uchar_t *header; 9486 size_t buflen; 9487 int hdrlen; 9488 int bd_len; 9489 int rval = 0; 9490 9491 ASSERT(un != NULL); 9492 9493 /* 9494 * Do a test unit ready, otherwise a mode sense may not work if this 9495 * is the first command sent to the device after boot. 9496 */ 9497 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 9498 9499 if (un->un_f_cfg_is_atapi == TRUE) { 9500 hdrlen = MODE_HEADER_LENGTH_GRP2; 9501 } else { 9502 hdrlen = MODE_HEADER_LENGTH; 9503 } 9504 9505 /* 9506 * Allocate memory for the retrieved mode page and its headers. Set 9507 * a pointer to the page itself. 9508 */ 9509 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 9510 header = kmem_zalloc(buflen, KM_SLEEP); 9511 9512 /* Get the information from the device. */ 9513 if (un->un_f_cfg_is_atapi == TRUE) { 9514 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 9515 MODEPAGE_CACHING, SD_PATH_DIRECT); 9516 } else { 9517 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 9518 MODEPAGE_CACHING, SD_PATH_DIRECT); 9519 } 9520 if (rval != 0) { 9521 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9522 "sd_disable_caching: Mode Sense Failed\n"); 9523 kmem_free(header, buflen); 9524 return (rval); 9525 } 9526 9527 /* 9528 * Determine size of Block Descriptors in order to locate 9529 * the mode page data. ATAPI devices return 0, SCSI devices 9530 * should return MODE_BLK_DESC_LENGTH. 9531 */ 9532 if (un->un_f_cfg_is_atapi == TRUE) { 9533 struct mode_header_grp2 *mhp; 9534 mhp = (struct mode_header_grp2 *)header; 9535 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9536 } else { 9537 bd_len = ((struct mode_header *)header)->bdesc_length; 9538 } 9539 9540 if (bd_len > MODE_BLK_DESC_LENGTH) { 9541 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9542 "sd_disable_caching: Mode Sense returned invalid " 9543 "block descriptor length\n"); 9544 kmem_free(header, buflen); 9545 return (EIO); 9546 } 9547 9548 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9549 9550 /* Check the relevant bits on successful mode sense. */ 9551 if ((mode_caching_page->wce) || !(mode_caching_page->rcd)) { 9552 /* 9553 * Read or write caching is enabled. Disable both of them. 9554 */ 9555 mode_caching_page->wce = 0; 9556 mode_caching_page->rcd = 1; 9557 9558 /* Clear reserved bits before mode select. */ 9559 mode_caching_page->mode_page.ps = 0; 9560 9561 /* 9562 * Clear out mode header for mode select. 9563 * The rest of the retrieved page will be reused. 9564 */ 9565 bzero(header, hdrlen); 9566 9567 /* Change the cache page to disable all caching. */ 9568 if (un->un_f_cfg_is_atapi == TRUE) { 9569 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 9570 buflen, SD_SAVE_PAGE, SD_PATH_DIRECT); 9571 } else { 9572 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 9573 buflen, SD_SAVE_PAGE, SD_PATH_DIRECT); 9574 } 9575 } 9576 9577 kmem_free(header, buflen); 9578 return (rval); 9579 } 9580 9581 9582 /* 9583 * Function: sd_make_device 9584 * 9585 * Description: Utility routine to return the Solaris device number from 9586 * the data in the device's dev_info structure. 
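 *
 * For illustration (an SDUNIT_SHIFT of 6 is assumed here):
 * instance 2 yields minor number 128, the slice 0 ("a") minor
 * for that instance; SDUNIT() and SDPART() invert this encoding
 * in sdopen() and the other entry points.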
9587 * 9588 * Return Code: The Solaris device number 9589 * 9590 * Context: Any 9591 */ 9592 9593 static dev_t 9594 sd_make_device(dev_info_t *devi) 9595 { 9596 return (makedevice(ddi_name_to_major(ddi_get_name(devi)), 9597 ddi_get_instance(devi) << SDUNIT_SHIFT)); 9598 } 9599 9600 9601 /* 9602 * Function: sd_pm_entry 9603 * 9604 * Description: Called at the start of a new command to manage power 9605 * and busy status of a device. This includes determining whether 9606 * the current power state of the device is sufficient for 9607 * performing the command or whether it must be changed. 9608 * The PM framework is notified appropriately. 9609 * Only with a return status of DDI_SUCCESS will the 9610 * component have been marked busy to the framework. 9611 * 9612 * All callers of sd_pm_entry must check the return status 9613 * and only call sd_pm_exit if it was DDI_SUCCESS. A status 9614 * of DDI_FAILURE indicates the device failed to power up. 9615 * In this case un_pm_count has been adjusted so the result 9616 * on exit is still powered down, i.e. count is less than 0. 9617 * Calling sd_pm_exit with this count value hits an ASSERT. 9618 * 9619 * Return Code: DDI_SUCCESS or DDI_FAILURE 9620 * 9621 * Context: Kernel thread context. 9622 */ 9623 9624 static int 9625 sd_pm_entry(struct sd_lun *un) 9626 { 9627 int return_status = DDI_SUCCESS; 9628 9629 ASSERT(!mutex_owned(SD_MUTEX(un))); 9630 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9631 9632 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 9633 9634 if (un->un_f_pm_is_enabled == FALSE) { 9635 SD_TRACE(SD_LOG_IO_PM, un, 9636 "sd_pm_entry: exiting, PM not enabled\n"); 9637 return (return_status); 9638 } 9639 9640 /* 9641 * Just increment a counter if PM is enabled. On the transition from 9642 * 0 ==> 1, mark the device as busy. The iodone side will decrement 9643 * the count with each IO and mark the device as idle when the count 9644 * hits 0. 9645 * 9646 * If the count is less than 0 the device is powered down. If a powered 9647 * down device is successfully powered up then the count must be 9648 * incremented to reflect the power up. Note that it'll get incremented 9649 * a second time to become busy. 9650 * 9651 * Because the following has the potential to change the device state 9652 * and must release the un_pm_mutex to do so, only one thread can be 9653 * allowed through at a time. 9654 */ 9655 9656 mutex_enter(&un->un_pm_mutex); 9657 while (un->un_pm_busy == TRUE) { 9658 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 9659 } 9660 un->un_pm_busy = TRUE; 9661 9662 if (un->un_pm_count < 1) { 9663 9664 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 9665 9666 /* 9667 * Indicate we are now busy so the framework won't attempt to 9668 * power down the device. This call will only fail if either 9669 * we passed a bad component number or the device has no 9670 * components. Neither of these should ever happen. 9671 */ 9672 mutex_exit(&un->un_pm_mutex); 9673 return_status = pm_busy_component(SD_DEVINFO(un), 0); 9674 ASSERT(return_status == DDI_SUCCESS); 9675 9676 mutex_enter(&un->un_pm_mutex); 9677 9678 if (un->un_pm_count < 0) { 9679 mutex_exit(&un->un_pm_mutex); 9680 9681 SD_TRACE(SD_LOG_IO_PM, un, 9682 "sd_pm_entry: power up component\n"); 9683 9684 /* 9685 * pm_raise_power will cause sdpower to be called 9686 * which brings the device power level to the 9687 * desired state, ON in this case. If successful, 9688 * un_pm_count and un_power_level will be updated 9689 * appropriately.
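 *
 * A worked trace of un_pm_count (illustrative): a powered-down,
 * idle device sits below 0 (typically -1); the first command
 * raises power (count -> 0) and then marks the device busy
 * (count -> 1); the matching sd_pm_exit() drops the count back
 * to 0 and idles the component.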
9690 */ 9691 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9692 SD_SPINDLE_ON); 9693 9694 mutex_enter(&un->un_pm_mutex); 9695 9696 if (return_status != DDI_SUCCESS) { 9697 /* 9698 * Power up failed. 9699 * Idle the device and adjust the count 9700 * so the result on exit is that we're 9701 * still powered down, ie. count is less than 0. 9702 */ 9703 SD_TRACE(SD_LOG_IO_PM, un, 9704 "sd_pm_entry: power up failed," 9705 " idle the component\n"); 9706 9707 (void) pm_idle_component(SD_DEVINFO(un), 0); 9708 un->un_pm_count--; 9709 } else { 9710 /* 9711 * Device is powered up, verify the 9712 * count is non-negative. 9713 * This is debug only. 9714 */ 9715 ASSERT(un->un_pm_count == 0); 9716 } 9717 } 9718 9719 if (return_status == DDI_SUCCESS) { 9720 /* 9721 * For performance, now that the device has been tagged 9722 * as busy, and it's known to be powered up, update the 9723 * chain types to use jump tables that do not include 9724 * pm. This significantly lowers the overhead and 9725 * therefore improves performance. 9726 */ 9727 9728 mutex_exit(&un->un_pm_mutex); 9729 mutex_enter(SD_MUTEX(un)); 9730 SD_TRACE(SD_LOG_IO_PM, un, 9731 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9732 un->un_uscsi_chain_type); 9733 9734 if (ISREMOVABLE(un)) { 9735 un->un_buf_chain_type = 9736 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9737 } else { 9738 un->un_buf_chain_type = 9739 SD_CHAIN_INFO_DISK_NO_PM; 9740 } 9741 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 9742 9743 SD_TRACE(SD_LOG_IO_PM, un, 9744 " changed uscsi_chain_type to %d\n", 9745 un->un_uscsi_chain_type); 9746 mutex_exit(SD_MUTEX(un)); 9747 mutex_enter(&un->un_pm_mutex); 9748 9749 if (un->un_pm_idle_timeid == NULL) { 9750 /* 300 ms. */ 9751 un->un_pm_idle_timeid = 9752 timeout(sd_pm_idletimeout_handler, un, 9753 (drv_usectohz((clock_t)300000))); 9754 /* 9755 * Include an extra call to busy which keeps the 9756 * device busy with-respect-to the PM layer 9757 * until the timer fires, at which time it'll 9758 * get the extra idle call. 9759 */ 9760 (void) pm_busy_component(SD_DEVINFO(un), 0); 9761 } 9762 } 9763 } 9764 un->un_pm_busy = FALSE; 9765 /* Next... */ 9766 cv_signal(&un->un_pm_busy_cv); 9767 9768 un->un_pm_count++; 9769 9770 SD_TRACE(SD_LOG_IO_PM, un, 9771 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 9772 9773 mutex_exit(&un->un_pm_mutex); 9774 9775 return (return_status); 9776 } 9777 9778 9779 /* 9780 * Function: sd_pm_exit 9781 * 9782 * Description: Called at the completion of a command to manage busy 9783 * status for the device. If the device becomes idle the 9784 * PM framework is notified. 9785 * 9786 * Context: Kernel thread context 9787 */ 9788 9789 static void 9790 sd_pm_exit(struct sd_lun *un) 9791 { 9792 ASSERT(!mutex_owned(SD_MUTEX(un))); 9793 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9794 9795 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 9796 9797 /* 9798 * After attach the following flag is only read, so don't 9799 * take the penalty of acquiring a mutex for it. 
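 *
 * Callers pair this routine with sd_pm_entry() around command
 * submission, e.g. (as sdclose() does below):
 *
 *	if (sd_pm_entry(un) == DDI_SUCCESS) {
 *		rval = sd_send_scsi_SYNCHRONIZE_CACHE(un);
 *		sd_pm_exit(un);
 *	}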
9800 */ 9801 if (un->un_f_pm_is_enabled == TRUE) { 9802 9803 mutex_enter(&un->un_pm_mutex); 9804 un->un_pm_count--; 9805 9806 SD_TRACE(SD_LOG_IO_PM, un, 9807 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 9808 9809 ASSERT(un->un_pm_count >= 0); 9810 if (un->un_pm_count == 0) { 9811 mutex_exit(&un->un_pm_mutex); 9812 9813 SD_TRACE(SD_LOG_IO_PM, un, 9814 "sd_pm_exit: idle component\n"); 9815 9816 (void) pm_idle_component(SD_DEVINFO(un), 0); 9817 9818 } else { 9819 mutex_exit(&un->un_pm_mutex); 9820 } 9821 } 9822 9823 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9824 } 9825 9826 9827 /* 9828 * Function: sdopen 9829 * 9830 * Description: Driver's open(9e) entry point function. 9831 * 9832 * Arguments: dev_p - pointer to device number 9833 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 9834 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9835 * cred_p - user credential pointer 9836 * 9837 * Return Code: EINVAL 9838 * ENXIO 9839 * EIO 9840 * EROFS 9841 * EBUSY 9842 * 9843 * Context: Kernel thread context 9844 */ 9845 /* ARGSUSED */ 9846 static int 9847 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 9848 { 9849 struct sd_lun *un; 9850 int nodelay; 9851 int part; 9852 int partmask; 9853 int instance; 9854 dev_t dev; 9855 int rval = EIO; 9856 9857 /* Validate the open type */ 9858 if (otyp >= OTYPCNT) { 9859 return (EINVAL); 9860 } 9861 9862 dev = *dev_p; 9863 instance = SDUNIT(dev); 9864 mutex_enter(&sd_detach_mutex); 9865 9866 /* 9867 * Fail the open if there is no softstate for the instance, or 9868 * if another thread somewhere is trying to detach the instance. 9869 */ 9870 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 9871 (un->un_detach_count != 0)) { 9872 mutex_exit(&sd_detach_mutex); 9873 /* 9874 * The probe cache only needs to be cleared when open (9e) fails 9875 * with ENXIO (4238046). 9876 */ 9877 /* 9878 * Unconditionally clearing the probe cache is ok with 9879 * separate sd/ssd binaries; on the x86 platform it can be 9880 * an issue, since both parallel and fibre are handled 9881 * by one binary. 9882 */ 9883 sd_scsi_clear_probe_cache(); 9884 return (ENXIO); 9885 } 9886 9887 /* 9888 * The un_layer_count is to prevent another thread in specfs from 9889 * trying to detach the instance, which can happen when we are 9890 * called from a higher-layer driver instead of through specfs. 9891 * This will not be needed when DDI provides a layered driver 9892 * interface that allows specfs to know that an instance is in 9893 * use by a layered driver and should not be detached. 9894 * 9895 * Note: the semantics for layered driver opens are exactly one 9896 * close for every open. 9897 */ 9898 if (otyp == OTYP_LYR) { 9899 un->un_layer_count++; 9900 } 9901 9902 /* 9903 * Keep a count of the current # of opens in progress. This is because 9904 * some layered drivers try to call us as a regular open. This can 9905 * cause problems that we cannot prevent; however, by keeping this count 9906 * we can at least keep our open and detach routines from racing against 9907 * each other under such conditions. 9908 */ 9909 un->un_opens_in_progress++; 9910 mutex_exit(&sd_detach_mutex); 9911 9912 nodelay = (flag & (FNDELAY | FNONBLOCK)); 9913 part = SDPART(dev); 9914 partmask = 1 << part; 9915 9916 /* 9917 * We use a semaphore here in order to serialize 9918 * open and close requests on the device.
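 * (A semaphore rather than SD_MUTEX is used for this because the
 * holder sleeps across power-management calls and device I/O
 * below, while SD_MUTEX is dropped and re-acquired many times
 * with un_semoclose still held.)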
9919 */ 9920 sema_p(&un->un_semoclose); 9921 9922 mutex_enter(SD_MUTEX(un)); 9923 9924 /* 9925 * All device accesses go through sdstrategy() where we check 9926 * on suspend status but there could be a scsi_poll command, 9927 * which bypasses sdstrategy(), so we need to check pm 9928 * status. 9929 */ 9930 9931 if (!nodelay) { 9932 while ((un->un_state == SD_STATE_SUSPENDED) || 9933 (un->un_state == SD_STATE_PM_CHANGING)) { 9934 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9935 } 9936 9937 mutex_exit(SD_MUTEX(un)); 9938 if (sd_pm_entry(un) != DDI_SUCCESS) { 9939 rval = EIO; 9940 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 9941 "sdopen: sd_pm_entry failed\n"); 9942 goto open_failed_with_pm; 9943 } 9944 mutex_enter(SD_MUTEX(un)); 9945 } 9946 9947 /* check for previous exclusive open */ 9948 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 9949 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9950 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 9951 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 9952 9953 if (un->un_exclopen & (partmask)) { 9954 goto excl_open_fail; 9955 } 9956 9957 if (flag & FEXCL) { 9958 int i; 9959 if (un->un_ocmap.lyropen[part]) { 9960 goto excl_open_fail; 9961 } 9962 for (i = 0; i < (OTYPCNT - 1); i++) { 9963 if (un->un_ocmap.regopen[i] & (partmask)) { 9964 goto excl_open_fail; 9965 } 9966 } 9967 } 9968 9969 /* 9970 * Check the write permission if this is a removable media device, 9971 * NDELAY has not been set, and writable permission is requested. 9972 * 9973 * Note: If NDELAY was set and this is write-protected media the WRITE 9974 * attempt will fail with EIO as part of the I/O processing. This is a 9975 * more permissive implementation that allows the open to succeed and 9976 * WRITE attempts to fail when appropriate. 9977 */ 9978 if (ISREMOVABLE(un)) { 9979 if ((flag & FWRITE) && (!nodelay)) { 9980 mutex_exit(SD_MUTEX(un)); 9981 /* 9982 * Defer the check for write permission on a writable 9983 * DVD drive until sdstrategy; do not fail the open even 9984 * if FWRITE is set, as the device can be writable 9985 * depending upon the media, and the media can change 9986 * after the call to open(). 9987 */ 9988 if (un->un_f_dvdram_writable_device == FALSE) { 9989 if (ISCD(un) || sr_check_wp(dev)) { 9990 rval = EROFS; 9991 mutex_enter(SD_MUTEX(un)); 9992 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 9993 "write to cd or write protected media\n"); 9994 goto open_fail; 9995 } 9996 } 9997 mutex_enter(SD_MUTEX(un)); 9998 } 9999 } 10000 10001 /* 10002 * If opening in NDELAY/NONBLOCK mode, just return. 10003 * Check if disk is ready and has a valid geometry later. 10004 */ 10005 if (!nodelay) { 10006 mutex_exit(SD_MUTEX(un)); 10007 rval = sd_ready_and_valid(un); 10008 mutex_enter(SD_MUTEX(un)); 10009 /* 10010 * Fail if device is not ready or if the number of disk 10011 * blocks is zero or negative for non CD devices. 10012 */ 10013 if ((rval != SD_READY_VALID) || 10014 (!ISCD(un) && un->un_map[part].dkl_nblk <= 0)) { 10015 if (ISREMOVABLE(un)) { 10016 rval = ENXIO; 10017 } else { 10018 rval = EIO; 10019 } 10020 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10021 "device not ready or invalid disk block value\n"); 10022 goto open_fail; 10023 } 10024 #if defined(__i386) || defined(__amd64) 10025 } else { 10026 uchar_t *cp; 10027 /* 10028 * x86 requires special nodelay handling, so that p0 is 10029 * always defined and accessible. 10030 * Invalidate geometry only if device is not already open.
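 * The scan below walks un_ocmap.chkd[], a byte-array view of the
 * regopen/lyropen open maps; if every byte is zero, nothing is
 * open through any open type, so it is safe to invalidate the
 * geometry.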
10031 */ 10032 cp = &un->un_ocmap.chkd[0]; 10033 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10034 if (*cp != (uchar_t)0) { 10035 break; 10036 } 10037 cp++; 10038 } 10039 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10040 un->un_f_geometry_is_valid = FALSE; 10041 } 10042 10043 #endif 10044 } 10045 10046 if (otyp == OTYP_LYR) { 10047 un->un_ocmap.lyropen[part]++; 10048 } else { 10049 un->un_ocmap.regopen[otyp] |= partmask; 10050 } 10051 10052 /* Set up open and exclusive open flags */ 10053 if (flag & FEXCL) { 10054 un->un_exclopen |= (partmask); 10055 } 10056 10057 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10058 "open of part %d type %d\n", part, otyp); 10059 10060 mutex_exit(SD_MUTEX(un)); 10061 if (!nodelay) { 10062 sd_pm_exit(un); 10063 } 10064 10065 sema_v(&un->un_semoclose); 10066 10067 mutex_enter(&sd_detach_mutex); 10068 un->un_opens_in_progress--; 10069 mutex_exit(&sd_detach_mutex); 10070 10071 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 10072 return (DDI_SUCCESS); 10073 10074 excl_open_fail: 10075 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 10076 rval = EBUSY; 10077 10078 open_fail: 10079 mutex_exit(SD_MUTEX(un)); 10080 10081 /* 10082 * On a failed open we must exit the pm management. 10083 */ 10084 if (!nodelay) { 10085 sd_pm_exit(un); 10086 } 10087 open_failed_with_pm: 10088 sema_v(&un->un_semoclose); 10089 10090 mutex_enter(&sd_detach_mutex); 10091 un->un_opens_in_progress--; 10092 if (otyp == OTYP_LYR) { 10093 un->un_layer_count--; 10094 } 10095 mutex_exit(&sd_detach_mutex); 10096 10097 return (rval); 10098 } 10099 10100 10101 /* 10102 * Function: sdclose 10103 * 10104 * Description: Driver's close(9e) entry point function. 10105 * 10106 * Arguments: dev - device number 10107 * flag - file status flag, informational only 10108 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 10109 * cred_p - user credential pointer 10110 * 10111 * Return Code: ENXIO 10112 * 10113 * Context: Kernel thread context 10114 */ 10115 /* ARGSUSED */ 10116 static int 10117 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 10118 { 10119 struct sd_lun *un; 10120 uchar_t *cp; 10121 int part; 10122 int nodelay; 10123 int rval = 0; 10124 10125 /* Validate the open type */ 10126 if (otyp >= OTYPCNT) { 10127 return (ENXIO); 10128 } 10129 10130 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10131 return (ENXIO); 10132 } 10133 10134 part = SDPART(dev); 10135 nodelay = flag & (FNDELAY | FNONBLOCK); 10136 10137 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 10138 "sdclose: close of part %d type %d\n", part, otyp); 10139 10140 /* 10141 * We use a semaphore here in order to serialize 10142 * open and close requests on the device. 10143 */ 10144 sema_p(&un->un_semoclose); 10145 10146 mutex_enter(SD_MUTEX(un)); 10147 10148 /* Don't proceed if power is being changed. 
*/ 10149 while (un->un_state == SD_STATE_PM_CHANGING) { 10150 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10151 } 10152 10153 if (un->un_exclopen & (1 << part)) { 10154 un->un_exclopen &= ~(1 << part); 10155 } 10156 10157 /* Update the open partition map */ 10158 if (otyp == OTYP_LYR) { 10159 un->un_ocmap.lyropen[part] -= 1; 10160 } else { 10161 un->un_ocmap.regopen[otyp] &= ~(1 << part); 10162 } 10163 10164 cp = &un->un_ocmap.chkd[0]; 10165 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10166 if (*cp != NULL) { 10167 break; 10168 } 10169 cp++; 10170 } 10171 10172 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10173 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 10174 10175 /* 10176 * We avoid persistence upon the last close, and set 10177 * the throttle back to the maximum. 10178 */ 10179 un->un_throttle = un->un_saved_throttle; 10180 10181 if (un->un_state == SD_STATE_OFFLINE) { 10182 if (un->un_f_is_fibre == FALSE) { 10183 scsi_log(SD_DEVINFO(un), sd_label, 10184 CE_WARN, "offline\n"); 10185 } 10186 un->un_f_geometry_is_valid = FALSE; 10187 10188 } else { 10189 /* 10190 * Flush any outstanding writes in NVRAM cache. 10191 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 10192 * cmd; it may not work for non-Pluto devices. 10193 * SYNCHRONIZE CACHE is not required for removables, 10194 * except DVD-RAM drives. 10195 * 10196 * Also note: because SYNCHRONIZE CACHE is currently 10197 * the only command issued here that requires the 10198 * drive be powered up, only do the power up before 10199 * sending the Sync Cache command. If additional 10200 * commands are added which require a powered up 10201 * drive, the following sequence may have to change. 10202 * 10203 * And finally, note that parallel SCSI on SPARC 10204 * only issues a Sync Cache to DVD-RAM, a newly 10205 * supported device. 10206 */ 10207 #if defined(__i386) || defined(__amd64) 10208 if (!ISREMOVABLE(un) || 10209 un->un_f_dvdram_writable_device == TRUE) { 10210 #else 10211 if (un->un_f_dvdram_writable_device == TRUE) { 10212 #endif 10213 mutex_exit(SD_MUTEX(un)); 10214 if (sd_pm_entry(un) == DDI_SUCCESS) { 10215 if (sd_send_scsi_SYNCHRONIZE_CACHE(un) 10216 != 0) { 10217 rval = EIO; 10218 } 10219 sd_pm_exit(un); 10220 } else { 10221 rval = EIO; 10222 } 10223 mutex_enter(SD_MUTEX(un)); 10224 } 10225 10226 /* 10227 * For removable media devices, send an ALLOW MEDIA 10228 * REMOVAL command, but don't get upset if it fails. 10229 * Also invalidate the geometry. We need to raise 10230 * the power of the drive before we can call 10231 * sd_send_scsi_DOORLOCK() 10232 */ 10233 if (ISREMOVABLE(un)) { 10234 mutex_exit(SD_MUTEX(un)); 10235 if (sd_pm_entry(un) == DDI_SUCCESS) { 10236 rval = sd_send_scsi_DOORLOCK(un, 10237 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 10238 10239 sd_pm_exit(un); 10240 if (ISCD(un) && (rval != 0) && 10241 (nodelay != 0)) { 10242 rval = ENXIO; 10243 } 10244 } else { 10245 rval = EIO; 10246 } 10247 mutex_enter(SD_MUTEX(un)); 10248 10249 sr_ejected(un); 10250 /* 10251 * Destroy the cache (if it exists) which was 10252 * allocated for the write maps since this is 10253 * the last close for this media. 10254 */ 10255 if (un->un_wm_cache) { 10256 /* 10257 * Check if there are pending commands; 10258 * if there are, give a warning and 10259 * do not destroy the cache.
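 * (kmem_cache_destroy(9F) requires that the cache contain
 * no allocated objects; destroying it while write maps are
 * still outstanding would panic the system.)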
10260 */ 10261 if (un->un_ncmds_in_driver > 0) { 10262 scsi_log(SD_DEVINFO(un), 10263 sd_label, CE_WARN, 10264 "Unable to clean up memory " 10265 "because of pending I/O\n"); 10266 } else { 10267 kmem_cache_destroy( 10268 un->un_wm_cache); 10269 un->un_wm_cache = NULL; 10270 } 10271 } 10272 } 10273 } 10274 } 10275 10276 mutex_exit(SD_MUTEX(un)); 10277 sema_v(&un->un_semoclose); 10278 10279 if (otyp == OTYP_LYR) { 10280 mutex_enter(&sd_detach_mutex); 10281 /* 10282 * The detach routine may run when the layer count 10283 * drops to zero. 10284 */ 10285 un->un_layer_count--; 10286 mutex_exit(&sd_detach_mutex); 10287 } 10288 10289 return (rval); 10290 } 10291 10292 10293 /* 10294 * Function: sd_ready_and_valid 10295 * 10296 * Description: Test if device is ready and has a valid geometry. 10297 * 10298 * Arguments: dev - device number 10299 * un - driver soft state (unit) structure 10300 * 10301 * Return Code: SD_READY_VALID ready and valid label 10302 * SD_READY_NOT_VALID ready, geom ops never applicable 10303 * SD_NOT_READY_VALID not ready, no label 10304 * 10305 * Context: Never called at interrupt context. 10306 */ 10307 10308 static int 10309 sd_ready_and_valid(struct sd_lun *un) 10310 { 10311 struct sd_errstats *stp; 10312 uint64_t capacity; 10313 uint_t lbasize; 10314 int rval = SD_READY_VALID; 10315 char name_str[48]; 10316 10317 ASSERT(un != NULL); 10318 ASSERT(!mutex_owned(SD_MUTEX(un))); 10319 10320 mutex_enter(SD_MUTEX(un)); 10321 if (ISREMOVABLE(un)) { 10322 mutex_exit(SD_MUTEX(un)); 10323 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 10324 rval = SD_NOT_READY_VALID; 10325 mutex_enter(SD_MUTEX(un)); 10326 goto done; 10327 } 10328 10329 mutex_enter(SD_MUTEX(un)); 10330 if ((un->un_f_geometry_is_valid == FALSE) || 10331 (un->un_f_blockcount_is_valid == FALSE) || 10332 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 10333 10334 /* capacity has to be read every open. */ 10335 mutex_exit(SD_MUTEX(un)); 10336 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 10337 &lbasize, SD_PATH_DIRECT) != 0) { 10338 mutex_enter(SD_MUTEX(un)); 10339 un->un_f_geometry_is_valid = FALSE; 10340 rval = SD_NOT_READY_VALID; 10341 goto done; 10342 } else { 10343 mutex_enter(SD_MUTEX(un)); 10344 sd_update_block_info(un, lbasize, capacity); 10345 } 10346 } 10347 10348 /* 10349 * If this is a non 512 block device, allocate space for 10350 * the wmap cache. This is being done here since every time 10351 * a media is changed this routine will be called and the 10352 * block size is a function of media rather than device. 10353 */ 10354 if (NOT_DEVBSIZE(un)) { 10355 if (!(un->un_wm_cache)) { 10356 (void) snprintf(name_str, sizeof (name_str), 10357 "%s%d_cache", 10358 ddi_driver_name(SD_DEVINFO(un)), 10359 ddi_get_instance(SD_DEVINFO(un))); 10360 un->un_wm_cache = kmem_cache_create( 10361 name_str, sizeof (struct sd_w_map), 10362 8, sd_wm_cache_constructor, 10363 sd_wm_cache_destructor, NULL, 10364 (void *)un, NULL, 0); 10365 if (!(un->un_wm_cache)) { 10366 rval = ENOMEM; 10367 goto done; 10368 } 10369 } 10370 } 10371 10372 /* 10373 * Check if the media in the device is writable or not. 10374 */ 10375 if ((un->un_f_geometry_is_valid == FALSE) && ISCD(un)) { 10376 sd_check_for_writable_cd(un); 10377 } 10378 10379 } else { 10380 /* 10381 * Do a test unit ready to clear any unit attention from non-cd 10382 * devices. 
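 * (A pending unit attention is reported once and then cleared,
 * so this throwaway TEST UNIT READY absorbs it before the
 * status of the commands that follow is interpreted.)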
10383 */ 10384 mutex_exit(SD_MUTEX(un)); 10385 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 10386 mutex_enter(SD_MUTEX(un)); 10387 } 10388 10389 10390 if (un->un_state == SD_STATE_NORMAL) { 10391 /* 10392 * If the target is not yet ready here (defined by a TUR 10393 * failure), invalidate the geometry and print an 'offline' 10394 * message. This is a legacy message, as the state of the 10395 * target is not actually changed to SD_STATE_OFFLINE. 10396 * 10397 * If the TUR fails for EACCES (Reservation Conflict), it 10398 * means there actually is nothing wrong with the target that 10399 * would require invalidating the geometry, so continue in 10400 * that case as if the TUR was successful. 10401 */ 10402 int err; 10403 10404 mutex_exit(SD_MUTEX(un)); 10405 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 10406 mutex_enter(SD_MUTEX(un)); 10407 10408 if ((err != 0) && (err != EACCES)) { 10409 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10410 "offline\n"); 10411 un->un_f_geometry_is_valid = FALSE; 10412 rval = SD_NOT_READY_VALID; 10413 goto done; 10414 } 10415 } 10416 10417 if (un->un_f_format_in_progress == FALSE) { 10418 /* 10419 * Note: sd_validate_geometry may return TRUE, but that does 10420 * not necessarily mean un_f_geometry_is_valid == TRUE! 10421 */ 10422 rval = sd_validate_geometry(un, SD_PATH_DIRECT); 10423 if (rval == ENOTSUP) { 10424 if (un->un_f_geometry_is_valid == TRUE) 10425 rval = 0; 10426 else { 10427 rval = SD_READY_NOT_VALID; 10428 goto done; 10429 } 10430 } 10431 if (rval != 0) { 10432 /* 10433 * We don't check the validity of geometry for 10434 * CDROMs. Also we assume we have a good label 10435 * even if sd_validate_geometry returned ENOMEM. 10436 */ 10437 if (!ISCD(un) && rval != ENOMEM) { 10438 rval = SD_NOT_READY_VALID; 10439 goto done; 10440 } 10441 } 10442 } 10443 10444 #ifdef DOESNTWORK /* on eliteII, see 1118607 */ 10445 /* 10446 * check to see if this disk is write protected; if it is and we have 10447 * not set read-only, then fail 10448 */ 10449 if ((flag & FWRITE) && (sr_check_wp(dev))) { 10450 New_state(un, SD_STATE_CLOSED); 10451 goto done; 10452 } 10453 #endif 10454 10455 /* 10456 * If this is a removable media device, try and send 10457 * a PREVENT MEDIA REMOVAL command, but don't get upset 10458 * if it fails. For a CD, however, it is an error. 10459 */ 10460 if (ISREMOVABLE(un)) { 10461 mutex_exit(SD_MUTEX(un)); 10462 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 10463 SD_PATH_DIRECT) != 0) && ISCD(un)) { 10464 rval = SD_NOT_READY_VALID; 10465 mutex_enter(SD_MUTEX(un)); 10466 goto done; 10467 } 10468 mutex_enter(SD_MUTEX(un)); 10469 } 10470 10471 /* The state has changed, inform the media watch routines */ 10472 un->un_mediastate = DKIO_INSERTED; 10473 cv_broadcast(&un->un_state_cv); 10474 rval = SD_READY_VALID; 10475 10476 done: 10477 10478 /* 10479 * Initialize the capacity kstat value, if no media previously 10480 * (capacity kstat is 0) and media has been inserted 10481 * (un_blockcount > 0). 10482 * This is a more generic way than checking for ISREMOVABLE.
10483 */ 10484 if (un->un_errstats != NULL) { 10485 stp = (struct sd_errstats *)un->un_errstats->ks_data; 10486 if ((stp->sd_capacity.value.ui64 == 0) && 10487 (un->un_f_blockcount_is_valid == TRUE)) { 10488 stp->sd_capacity.value.ui64 = 10489 (uint64_t)((uint64_t)un->un_blockcount * 10490 un->un_sys_blocksize); 10491 } 10492 } 10493 10494 mutex_exit(SD_MUTEX(un)); 10495 return (rval); 10496 } 10497 10498 10499 /* 10500 * Function: sdmin 10501 * 10502 * Description: Routine to limit the size of a data transfer. Used in 10503 * conjunction with physio(9F). 10504 * 10505 * Arguments: bp - pointer to the indicated buf(9S) struct. 10506 * 10507 * Context: Kernel thread context. 10508 */ 10509 10510 static void 10511 sdmin(struct buf *bp) 10512 { 10513 struct sd_lun *un; 10514 int instance; 10515 10516 instance = SDUNIT(bp->b_edev); 10517 10518 un = ddi_get_soft_state(sd_state, instance); 10519 ASSERT(un != NULL); 10520 10521 if (bp->b_bcount > un->un_max_xfer_size) { 10522 bp->b_bcount = un->un_max_xfer_size; 10523 } 10524 } 10525 10526 10527 /* 10528 * Function: sdread 10529 * 10530 * Description: Driver's read(9e) entry point function. 10531 * 10532 * Arguments: dev - device number 10533 * uio - structure pointer describing where data is to be stored 10534 * in user's space 10535 * cred_p - user credential pointer 10536 * 10537 * Return Code: ENXIO 10538 * EIO 10539 * EINVAL 10540 * value returned by physio 10541 * 10542 * Context: Kernel thread context. 10543 */ 10544 /* ARGSUSED */ 10545 static int 10546 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 10547 { 10548 struct sd_lun *un = NULL; 10549 int secmask; 10550 int err; 10551 10552 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10553 return (ENXIO); 10554 } 10555 10556 ASSERT(!mutex_owned(SD_MUTEX(un))); 10557 10558 if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) { 10559 mutex_enter(SD_MUTEX(un)); 10560 /* 10561 * Because the call to sd_ready_and_valid will issue I/O we 10562 * must wait here if either the device is suspended or 10563 * if it's power level is changing. 10564 */ 10565 while ((un->un_state == SD_STATE_SUSPENDED) || 10566 (un->un_state == SD_STATE_PM_CHANGING)) { 10567 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10568 } 10569 un->un_ncmds_in_driver++; 10570 mutex_exit(SD_MUTEX(un)); 10571 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10572 mutex_enter(SD_MUTEX(un)); 10573 un->un_ncmds_in_driver--; 10574 ASSERT(un->un_ncmds_in_driver >= 0); 10575 mutex_exit(SD_MUTEX(un)); 10576 return (EIO); 10577 } 10578 mutex_enter(SD_MUTEX(un)); 10579 un->un_ncmds_in_driver--; 10580 ASSERT(un->un_ncmds_in_driver >= 0); 10581 mutex_exit(SD_MUTEX(un)); 10582 } 10583 10584 /* 10585 * Read requests are restricted to multiples of the system block size. 10586 */ 10587 secmask = un->un_sys_blocksize - 1; 10588 10589 if (uio->uio_loffset & ((offset_t)(secmask))) { 10590 SD_ERROR(SD_LOG_READ_WRITE, un, 10591 "sdread: file offset not modulo %d\n", 10592 un->un_sys_blocksize); 10593 err = EINVAL; 10594 } else if (uio->uio_iov->iov_len & (secmask)) { 10595 SD_ERROR(SD_LOG_READ_WRITE, un, 10596 "sdread: transfer length not modulo %d\n", 10597 un->un_sys_blocksize); 10598 err = EINVAL; 10599 } else { 10600 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 10601 } 10602 return (err); 10603 } 10604 10605 10606 /* 10607 * Function: sdwrite 10608 * 10609 * Description: Driver's write(9e) entry point function. 
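 *
 * As with sdread(), offsets and transfer lengths must be
 * multiples of un_sys_blocksize; e.g. with a 512-byte system
 * block size, secmask is 0x1ff and an offset is aligned iff
 * (uio_loffset & 0x1ff) == 0.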
10605 
10606 /*
10607  * Function: sdwrite
10608  *
10609  * Description: Driver's write(9e) entry point function.
10610  *
10611  * Arguments: dev - device number
10612  *		uio - structure pointer describing where data is stored in
10613  *		      user's space
10614  *		cred_p - user credential pointer
10615  *
10616  * Return Code: ENXIO
10617  *		EIO
10618  *		EINVAL
10619  *		value returned by physio
10620  *
10621  * Context: Kernel thread context.
10622  */
10623 /* ARGSUSED */
10624 static int
10625 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
10626 {
10627 	struct sd_lun	*un = NULL;
10628 	int		secmask;
10629 	int		err;
10630 
10631 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10632 		return (ENXIO);
10633 	}
10634 
10635 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10636 
10637 	if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) {
10638 		mutex_enter(SD_MUTEX(un));
10639 		/*
10640 		 * Because the call to sd_ready_and_valid will issue I/O, we
10641 		 * must wait here if either the device is suspended or
10642 		 * its power level is changing.
10643 		 */
10644 		while ((un->un_state == SD_STATE_SUSPENDED) ||
10645 		    (un->un_state == SD_STATE_PM_CHANGING)) {
10646 			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10647 		}
10648 		un->un_ncmds_in_driver++;
10649 		mutex_exit(SD_MUTEX(un));
10650 		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
10651 			mutex_enter(SD_MUTEX(un));
10652 			un->un_ncmds_in_driver--;
10653 			ASSERT(un->un_ncmds_in_driver >= 0);
10654 			mutex_exit(SD_MUTEX(un));
10655 			return (EIO);
10656 		}
10657 		mutex_enter(SD_MUTEX(un));
10658 		un->un_ncmds_in_driver--;
10659 		ASSERT(un->un_ncmds_in_driver >= 0);
10660 		mutex_exit(SD_MUTEX(un));
10661 	}
10662 
10663 	/*
10664 	 * Write requests are restricted to multiples of the system block size.
10665 	 */
10666 	secmask = un->un_sys_blocksize - 1;
10667 
10668 	if (uio->uio_loffset & ((offset_t)(secmask))) {
10669 		SD_ERROR(SD_LOG_READ_WRITE, un,
10670 		    "sdwrite: file offset not modulo %d\n",
10671 		    un->un_sys_blocksize);
10672 		err = EINVAL;
10673 	} else if (uio->uio_iov->iov_len & (secmask)) {
10674 		SD_ERROR(SD_LOG_READ_WRITE, un,
10675 		    "sdwrite: transfer length not modulo %d\n",
10676 		    un->un_sys_blocksize);
10677 		err = EINVAL;
10678 	} else {
10679 		err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
10680 	}
10681 	return (err);
10682 }
10683 
10684 
10685 /*
10686  * Function: sdaread
10687  *
10688  * Description: Driver's aread(9e) entry point function.
10689  *
10690  * Arguments: dev - device number
10691  *		aio - structure pointer describing where data is to be stored
10692  *		cred_p - user credential pointer
10693  *
10694  * Return Code: ENXIO
10695  *		EIO
10696  *		EINVAL
10697  *		value returned by aphysio
10698  *
10699  * Context: Kernel thread context.
10700  */
10701 /* ARGSUSED */
10702 static int
10703 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
10704 {
10705 	struct sd_lun	*un = NULL;
10706 	struct uio	*uio = aio->aio_uio;
10707 	int		secmask;
10708 	int		err;
10709 
10710 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10711 		return (ENXIO);
10712 	}
10713 
10714 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10715 
10716 	if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) {
10717 		mutex_enter(SD_MUTEX(un));
10718 		/*
10719 		 * Because the call to sd_ready_and_valid will issue I/O, we
10720 		 * must wait here if either the device is suspended or
10721 		 * its power level is changing.
10722 		 */
10723 		while ((un->un_state == SD_STATE_SUSPENDED) ||
10724 		    (un->un_state == SD_STATE_PM_CHANGING)) {
10725 			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10726 		}
10727 		un->un_ncmds_in_driver++;
10728 		mutex_exit(SD_MUTEX(un));
10729 		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
10730 			mutex_enter(SD_MUTEX(un));
10731 			un->un_ncmds_in_driver--;
10732 			ASSERT(un->un_ncmds_in_driver >= 0);
10733 			mutex_exit(SD_MUTEX(un));
10734 			return (EIO);
10735 		}
10736 		mutex_enter(SD_MUTEX(un));
10737 		un->un_ncmds_in_driver--;
10738 		ASSERT(un->un_ncmds_in_driver >= 0);
10739 		mutex_exit(SD_MUTEX(un));
10740 	}
10741 
10742 	/*
10743 	 * Read requests are restricted to multiples of the system block size.
10744 	 */
10745 	secmask = un->un_sys_blocksize - 1;
10746 
10747 	if (uio->uio_loffset & ((offset_t)(secmask))) {
10748 		SD_ERROR(SD_LOG_READ_WRITE, un,
10749 		    "sdaread: file offset not modulo %d\n",
10750 		    un->un_sys_blocksize);
10751 		err = EINVAL;
10752 	} else if (uio->uio_iov->iov_len & (secmask)) {
10753 		SD_ERROR(SD_LOG_READ_WRITE, un,
10754 		    "sdaread: transfer length not modulo %d\n",
10755 		    un->un_sys_blocksize);
10756 		err = EINVAL;
10757 	} else {
10758 		err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
10759 	}
10760 	return (err);
10761 }
10762 
10763 
10764 /*
10765  * Function: sdawrite
10766  *
10767  * Description: Driver's awrite(9e) entry point function.
10768  *
10769  * Arguments: dev - device number
10770  *		aio - structure pointer describing where data is stored
10771  *		cred_p - user credential pointer
10772  *
10773  * Return Code: ENXIO
10774  *		EIO
10775  *		EINVAL
10776  *		value returned by aphysio
10777  *
10778  * Context: Kernel thread context.
10779  */
10780 /* ARGSUSED */
10781 static int
10782 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
10783 {
10784 	struct sd_lun	*un = NULL;
10785 	struct uio	*uio = aio->aio_uio;
10786 	int		secmask;
10787 	int		err;
10788 
10789 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10790 		return (ENXIO);
10791 	}
10792 
10793 	ASSERT(!mutex_owned(SD_MUTEX(un)));
10794 
10795 	if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) {
10796 		mutex_enter(SD_MUTEX(un));
10797 		/*
10798 		 * Because the call to sd_ready_and_valid will issue I/O, we
10799 		 * must wait here if either the device is suspended or
10800 		 * its power level is changing.
10801 		 */
10802 		while ((un->un_state == SD_STATE_SUSPENDED) ||
10803 		    (un->un_state == SD_STATE_PM_CHANGING)) {
10804 			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10805 		}
10806 		un->un_ncmds_in_driver++;
10807 		mutex_exit(SD_MUTEX(un));
10808 		if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
10809 			mutex_enter(SD_MUTEX(un));
10810 			un->un_ncmds_in_driver--;
10811 			ASSERT(un->un_ncmds_in_driver >= 0);
10812 			mutex_exit(SD_MUTEX(un));
10813 			return (EIO);
10814 		}
10815 		mutex_enter(SD_MUTEX(un));
10816 		un->un_ncmds_in_driver--;
10817 		ASSERT(un->un_ncmds_in_driver >= 0);
10818 		mutex_exit(SD_MUTEX(un));
10819 	}
10820 
10821 	/*
10822 	 * Write requests are restricted to multiples of the system block size.
10823 	 */
10824 	secmask = un->un_sys_blocksize - 1;
10825 
10826 	if (uio->uio_loffset & ((offset_t)(secmask))) {
10827 		SD_ERROR(SD_LOG_READ_WRITE, un,
10828 		    "sdawrite: file offset not modulo %d\n",
10829 		    un->un_sys_blocksize);
10830 		err = EINVAL;
10831 	} else if (uio->uio_iov->iov_len & (secmask)) {
10832 		SD_ERROR(SD_LOG_READ_WRITE, un,
10833 		    "sdawrite: transfer length not modulo %d\n",
10834 		    un->un_sys_blocksize);
10835 		err = EINVAL;
10836 	} else {
10837 		err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio);
10838 	}
10839 	return (err);
10840 }
10841 
10842 
10843 
10844 
10845 
10846 /*
10847  * Driver IO processing follows the following sequence:
10848  *
10849  *     sdioctl(9E)         sdstrategy(9E)                   biodone(9F)
10850  *         |                    |                                ^
10851  *         v                    v                                |
10852  *  sd_send_scsi_cmd()   ddi_xbuf_qstrategy()     +--------------+----+
10853  *         |                    |                 |                   |
10854  *         v                    |                 |                   |
10855  *  sd_uscsi_strategy()   sd_xbuf_strategy()  sd_buf_iodone()  sd_uscsi_iodone()
10856  *         |                    |                 ^                   ^
10857  *         v                    v                 |                   |
10858  *  SD_BEGIN_IOSTART()   SD_BEGIN_IOSTART()       |                   |
10859  *         |                    |                 |                   |
10860  *      +---+                   |       +---------+                   +-------+
10861  *      |   |                   |       |                                     |
10862  *      |   SD_NEXT_IOSTART()|  |       SD_NEXT_IODONE()|                     |
10863  *      |                       v                       |                     |
10864  *      |    sd_mapblockaddr_iostart()   sd_mapblockaddr_iodone()             |
10865  *      |                       |                       ^                     |
10866  *      |   SD_NEXT_IOSTART()|  |       SD_NEXT_IODONE()|                     |
10867  *      |                       v                       |                     |
10868  *      |    sd_mapblocksize_iostart()   sd_mapblocksize_iodone()             |
10869  *      |                       |                       ^                     |
10870  *      |   SD_NEXT_IOSTART()|  |       SD_NEXT_IODONE()|                     |
10871  *      |                       v                       |                     |
10872  *      |    sd_checksum_iostart()          sd_checksum_iodone()              |
10873  *      |                       |                       ^                     |
10874  *      +-> SD_NEXT_IOSTART()|  |       SD_NEXT_IODONE()+-------------------->+
10875  *      |                       v                       |                     |
10876  *      |    sd_pm_iostart()                sd_pm_iodone()                    |
10877  *      |                       |                       ^                     |
10878  *      |                       |                       |                     |
10879  *      +-> SD_NEXT_IOSTART()|  |       SD_BEGIN_IODONE()--+------------------+
10880  *                              |                          ^
10881  *                              v                          |
10882  *                      sd_core_iostart()                  |
10883  *                              |                          |
10884  *                              |        +------>(*destroypkt)()
10885  *                 +-> sd_start_cmds() <-+       |         |
10886  *                 |            |        |       |         v
10887  *                 |            |        |       |  scsi_destroy_pkt(9F)
10888  *                 |            |        |       |
10889  *                 +->(*initpkt)()       |       +- sdintr()
10890  *                 |  |                  |          |  |
10891  *                 |  +-> scsi_init_pkt(9F)         |  +-> sd_handle_xxx()
10892  *                 |  +-> scsi_setup_cdb(9F)        |
10893  *                 |                                |
10894  *                 +--> scsi_transport(9F)          |
10895  *                          |                       |
10896  *                          +----> SCSA ----------->+
10897  *
10898  *
10899  * This code is based upon the following presumptions:
10900  *
10901  *   - iostart and iodone functions operate on buf(9S) structures. These
10902  *     functions perform the necessary operations on the buf(9S) and pass
10903  *     them along to the next function in the chain by using the macros
10904  *     SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE()
10905  *     (for iodone side functions).
10906  *
10907  *   - The iostart side functions may sleep. The iodone side functions
10908  *     are called under interrupt context and may NOT sleep. Therefore
10909  *     iodone side functions also may not call iostart side functions.
10910  *     (NOTE: iostart side functions should NOT sleep for memory, as
10911  *     this could result in deadlock.)
10912  *
10913  *   - An iostart side function may call its corresponding iodone side
10914  *     function directly (if necessary).
10915  *
10916  *   - In the event of an error, an iostart side function can return a buf(9S)
10917  *     to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and
10918  *     b_error in the usual way, of course).
10919  *
10920  *   - The taskq mechanism may be used by the iodone side functions to dispatch
10921  *     requests to the iostart side functions. The iostart side functions in
10922  *     this case would be called under the context of a taskq thread, so it's
10923  *     OK for them to block/sleep/spin in this case.
10924  *
10925  *   - iostart side functions may allocate "shadow" buf(9S) structs and
10926  *     pass them along to the next function in the chain. The corresponding
10927  *     iodone side functions must coalesce the "shadow" bufs and return
10928  *     the "original" buf to the next higher layer.
10929  *
10930  *   - The b_private field of the buf(9S) struct holds a pointer to
10931  *     an sd_xbuf struct, which contains information needed to
10932  *     construct the scsi_pkt for the command.
10933  *
10934  *   - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
10935  *     layer must acquire & release the SD_MUTEX(un) as needed.
10936  */
10937 
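/*
 * Editorial illustration (hypothetical layer, not part of the driver):
 * a minimal iostart/iodone pair written to the conventions above would
 * look like the following sketch.  The names sd_example_iostart and
 * sd_example_iodone are invented for illustration; the error path
 * mirrors the one used by sd_pm_iostart() below.
 *
 *	static void
 *	sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		if (some_setup_failed) {
 *			bioerror(bp, EIO);
 *			bp->b_resid = bp->b_bcount;
 *			SD_BEGIN_IODONE(index, un, bp);
 *			return;
 *		}
 *		SD_NEXT_IOSTART(index, un, bp);
 *	}
 *
 *	static void
 *	sd_example_iodone(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		SD_NEXT_IODONE(index, un, bp);	(must not sleep here)
 *	}
 */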
10938 
10939 /*
10940  * Create taskq for all targets in the system. This is created at
10941  * _init(9E) and destroyed at _fini(9E).
10942  *
10943  * Note: here we set the minalloc to a reasonably high number to ensure that
10944  * we will have an adequate supply of task entries available at interrupt time.
10945  * This is used in conjunction with the TASKQ_PREPOPULATE flag in
10946  * sd_taskq_create(). Since we do not want to sleep for allocations at
10947  * interrupt time, set maxalloc equal to minalloc. That way we will just fail
10948  * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
10949  * requests at any one instant in time.
10950  */
10951 #define	SD_TASKQ_NUMTHREADS	8
10952 #define	SD_TASKQ_MINALLOC	256
10953 #define	SD_TASKQ_MAXALLOC	256
10954 
10955 static taskq_t	*sd_tq = NULL;
10956 static int	sd_taskq_minalloc = SD_TASKQ_MINALLOC;
10957 static int	sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
10958 
10959 /*
10960  * The following task queue is created for the write phase of
10961  * read-modify-write on devices with a non-512-byte block size.
10962  * The number of threads is limited to 1 for now; this was chosen
10963  * because the queue currently applies only to DVD-RAM and MO drives,
10964  * for which performance is not the main criterion at this stage.
10965  * Note: it remains to be explored whether a single taskq could be used.
10966  */
10967 #define	SD_WMR_TASKQ_NUMTHREADS	1
10968 static taskq_t	*sd_wmr_tq = NULL;
10969 
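/*
 * Editorial illustration (hypothetical caller; task_func and arg are
 * placeholders, modeled on the taskq_dispatch() usage in
 * sd_mapblocksize_iodone() below): because maxalloc equals minalloc
 * and dispatchers use KM_NOSLEEP, a dispatch beyond the prepopulated
 * 256 entries fails instead of sleeping, so every caller must be
 * prepared for a zero return:
 *
 *	if (taskq_dispatch(sd_wmr_tq, task_func, arg, KM_NOSLEEP) == 0) {
 *		(dispatch failed: fail the command rather than sleep)
 *	}
 */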
10970 /*
10971  * Function: sd_taskq_create
10972  *
10973  * Description: Create taskq thread(s) and preallocate task entries
10974  *
10975  * Return Code: None; the created taskqs are stored in sd_tq and sd_wmr_tq.
10976  *
10977  * Context: Can sleep. Requires blockable context.
10978  *
10979  * Notes: - The taskq() facility currently is NOT part of the DDI.
10980  *	    (definitely NOT recommended for 3rd-party drivers!) :-)
10981  *	  - taskq_create() will block for memory; it will also panic
10982  *	    if it cannot create the requested number of threads.
10983  *	  - Currently taskq_create() creates threads that cannot be
10984  *	    swapped.
10985  *	  - We use TASKQ_PREPOPULATE to ensure we have an adequate
10986  *	    supply of taskq entries at interrupt time (ie, so that we
10987  *	    do not have to sleep for memory).
10988  */
10989 
10990 static void
10991 sd_taskq_create(void)
10992 {
10993 	char	taskq_name[TASKQ_NAMELEN];
10994 
10995 	ASSERT(sd_tq == NULL);
10996 	ASSERT(sd_wmr_tq == NULL);
10997 
10998 	(void) snprintf(taskq_name, sizeof (taskq_name),
10999 	    "%s_drv_taskq", sd_label);
11000 	sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
11001 	    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11002 	    TASKQ_PREPOPULATE));
11003 
11004 	(void) snprintf(taskq_name, sizeof (taskq_name),
11005 	    "%s_rmw_taskq", sd_label);
11006 	sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
11007 	    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11008 	    TASKQ_PREPOPULATE));
11009 }
11010 
11011 
11012 /*
11013  * Function: sd_taskq_delete
11014  *
11015  * Description: Complementary cleanup routine for sd_taskq_create().
11016  *
11017  * Context: Kernel thread context.
11018  */
11019 
11020 static void
11021 sd_taskq_delete(void)
11022 {
11023 	ASSERT(sd_tq != NULL);
11024 	ASSERT(sd_wmr_tq != NULL);
11025 	taskq_destroy(sd_tq);
11026 	taskq_destroy(sd_wmr_tq);
11027 	sd_tq = NULL;
11028 	sd_wmr_tq = NULL;
11029 }
11030 
11031 
11032 /*
11033  * Function: sdstrategy
11034  *
11035  * Description: Driver's strategy (9E) entry point function.
11036  *
11037  * Arguments: bp - pointer to buf(9S)
11038  *
11039  * Return Code: Always returns zero
11040  *
11041  * Context: Kernel thread context.
11042  */
11043 
11044 static int
11045 sdstrategy(struct buf *bp)
11046 {
11047 	struct sd_lun *un;
11048 
11049 	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11050 	if (un == NULL) {
11051 		bioerror(bp, EIO);
11052 		bp->b_resid = bp->b_bcount;
11053 		biodone(bp);
11054 		return (0);
11055 	}
11056 	/* As was done in the past, fail new cmds if the state is dumping. */
11057 	if (un->un_state == SD_STATE_DUMPING) {
11058 		bioerror(bp, ENXIO);
11059 		bp->b_resid = bp->b_bcount;
11060 		biodone(bp);
11061 		return (0);
11062 	}
11063 
11064 	ASSERT(!mutex_owned(SD_MUTEX(un)));
11065 
11066 	/*
11067 	 * Commands may sneak in while we release the mutex in
11068 	 * DDI_SUSPEND, so we should block new commands here. However, old
11069 	 * commands that are still in the driver at this point should
11070 	 * still be allowed to drain.
11071 	 */
11072 	mutex_enter(SD_MUTEX(un));
11073 	/*
11074 	 * Must wait here if either the device is suspended or
11075 	 * its power level is changing.
11076 	 */
11077 	while ((un->un_state == SD_STATE_SUSPENDED) ||
11078 	    (un->un_state == SD_STATE_PM_CHANGING)) {
11079 		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11080 	}
11081 
11082 	un->un_ncmds_in_driver++;
11083 
11084 	/*
11085 	 * atapi: Since we are currently running the CD in PIO mode, we need
11086 	 * to call bp_mapin here to avoid having bp_mapin called in interrupt
11087 	 * context under the HBA's init_pkt routine.
11088 	 */
11089 	if (un->un_f_cfg_is_atapi == TRUE) {
11090 		mutex_exit(SD_MUTEX(un));
11091 		bp_mapin(bp);
11092 		mutex_enter(SD_MUTEX(un));
11093 	}
11094 	SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
11095 	    un->un_ncmds_in_driver);
11096 
11097 	mutex_exit(SD_MUTEX(un));
11098 
11099 	/*
11100 	 * This will (eventually) allocate the sd_xbuf area and
11101 	 * call sd_xbuf_strategy(). We just want to return the
11102 	 * result of ddi_xbuf_qstrategy so that we have an
11103 	 * optimized tail call, which saves us a stack frame.
11104 	 */
11105 	return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
11106 }
11107 
11108 
11109 /*
11110  * Function: sd_xbuf_strategy
11111  *
11112  * Description: Function for initiating IO operations via the
11113  *		ddi_xbuf_qstrategy() mechanism.
11114  *
11115  * Context: Kernel thread context.
11116  */
11117 
11118 static void
11119 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
11120 {
11121 	struct sd_lun *un = arg;
11122 
11123 	ASSERT(bp != NULL);
11124 	ASSERT(xp != NULL);
11125 	ASSERT(un != NULL);
11126 	ASSERT(!mutex_owned(SD_MUTEX(un)));
11127 
11128 	/*
11129 	 * Initialize the fields in the xbuf and save a pointer to the
11130 	 * xbuf in bp->b_private.
11131 	 */
11132 	sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL);
11133 
11134 	/* Send the buf down the iostart chain */
11135 	SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp);
11136 }
11137 
11138 
11139 /*
11140  * Function: sd_xbuf_init
11141  *
11142  * Description: Prepare the given sd_xbuf struct for use.
11143  *
11144  * Arguments: un - ptr to softstate
11145  *		bp - ptr to associated buf(9S)
11146  *		xp - ptr to associated sd_xbuf
11147  *		chain_type - IO chain type to use:
11148  *			SD_CHAIN_NULL
11149  *			SD_CHAIN_BUFIO
11150  *			SD_CHAIN_USCSI
11151  *			SD_CHAIN_DIRECT
11152  *			SD_CHAIN_DIRECT_PRIORITY
11153  *		pktinfop - ptr to private data struct for scsi_pkt(9S)
11154  *			   initialization; may be NULL if none.
11155  *
11156  * Context: Kernel thread context
11157  */
11158 
11159 static void
11160 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
11161     uchar_t chain_type, void *pktinfop)
11162 {
11163 	int index;
11164 
11165 	ASSERT(un != NULL);
11166 	ASSERT(bp != NULL);
11167 	ASSERT(xp != NULL);
11168 
11169 	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
11170 	    bp, chain_type);
11171 
11172 	xp->xb_un = un;
11173 	xp->xb_pktp = NULL;
11174 	xp->xb_pktinfo = pktinfop;
11175 	xp->xb_private = bp->b_private;
11176 	xp->xb_blkno = (daddr_t)bp->b_blkno;
11177 
11178 	/*
11179 	 * Set up the iostart and iodone chain indexes in the xbuf, based
11180 	 * upon the specified chain type to use.
11181 	 */
11182 	switch (chain_type) {
11183 	case SD_CHAIN_NULL:
11184 		/*
11185 		 * Fall through to just use the values for the buf type, even
11186 		 * though for the NULL chain these values will never be used.
11187 		 */
11188 		/* FALLTHRU */
11189 	case SD_CHAIN_BUFIO:
11190 		index = un->un_buf_chain_type;
11191 		break;
11192 	case SD_CHAIN_USCSI:
11193 		index = un->un_uscsi_chain_type;
11194 		break;
11195 	case SD_CHAIN_DIRECT:
11196 		index = un->un_direct_chain_type;
11197 		break;
11198 	case SD_CHAIN_DIRECT_PRIORITY:
11199 		index = un->un_priority_chain_type;
11200 		break;
11201 	default:
11202 		/* We're really broken if we ever get here... */
11203 		panic("sd_xbuf_init: illegal chain type!");
11204 		/*NOTREACHED*/
11205 	}
11206 
11207 	xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
11208 	xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;
11209 
11210 	/*
11211 	 * It might be a bit easier to simply bzero the entire xbuf above,
11212 	 * but it turns out that since we init a fair number of members anyway,
11213 	 * we save a fair number of cycles by doing explicit assignment of zero.
11214 	 */
11215 	xp->xb_pkt_flags = 0;
11216 	xp->xb_dma_resid = 0;
11217 	xp->xb_retry_count = 0;
11218 	xp->xb_victim_retry_count = 0;
11219 	xp->xb_ua_retry_count = 0;
11220 	xp->xb_sense_bp = NULL;
11221 	xp->xb_sense_status = 0;
11222 	xp->xb_sense_state = 0;
11223 	xp->xb_sense_resid = 0;
11224 
11225 	bp->b_private = xp;
11226 	bp->b_flags &= ~(B_DONE | B_ERROR);
11227 	bp->b_resid = 0;
11228 	bp->av_forw = NULL;
11229 	bp->av_back = NULL;
11230 	bioerror(bp, 0);
11231 
11232 	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n");
11233 }
11234 
11235 
11236 /*
11237  * Function: sd_uscsi_strategy
11238  *
11239  * Description: Wrapper for calling into the USCSI chain via physio(9F)
11240  *
11241  * Arguments: bp - buf struct ptr
11242  *
11243  * Return Code: Always returns 0
11244  *
11245  * Context: Kernel thread context
11246  */
11247 
11248 static int
11249 sd_uscsi_strategy(struct buf *bp)
11250 {
11251 	struct sd_lun		*un;
11252 	struct sd_uscsi_info	*uip;
11253 	struct sd_xbuf		*xp;
11254 	uchar_t			chain_type;
11255 
11256 	ASSERT(bp != NULL);
11257 
11258 	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11259 	if (un == NULL) {
11260 		bioerror(bp, EIO);
11261 		bp->b_resid = bp->b_bcount;
11262 		biodone(bp);
11263 		return (0);
11264 	}
11265 
11266 	ASSERT(!mutex_owned(SD_MUTEX(un)));
11267 
11268 	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);
11269 
11270 	mutex_enter(SD_MUTEX(un));
11271 	/*
11272 	 * atapi: Since we are currently running the CD in PIO mode, we need
11273 	 * to call bp_mapin here to avoid having bp_mapin called in interrupt
11274 	 * context under the HBA's init_pkt routine.
11275 	 */
11276 	if (un->un_f_cfg_is_atapi == TRUE) {
11277 		mutex_exit(SD_MUTEX(un));
11278 		bp_mapin(bp);
11279 		mutex_enter(SD_MUTEX(un));
11280 	}
11281 	un->un_ncmds_in_driver++;
11282 	SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
11283 	    un->un_ncmds_in_driver);
11284 	mutex_exit(SD_MUTEX(un));
11285 
11286 	/*
11287 	 * A pointer to a struct sd_uscsi_info is expected in bp->b_private.
11288 	 */
11289 	ASSERT(bp->b_private != NULL);
11290 	uip = (struct sd_uscsi_info *)bp->b_private;
11291 
11292 	switch (uip->ui_flags) {
11293 	case SD_PATH_DIRECT:
11294 		chain_type = SD_CHAIN_DIRECT;
11295 		break;
11296 	case SD_PATH_DIRECT_PRIORITY:
11297 		chain_type = SD_CHAIN_DIRECT_PRIORITY;
11298 		break;
11299 	default:
11300 		chain_type = SD_CHAIN_USCSI;
11301 		break;
11302 	}
11303 
11304 	xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
11305 	sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);
11306 
11307 	/* Use the index obtained within xbuf_init */
11308 	SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);
11309 
11310 	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);
11311 
11312 	return (0);
11313 }
11314 
11315 
11316 /*
11317  * These routines perform raw i/o operations.
11318  */
11319 /*ARGSUSED*/
11320 static void
11321 sduscsimin(struct buf *bp)
11322 {
11323 	/*
11324 	 * Do not break up the transfer: the CDB count would then
11325 	 * be incorrect, and data underruns would result (incomplete
11326 	 * reads/writes, which would be retried and then fail; see
11327 	 * sdintr()).
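	 *
	 * Editorial illustration (hypothetical numbers): if a caller issued
	 * a USCSI READ(10) whose CDB transfer length field says 16 blocks,
	 * and this routine split the data buffer into two 8-block pieces,
	 * each piece would still carry a CDB asking for 16 blocks; the
	 * target would move only 8 blocks' worth of data per transfer, and
	 * each command would complete with a data underrun.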
11328 	 */
11329 }
11330 
11331 
11332 
11333 /*
11334  * Function: sd_send_scsi_cmd
11335  *
11336  * Description: Runs a USCSI command for a user (when called thru sdioctl),
11337  *		or for the driver.
11338  *
11339  * Arguments: dev - the dev_t for the device
11340  *		incmd - ptr to a valid uscsi_cmd struct
11341  *		cdbspace - UIO_USERSPACE or UIO_SYSSPACE
11342  *		dataspace - UIO_USERSPACE or UIO_SYSSPACE
11343  *		rqbufspace - UIO_USERSPACE or UIO_SYSSPACE
11344  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11345  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11346  *			to use the USCSI "direct" chain and bypass the normal
11347  *			command waitq.
11348  *
11349  * Return Code: 0 - successful completion of the given command
11350  *		EIO - scsi_reset() failed, or see biowait()/physio() codes.
11351  *		ENXIO - soft state not found for specified dev
11352  *		EINVAL
11353  *		EFAULT - copyin/copyout error
11354  *		return code of biowait(9F) or physio(9F):
11355  *			EIO - IO error, caller may check incmd->uscsi_status
11356  *			ENXIO
11357  *			EACCES - reservation conflict
11358  *
11359  * Context: Waits for command to complete. Can sleep.
11360  */
11361 
11362 static int
11363 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd,
11364     enum uio_seg cdbspace, enum uio_seg dataspace, enum uio_seg rqbufspace,
11365     int path_flag)
11366 {
11367 	struct sd_uscsi_info	*uip;
11368 	struct uscsi_cmd	*uscmd;
11369 	struct sd_lun		*un;
11370 	struct buf		*bp;
11371 	int			rval;
11372 	int			flags;
11373 
11374 	un = ddi_get_soft_state(sd_state, SDUNIT(dev));
11375 	if (un == NULL) {
11376 		return (ENXIO);
11377 	}
11378 
11379 	ASSERT(!mutex_owned(SD_MUTEX(un)));
11380 
11381 #ifdef SDDEBUG
11382 	switch (dataspace) {
11383 	case UIO_USERSPACE:
11384 		SD_TRACE(SD_LOG_IO, un,
11385 		    "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un);
11386 		break;
11387 	case UIO_SYSSPACE:
11388 		SD_TRACE(SD_LOG_IO, un,
11389 		    "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un);
11390 		break;
11391 	default:
11392 		SD_TRACE(SD_LOG_IO, un,
11393 		    "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un);
11394 		break;
11395 	}
11396 #endif
11397 
11398 	/*
11399 	 * Perform resets directly; no need to generate a command to do it.
11400 	 */
11401 	if (incmd->uscsi_flags & (USCSI_RESET | USCSI_RESET_ALL)) {
11402 		flags = ((incmd->uscsi_flags & USCSI_RESET_ALL) != 0) ?
11403 		    RESET_ALL : RESET_TARGET;
11404 		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: Issuing reset\n");
11405 		if (scsi_reset(SD_ADDRESS(un), flags) == 0) {
11406 			/* Reset attempt was unsuccessful */
11407 			SD_TRACE(SD_LOG_IO, un,
11408 			    "sd_send_scsi_cmd: reset: failure\n");
11409 			return (EIO);
11410 		}
11411 		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: reset: success\n");
11412 		return (0);
11413 	}
11414 
11415 	/* Perfunctory sanity check... */
11416 	if (incmd->uscsi_cdblen <= 0) {
11417 		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: "
11418 		    "invalid uscsi_cdblen, returning EINVAL\n");
11419 		return (EINVAL);
11420 	}
11421 
11422 	/*
11423 	 * In order not to worry about where the uscsi structure came from
11424 	 * (or where the cdb it points to came from), we're going to make
11425 	 * kmem_alloc'd copies of them here. This will also allow reference
11426 	 * to the data they contain long after this process has gone to
11427 	 * sleep and its kernel stack has been unmapped, etc.
11428 	 *
11429 	 * First get some memory for the uscsi_cmd struct and copy the
11430 	 * contents of the given uscsi_cmd struct into it.
11431 */ 11432 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 11433 bcopy(incmd, uscmd, sizeof (struct uscsi_cmd)); 11434 11435 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_cmd: uscsi_cmd", 11436 (uchar_t *)uscmd, sizeof (struct uscsi_cmd), SD_LOG_HEX); 11437 11438 /* 11439 * Now get some space for the CDB, and copy the given CDB into 11440 * it. Use ddi_copyin() in case the data is in user space. 11441 */ 11442 uscmd->uscsi_cdb = kmem_zalloc((size_t)incmd->uscsi_cdblen, KM_SLEEP); 11443 flags = (cdbspace == UIO_SYSSPACE) ? FKIOCTL : 0; 11444 if (ddi_copyin(incmd->uscsi_cdb, uscmd->uscsi_cdb, 11445 (uint_t)incmd->uscsi_cdblen, flags) != 0) { 11446 kmem_free(uscmd->uscsi_cdb, (size_t)incmd->uscsi_cdblen); 11447 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 11448 return (EFAULT); 11449 } 11450 11451 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_cmd: CDB", 11452 (uchar_t *)uscmd->uscsi_cdb, incmd->uscsi_cdblen, SD_LOG_HEX); 11453 11454 bp = getrbuf(KM_SLEEP); 11455 11456 /* 11457 * Allocate an sd_uscsi_info struct and fill it with the info 11458 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 11459 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 11460 * since we allocate the buf here in this function, we do not 11461 * need to preserve the prior contents of b_private. 11462 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 11463 */ 11464 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 11465 uip->ui_flags = path_flag; 11466 uip->ui_cmdp = uscmd; 11467 bp->b_private = uip; 11468 11469 /* 11470 * Initialize Request Sense buffering, if requested. 11471 */ 11472 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 11473 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 11474 /* 11475 * Here uscmd->uscsi_rqbuf currently points to the caller's 11476 * buffer, but we replace this with a kernel buffer that 11477 * we allocate to use with the sense data. The sense data 11478 * (if present) gets copied into this new buffer before the 11479 * command is completed. Then we copy the sense data from 11480 * our allocated buf into the caller's buffer below. Note 11481 * that incmd->uscsi_rqbuf and incmd->uscsi_rqlen are used 11482 * below to perform the copy back to the caller's buf. 11483 */ 11484 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 11485 if (rqbufspace == UIO_USERSPACE) { 11486 uscmd->uscsi_rqlen = SENSE_LENGTH; 11487 uscmd->uscsi_rqresid = SENSE_LENGTH; 11488 } else { 11489 uchar_t rlen = min(SENSE_LENGTH, uscmd->uscsi_rqlen); 11490 uscmd->uscsi_rqlen = rlen; 11491 uscmd->uscsi_rqresid = rlen; 11492 } 11493 } else { 11494 uscmd->uscsi_rqbuf = NULL; 11495 uscmd->uscsi_rqlen = 0; 11496 uscmd->uscsi_rqresid = 0; 11497 } 11498 11499 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: rqbuf:0x%p rqlen:%d\n", 11500 uscmd->uscsi_rqbuf, uscmd->uscsi_rqlen); 11501 11502 if (un->un_f_is_fibre == FALSE) { 11503 /* 11504 * Force asynchronous mode, if necessary. Doing this here 11505 * has the unfortunate effect of running other queued 11506 * commands async also, but since the main purpose of this 11507 * capability is downloading new drive firmware, we can 11508 * probably live with it. 
11509 */ 11510 if ((uscmd->uscsi_flags & USCSI_ASYNC) != 0) { 11511 if (scsi_ifgetcap(SD_ADDRESS(un), "synchronous", 1) 11512 == 1) { 11513 if (scsi_ifsetcap(SD_ADDRESS(un), 11514 "synchronous", 0, 1) == 1) { 11515 SD_TRACE(SD_LOG_IO, un, 11516 "sd_send_scsi_cmd: forced async ok\n"); 11517 } else { 11518 SD_TRACE(SD_LOG_IO, un, 11519 "sd_send_scsi_cmd:\ 11520 forced async failed\n"); 11521 rval = EINVAL; 11522 goto done; 11523 } 11524 } 11525 } 11526 11527 /* 11528 * Re-enable synchronous mode, if requested 11529 */ 11530 if (uscmd->uscsi_flags & USCSI_SYNC) { 11531 if (scsi_ifgetcap(SD_ADDRESS(un), "synchronous", 1) 11532 == 0) { 11533 int i = scsi_ifsetcap(SD_ADDRESS(un), 11534 "synchronous", 1, 1); 11535 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11536 "re-enabled sync %s\n", 11537 (i == 1) ? "ok" : "failed"); 11538 } 11539 } 11540 } 11541 11542 /* 11543 * Commands sent with priority are intended for error recovery 11544 * situations, and do not have retries performed. 11545 */ 11546 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11547 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11548 } 11549 11550 /* 11551 * If we're going to do actual I/O, let physio do all the right things 11552 */ 11553 if (uscmd->uscsi_buflen != 0) { 11554 struct iovec aiov; 11555 struct uio auio; 11556 struct uio *uio = &auio; 11557 11558 bzero(&auio, sizeof (struct uio)); 11559 bzero(&aiov, sizeof (struct iovec)); 11560 aiov.iov_base = uscmd->uscsi_bufaddr; 11561 aiov.iov_len = uscmd->uscsi_buflen; 11562 uio->uio_iov = &aiov; 11563 11564 uio->uio_iovcnt = 1; 11565 uio->uio_resid = uscmd->uscsi_buflen; 11566 uio->uio_segflg = dataspace; 11567 11568 /* 11569 * physio() will block here until the command completes.... 11570 */ 11571 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: calling physio.\n"); 11572 11573 rval = physio(sd_uscsi_strategy, bp, dev, 11574 ((uscmd->uscsi_flags & USCSI_READ) ? B_READ : B_WRITE), 11575 sduscsimin, uio); 11576 11577 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11578 "returned from physio with 0x%x\n", rval); 11579 11580 } else { 11581 /* 11582 * We have to mimic what physio would do here! Argh! 11583 */ 11584 bp->b_flags = B_BUSY | 11585 ((uscmd->uscsi_flags & USCSI_READ) ? B_READ : B_WRITE); 11586 bp->b_edev = dev; 11587 bp->b_dev = cmpdev(dev); /* maybe unnecessary? */ 11588 bp->b_bcount = 0; 11589 bp->b_blkno = 0; 11590 11591 SD_TRACE(SD_LOG_IO, un, 11592 "sd_send_scsi_cmd: calling sd_uscsi_strategy...\n"); 11593 11594 (void) sd_uscsi_strategy(bp); 11595 11596 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: calling biowait\n"); 11597 11598 rval = biowait(bp); 11599 11600 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11601 "returned from biowait with 0x%x\n", rval); 11602 } 11603 11604 done: 11605 11606 #ifdef SDDEBUG 11607 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11608 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11609 uscmd->uscsi_status, uscmd->uscsi_resid); 11610 if (uscmd->uscsi_bufaddr != NULL) { 11611 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11612 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11613 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11614 if (dataspace == UIO_SYSSPACE) { 11615 SD_DUMP_MEMORY(un, SD_LOG_IO, 11616 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11617 uscmd->uscsi_buflen, SD_LOG_HEX); 11618 } 11619 } 11620 #endif 11621 11622 /* 11623 * Get the status and residual to return to the caller. 
11624 	 */
11625 	incmd->uscsi_status = uscmd->uscsi_status;
11626 	incmd->uscsi_resid = uscmd->uscsi_resid;
11627 
11628 	/*
11629 	 * If the caller wants sense data, copy back whatever sense data
11630 	 * we may have gotten, and update the relevant rqsense info.
11631 	 */
11632 	if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
11633 	    (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
11634 
11635 		int rqlen = uscmd->uscsi_rqlen - uscmd->uscsi_rqresid;
11636 		rqlen = min(((int)incmd->uscsi_rqlen), rqlen);
11637 
11638 		/* Update the Request Sense status and resid */
11639 		incmd->uscsi_rqresid = incmd->uscsi_rqlen - rqlen;
11640 		incmd->uscsi_rqstatus = uscmd->uscsi_rqstatus;
11641 
11642 		SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: "
11643 		    "uscsi_rqstatus: 0x%02x uscsi_rqresid:0x%x\n",
11644 		    incmd->uscsi_rqstatus, incmd->uscsi_rqresid);
11645 
11646 		/* Copy out the sense data for user processes */
11647 		if ((incmd->uscsi_rqbuf != NULL) && (rqlen != 0)) {
11648 			int flags =
11649 			    (rqbufspace == UIO_USERSPACE) ? 0 : FKIOCTL;
11650 			if (ddi_copyout(uscmd->uscsi_rqbuf, incmd->uscsi_rqbuf,
11651 			    rqlen, flags) != 0) {
11652 				rval = EFAULT;
11653 			}
11654 			/*
11655 			 * Note: Can't touch incmd->uscsi_rqbuf so use
11656 			 * uscmd->uscsi_rqbuf instead. They're the same.
11657 			 */
11658 			SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: "
11659 			    "incmd->uscsi_rqbuf: 0x%p rqlen:%d\n",
11660 			    incmd->uscsi_rqbuf, rqlen);
11661 			SD_DUMP_MEMORY(un, SD_LOG_IO, "rq",
11662 			    (uchar_t *)uscmd->uscsi_rqbuf, rqlen, SD_LOG_HEX);
11663 		}
11664 	}
11665 
11666 	/*
11667 	 * Free allocated resources and return; mapout the buf in case it was
11668 	 * mapped in by a lower layer.
11669 	 */
11670 	bp_mapout(bp);
11671 	freerbuf(bp);
11672 	kmem_free(uip, sizeof (struct sd_uscsi_info));
11673 	if (uscmd->uscsi_rqbuf != NULL) {
11674 		kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
11675 	}
11676 	kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
11677 	kmem_free(uscmd, sizeof (struct uscsi_cmd));
11678 
11679 	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: exit\n");
11680 
11681 	return (rval);
11682 }
11683 
11684 
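/*
 * Editorial illustration (hypothetical in-kernel caller): a TEST UNIT
 * READY is a 6-byte CDB of zeros, so a minimal invocation of
 * sd_send_scsi_cmd() might look like the sketch below.  (The driver
 * itself uses dedicated wrappers such as sd_send_scsi_TEST_UNIT_READY()
 * rather than open-coding this.)
 *
 *	struct uscsi_cmd ucmd;
 *	char cdb[CDB_GROUP0] = { 0 };	(opcode 0x00 == TEST UNIT READY)
 *
 *	bzero(&ucmd, sizeof (ucmd));
 *	ucmd.uscsi_cdb = cdb;
 *	ucmd.uscsi_cdblen = CDB_GROUP0;
 *	ucmd.uscsi_buflen = 0;		(no data phase)
 *	(void) sd_send_scsi_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE,
 *	    UIO_SYSSPACE, SD_PATH_DIRECT);
 */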
11685 /*
11686  * Function: sd_buf_iodone
11687  *
11688  * Description: Frees the sd_xbuf & returns the buf to its originator.
11689  *
11690  * Context: May be called from interrupt context.
11691  */
11692 /* ARGSUSED */
11693 static void
11694 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp)
11695 {
11696 	struct sd_xbuf *xp;
11697 
11698 	ASSERT(un != NULL);
11699 	ASSERT(bp != NULL);
11700 	ASSERT(!mutex_owned(SD_MUTEX(un)));
11701 
11702 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n");
11703 
11704 	xp = SD_GET_XBUF(bp);
11705 	ASSERT(xp != NULL);
11706 
11707 	mutex_enter(SD_MUTEX(un));
11708 
11709 	/*
11710 	 * Record the time at which the command completed.
11711 	 * This is used to determine whether the device has been
11712 	 * idle long enough to be reported as idle to the PM framework.
11713 	 * Tracking the idle time here keeps the overhead low, and
11714 	 * therefore improves performance per I/O operation.
11715 	 */
11716 	un->un_pm_idle_time = ddi_get_time();
11717 
11718 	un->un_ncmds_in_driver--;
11719 	ASSERT(un->un_ncmds_in_driver >= 0);
11720 	SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
11721 	    un->un_ncmds_in_driver);
11722 
11723 	mutex_exit(SD_MUTEX(un));
11724 
11725 	ddi_xbuf_done(bp, un->un_xbuf_attr);	/* xbuf is gone after this */
11726 	biodone(bp);				/* bp is gone after this */
11727 
11728 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
11729 }
11730 
11731 
11732 /*
11733  * Function: sd_uscsi_iodone
11734  *
11735  * Description: Frees the sd_xbuf & returns the buf to its originator.
11736  *
11737  * Context: May be called from interrupt context.
11738  */
11739 /* ARGSUSED */
11740 static void
11741 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
11742 {
11743 	struct sd_xbuf *xp;
11744 
11745 	ASSERT(un != NULL);
11746 	ASSERT(bp != NULL);
11747 
11748 	xp = SD_GET_XBUF(bp);
11749 	ASSERT(xp != NULL);
11750 	ASSERT(!mutex_owned(SD_MUTEX(un)));
11751 
11752 	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");
11753 
11754 	mutex_enter(SD_MUTEX(un));
11755 
11756 	/*
11757 	 * Record the time at which the command completed.
11758 	 * This is used to determine whether the device has been
11759 	 * idle long enough to be reported as idle to the PM framework.
11760 	 * Tracking the idle time here keeps the overhead low, and
11761 	 * therefore improves performance per I/O operation.
11762 	 */
11763 	un->un_pm_idle_time = ddi_get_time();
11764 
11765 	un->un_ncmds_in_driver--;
11766 	ASSERT(un->un_ncmds_in_driver >= 0);
11767 	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
11768 	    un->un_ncmds_in_driver);
11769 
11770 	mutex_exit(SD_MUTEX(un));
11771 
11772 	kmem_free(xp, sizeof (struct sd_xbuf));
11773 	biodone(bp);
11774 
11775 	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
11776 }
11777 
11778 
11779 /*
11780  * Function: sd_mapblockaddr_iostart
11781  *
11782  * Description: Verify that the request lies within the partition limits
11783  *		for the indicated minor device. Issue an "overrun" buf if
11784  *		the request would exceed the partition range. Converts a
11785  *		partition-relative block address to an absolute one.
11786  *
11787  * Context: Can sleep
11788  *
11789  * Issues: This follows what the old code did, in terms of accessing
11790  *	   some of the partition info in the unit struct without holding
11791  *	   the mutex. This is a general issue: if the partition info
11792  *	   can be altered while IO is in progress, then as soon as we send
11793  *	   a buf, its partitioning can become invalid before it gets to the
11794  *	   device. Probably the right fix is to move partitioning out
11795  *	   of the driver entirely.
11796  */
11797 
11798 static void
11799 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
11800 {
11801 	daddr_t	nblocks;	/* #blocks in the given partition */
11802 	daddr_t	blocknum;	/* Block number specified by the buf */
11803 	size_t	requested_nblocks;
11804 	size_t	available_nblocks;
11805 	int	partition;
11806 	diskaddr_t	partition_offset;
11807 	struct sd_xbuf *xp;
11808 
11809 
11810 	ASSERT(un != NULL);
11811 	ASSERT(bp != NULL);
11812 	ASSERT(!mutex_owned(SD_MUTEX(un)));
11813 
11814 	SD_TRACE(SD_LOG_IO_PARTITION, un,
11815 	    "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);
11816 
11817 	xp = SD_GET_XBUF(bp);
11818 	ASSERT(xp != NULL);
11819 
11820 	/*
11821 	 * If the geometry is not indicated as valid, attempt to access
11822 	 * the unit & verify the geometry/label. This can be the case for
11823 	 * removable-media devices, or if the device was opened in
11824 	 * NDELAY/NONBLOCK mode.
11825 	 */
11826 	if ((un->un_f_geometry_is_valid != TRUE) &&
11827 	    (sd_ready_and_valid(un) != SD_READY_VALID)) {
11828 		/*
11829 		 * For removable devices it is possible to start an I/O
11830 		 * without media by opening the device in nodelay mode.
11831 		 * Also, for writable CDs there can be many scenarios where
11832 		 * there is no geometry yet but the volume manager is trying
11833 		 * to issue a read() just because it can see the TOC on the
11834 		 * CD. So do not print a message for removables.
11835 		 */
11836 		if (!ISREMOVABLE(un)) {
11837 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
11838 			    "i/o to invalid geometry\n");
11839 		}
11840 		bioerror(bp, EIO);
11841 		bp->b_resid = bp->b_bcount;
11842 		SD_BEGIN_IODONE(index, un, bp);
11843 		return;
11844 	}
11845 
11846 	partition = SDPART(bp->b_edev);
11847 
11848 	/* #blocks in partition */
11849 	nblocks = un->un_map[partition].dkl_nblk;
11850 
11851 	/* Use of a local variable potentially improves performance slightly */
11852 	partition_offset = un->un_offset[partition];
11853 
11854 	/*
11855 	 * blocknum is the starting block number of the request. At this
11856 	 * point it is still relative to the start of the minor device.
11857 	 */
11858 	blocknum = xp->xb_blkno;
11859 
11860 	/*
11861 	 * Legacy: If the starting block number is one past the last block
11862 	 * in the partition, do not set B_ERROR in the buf.
11863 	 */
11864 	if (blocknum == nblocks) {
11865 		goto error_exit;
11866 	}
11867 
11868 	/*
11869 	 * Confirm that the first block of the request lies within the
11870 	 * partition limits. Also, the requested number of bytes must be
11871 	 * a multiple of the system block size.
11872 	 */
11873 	if ((blocknum < 0) || (blocknum >= nblocks) ||
11874 	    ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) {
11875 		bp->b_flags |= B_ERROR;
11876 		goto error_exit;
11877 	}
11878 
11879 	/*
11880 	 * If the requested # blocks exceeds the available # blocks, that
11881 	 * is an overrun of the partition.
11882 	 */
11883 	requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount);
11884 	available_nblocks = (size_t)(nblocks - blocknum);
11885 	ASSERT(nblocks >= blocknum);
11886 
11887 	if (requested_nblocks > available_nblocks) {
11888 		/*
11889 		 * Allocate an "overrun" buf to allow the request to proceed
11890 		 * for the amount of space available in the partition. The
11891 		 * amount not transferred will be added into the b_resid
11892 		 * when the operation is complete. The overrun buf
11893 		 * replaces the original buf here, and the original buf
11894 		 * is saved inside the overrun buf, for later use.
11895 		 */
11896 		size_t resid = SD_SYSBLOCKS2BYTES(un,
11897 		    (offset_t)(requested_nblocks - available_nblocks));
11898 		size_t count = bp->b_bcount - resid;
11899 		/*
11900 		 * Note: count is an unsigned entity, thus it can NEVER
11901 		 * be less than 0, so ASSERT that the original values are
11902 		 * correct.
11903 		 */
11904 		ASSERT(bp->b_bcount >= resid);
11905 
11906 		bp = sd_bioclone_alloc(bp, count, blocknum,
11907 		    (int (*)(struct buf *)) sd_mapblockaddr_iodone);
11908 		xp = SD_GET_XBUF(bp);	/* Update for 'new' bp! */
11909 		ASSERT(xp != NULL);
11910 	}
11911 
11912 	/* At this point there should be no residual for this buf. */
11913 	ASSERT(bp->b_resid == 0);
11914 
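	/*
	 * Editorial illustration (hypothetical numbers): with a 1000-block
	 * partition (nblocks == 1000) and a request for 8 blocks starting
	 * at block 996, requested_nblocks == 8 but available_nblocks == 4.
	 * The overrun buf is cloned for the first 4 blocks' worth of bytes
	 * (count), and the remaining 4 blocks' worth (resid) is added back
	 * into b_resid by sd_mapblockaddr_iodone() when the transfer
	 * completes.
	 */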
11915 	/* Convert the block number to an absolute address. */
11916 	xp->xb_blkno += partition_offset;
11917 
11918 	SD_NEXT_IOSTART(index, un, bp);
11919 
11920 	SD_TRACE(SD_LOG_IO_PARTITION, un,
11921 	    "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp);
11922 
11923 	return;
11924 
11925 error_exit:
11926 	bp->b_resid = bp->b_bcount;
11927 	SD_BEGIN_IODONE(index, un, bp);
11928 	SD_TRACE(SD_LOG_IO_PARTITION, un,
11929 	    "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp);
11930 }
11931 
11932 
11933 /*
11934  * Function: sd_mapblockaddr_iodone
11935  *
11936  * Description: Completion-side processing for partition management.
11937  *
11938  * Context: May be called under interrupt context
11939  */
11940 
11941 static void
11942 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp)
11943 {
11944 	/* int	partition; */	/* Not used, see below. */
11945 	ASSERT(un != NULL);
11946 	ASSERT(bp != NULL);
11947 	ASSERT(!mutex_owned(SD_MUTEX(un)));
11948 
11949 	SD_TRACE(SD_LOG_IO_PARTITION, un,
11950 	    "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp);
11951 
11952 	if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) {
11953 		/*
11954 		 * We have an "overrun" buf to deal with...
11955 		 */
11956 		struct sd_xbuf	*xp;
11957 		struct buf	*obp;	/* ptr to the original buf */
11958 
11959 		xp = SD_GET_XBUF(bp);
11960 		ASSERT(xp != NULL);
11961 
11962 		/* Retrieve the pointer to the original buf */
11963 		obp = (struct buf *)xp->xb_private;
11964 		ASSERT(obp != NULL);
11965 
11966 		obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid);
11967 		bioerror(obp, bp->b_error);
11968 
11969 		sd_bioclone_free(bp);
11970 
11971 		/*
11972 		 * Get back the original buf.
11973 		 * Note that since the restoration of xb_blkno below
11974 		 * was removed, the sd_xbuf is not needed.
11975 		 */
11976 		bp = obp;
11977 		/*
11978 		 * xp = SD_GET_XBUF(bp);
11979 		 * ASSERT(xp != NULL);
11980 		 */
11981 	}
11982 
11983 	/*
11984 	 * Convert xp->xb_blkno back to a minor-device relative value.
11985 	 * Note: this has been commented out, as it is not needed in the
11986 	 * current implementation of the driver (ie, this function
11987 	 * is at the top of the layering chains, so the info will be
11988 	 * discarded) and it is in the "hot" IO path.
11989 	 *
11990 	 * partition = getminor(bp->b_edev) & SDPART_MASK;
11991 	 * xp->xb_blkno -= un->un_offset[partition];
11992 	 */
11993 
11994 	SD_NEXT_IODONE(index, un, bp);
11995 
11996 	SD_TRACE(SD_LOG_IO_PARTITION, un,
11997 	    "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp);
11998 }
11999 
12000 
12001 /*
12002  * Function: sd_mapblocksize_iostart
12003  *
12004  * Description: Convert between system block size (un->un_sys_blocksize)
12005  *		and target block size (un->un_tgt_blocksize).
12006  *
12007  * Context: Can sleep to allocate resources.
12008  *
12009  * Assumptions: A higher layer has already performed any partition validation,
12010  *		and converted the xp->xb_blkno to an absolute value relative
12011  *		to the start of the device.
12012  *
12013  *		It is also assumed that the higher layer has implemented
12014  *		an "overrun" mechanism for the case where the request would
12015  *		read/write beyond the end of a partition. In this case we
12016  *		assume (and ASSERT) that bp->b_resid == 0.
12017  *
12018  *		Note: The implementation for this routine assumes the target
12019  *		block size remains constant between allocation and transport.
12020  */
12021 
12022 static void
12023 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
12024 {
12025 	struct sd_mapblocksize_info	*bsp;
12026 	struct sd_xbuf			*xp;
12027 	offset_t			first_byte;
12028 	daddr_t				start_block, end_block;
12029 	daddr_t				request_bytes;
12030 	ushort_t			is_aligned = FALSE;
12031 
12032 	ASSERT(un != NULL);
12033 	ASSERT(bp != NULL);
12034 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12035 	ASSERT(bp->b_resid == 0);
12036 
12037 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
12038 	    "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);
12039 
12040 	/*
12041 	 * For a non-writable CD, a write request is an error.
12042 	 */
12043 	if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
12044 	    (un->un_f_mmc_writable_media == FALSE)) {
12045 		bioerror(bp, EIO);
12046 		bp->b_resid = bp->b_bcount;
12047 		SD_BEGIN_IODONE(index, un, bp);
12048 		return;
12049 	}
12050 
12051 	/*
12052 	 * We do not need a shadow buf if the device is using
12053 	 * un->un_sys_blocksize as its block size or if bcount == 0.
12054 	 * In this case there is no layer-private data block allocated.
12055 	 */
12056 	if ((un->un_tgt_blocksize == un->un_sys_blocksize) ||
12057 	    (bp->b_bcount == 0)) {
12058 		goto done;
12059 	}
12060 
12061 #if defined(__i386) || defined(__amd64)
12062 	/* We do not support non-block-aligned transfers for ROD devices */
12063 	ASSERT(!ISROD(un));
12064 #endif
12065 
12066 	xp = SD_GET_XBUF(bp);
12067 	ASSERT(xp != NULL);
12068 
12069 	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12070 	    "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
12071 	    un->un_tgt_blocksize, un->un_sys_blocksize);
12072 	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12073 	    "request start block:0x%x\n", xp->xb_blkno);
12074 	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12075 	    "request len:0x%x\n", bp->b_bcount);
12076 
12077 	/*
12078 	 * Allocate the layer-private data area for the mapblocksize layer.
12079 	 * Layers are allowed to use the xb_private member of the sd_xbuf
12080 	 * struct to store the pointer to their layer-private data block, but
12081 	 * each layer also has the responsibility of restoring the prior
12082 	 * contents of xb_private before returning the buf/xbuf to the
12083 	 * higher layer that sent it.
12084 	 *
12085 	 * Here we save the prior contents of xp->xb_private into the
12086 	 * bsp->mbs_oprivate field of our layer-private data area. This value
12087 	 * is restored by sd_mapblocksize_iodone() just prior to freeing up
12088 	 * the layer-private area and returning the buf/xbuf to the layer
12089 	 * that sent it.
12090 	 *
12091 	 * Note that here we use kmem_zalloc for the allocation as there are
12092 	 * parts of the mapblocksize code that expect certain fields to be
12093 	 * zero unless explicitly set to a required value.
12094 	 */
12095 	bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
12096 	bsp->mbs_oprivate = xp->xb_private;
12097 	xp->xb_private = bsp;
12098 
12099 	/*
12100 	 * This treats the data on the disk (target) as an array of bytes.
12101 	 * first_byte is the byte offset, from the beginning of the device,
12102 	 * to the location of the request. This is converted from a
12103 	 * un->un_sys_blocksize block address to a byte offset, and then back
12104 	 * to a block address based upon a un->un_tgt_blocksize block size.
12105 	 *
12106 	 * xp->xb_blkno should be absolute upon entry into this function,
12107 	 * but it is based upon partitions that use the "system"
12108 	 * block size. It must be adjusted to reflect the block size of
12109 	 * the target.
12110 	 *
12111 	 * Note that end_block is actually the block that follows the last
12112 	 * block of the request, but that's what is needed for the computation.
12113 	 */
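	/*
	 * Editorial illustration (hypothetical CD-style values): with
	 * un_sys_blocksize == 512, un_tgt_blocksize == 2048,
	 * xp->xb_blkno == 3, and bp->b_bcount == 1024:
	 *
	 *	first_byte    = 3 * 512                     = 1536
	 *	start_block   = 1536 / 2048                 = 0
	 *	end_block     = (1536 + 1024 + 2047) / 2048 = 2
	 *	request_bytes = (2 - 0) * 2048              = 4096
	 *
	 * The request is unaligned (1536 % 2048 != 0), so a 4096-byte
	 * shadow READ is built below, and mbs_copy_offset == 1536 locates
	 * the caller's data within it.
	 */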
12114 	first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
12115 	start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
12116 	end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) /
12117 	    un->un_tgt_blocksize;
12118 
12119 	/* request_bytes is rounded up to a multiple of the target block size */
12120 	request_bytes = (end_block - start_block) * un->un_tgt_blocksize;
12121 
12122 	/*
12123 	 * See if the starting address of the request and the request
12124 	 * length are aligned on a un->un_tgt_blocksize boundary. If aligned,
12125 	 * then we do not need to allocate a shadow buf to handle the request.
12126 	 */
12127 	if (((first_byte % un->un_tgt_blocksize) == 0) &&
12128 	    ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
12129 		is_aligned = TRUE;
12130 	}
12131 
12132 	if ((bp->b_flags & B_READ) == 0) {
12133 		/*
12134 		 * Lock the range for a write operation. An aligned request is
12135 		 * considered a simple write; otherwise the request must be a
12136 		 * read-modify-write.
12137 		 */
12138 		bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
12139 		    (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
12140 	}
12141 
12142 	/*
12143 	 * Alloc a shadow buf if the request is not aligned. Also, this is
12144 	 * where the READ command is generated for a read-modify-write. (The
12145 	 * write phase is deferred until after the read completes.)
12146 	 */
12147 	if (is_aligned == FALSE) {
12148 
12149 		struct sd_mapblocksize_info	*shadow_bsp;
12150 		struct sd_xbuf			*shadow_xp;
12151 		struct buf			*shadow_bp;
12152 
12153 		/*
12154 		 * Allocate the shadow buf and its associated xbuf. Note that
12155 		 * after this call the xb_blkno value in both the original
12156 		 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
12157 		 * same: absolute, relative to the start of the device, and
12158 		 * adjusted for the target block size. The b_blkno in the
12159 		 * shadow buf will also be set to this value. We should never
12160 		 * change b_blkno in the original bp, however.
12161 		 *
12162 		 * Note also that the shadow buf will always need to be a
12163 		 * READ command, regardless of whether the incoming command
12164 		 * is a READ or a WRITE.
12165 		 */
12166 		shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
12167 		    xp->xb_blkno,
12168 		    (int (*)(struct buf *)) sd_mapblocksize_iodone);
12169 
12170 		shadow_xp = SD_GET_XBUF(shadow_bp);
12171 
12172 		/*
12173 		 * Allocate the layer-private data for the shadow buf.
12174 		 * (No need to preserve xb_private in the shadow xbuf.)
12175 		 */
12176 		shadow_xp->xb_private = shadow_bsp =
12177 		    kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
12178 
12179 		/*
12180 		 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone
12181 		 * to figure out where the start of the user data is (based upon
12182 		 * the system block size) in the data returned by the READ
12183 		 * command (which will be based upon the target blocksize). Note
12184 		 * that this is only really used if the request is unaligned.
12185 		 */
12186 		bsp->mbs_copy_offset = (ssize_t)(first_byte -
12187 		    ((offset_t)xp->xb_blkno * un->un_tgt_blocksize));
12188 		ASSERT((bsp->mbs_copy_offset >= 0) &&
12189 		    (bsp->mbs_copy_offset < un->un_tgt_blocksize));
12190 
12191 		shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset;
12192 
12193 		shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index;
12194 
12195 		/* Transfer the wmap (if any) to the shadow buf */
12196 		shadow_bsp->mbs_wmp = bsp->mbs_wmp;
12197 		bsp->mbs_wmp = NULL;
12198 
12199 		/*
12200 		 * The shadow buf goes on from here in place of the
12201 		 * original buf.
12202 		 */
12203 		shadow_bsp->mbs_orig_bp = bp;
12204 		bp = shadow_bp;
12205 	}
12206 
12207 	SD_INFO(SD_LOG_IO_RMMEDIA, un,
12208 	    "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno);
12209 	SD_INFO(SD_LOG_IO_RMMEDIA, un,
12210 	    "sd_mapblocksize_iostart: tgt request len:0x%x\n",
12211 	    request_bytes);
12212 	SD_INFO(SD_LOG_IO_RMMEDIA, un,
12213 	    "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp);
12214 
12215 done:
12216 	SD_NEXT_IOSTART(index, un, bp);
12217 
12218 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
12219 	    "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp);
12220 }
12221 
12222 
12223 /*
12224  * Function: sd_mapblocksize_iodone
12225  *
12226  * Description: Completion side processing for block-size mapping.
12227  *
12228  * Context: May be called under interrupt context
12229  */
12230 
12231 static void
12232 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp)
12233 {
12234 	struct sd_mapblocksize_info	*bsp;
12235 	struct sd_xbuf			*xp;
12236 	struct sd_xbuf	*orig_xp;	/* sd_xbuf for the original buf */
12237 	struct buf	*orig_bp;	/* ptr to the original buf */
12238 	offset_t	shadow_end;
12239 	offset_t	request_end;
12240 	offset_t	shadow_start;
12241 	ssize_t		copy_offset;
12242 	size_t		copy_length;
12243 	size_t		shortfall;
12244 	uint_t		is_write;	/* TRUE if this bp is a WRITE */
12245 	uint_t		has_wmap;	/* TRUE if this bp has a wmap */
12246 
12247 	ASSERT(un != NULL);
12248 	ASSERT(bp != NULL);
12249 
12250 	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
12251 	    "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp);
12252 
12253 	/*
12254 	 * There is no shadow buf or layer-private data if the target is
12255 	 * using un->un_sys_blocksize as its block size or if bcount == 0.
12256 	 */
12257 	if ((un->un_tgt_blocksize == un->un_sys_blocksize) ||
12258 	    (bp->b_bcount == 0)) {
12259 		goto exit;
12260 	}
12261 
12262 	xp = SD_GET_XBUF(bp);
12263 	ASSERT(xp != NULL);
12264 
12265 	/* Retrieve the pointer to the layer-private data area from the xbuf. */
12266 	bsp = xp->xb_private;
12267 
12268 	is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE;
12269 	has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE;
12270 
12271 	if (is_write) {
12272 		/*
12273 		 * For a WRITE request we must free up the block range that
12274 		 * we have locked up. This holds regardless of whether this is
12275 		 * an aligned write request or a read-modify-write request.
12276 		 */
12277 		sd_range_unlock(un, bsp->mbs_wmp);
12278 		bsp->mbs_wmp = NULL;
12279 	}
12280 
12281 	if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) {
12282 		/*
12283 		 * An aligned read or write command will have no shadow buf;
12284 		 * there is not much else to do with it.
12285 		 */
12286 		goto done;
12287 	}
12288 
12289 	orig_bp = bsp->mbs_orig_bp;
12290 	ASSERT(orig_bp != NULL);
12291 	orig_xp = SD_GET_XBUF(orig_bp);
12292 	ASSERT(orig_xp != NULL);
12293 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12294 
12295 	if (!is_write && has_wmap) {
12296 		/*
12297 		 * A READ with a wmap means this is the READ phase of a
12298 		 * read-modify-write.
		 * If an error occurred on the READ, then
12299 		 * we do not proceed with the WRITE phase or copy any data.
12300 		 * Just release the write maps and return with an error.
12301 		 */
12302 		if ((bp->b_resid != 0) || (bp->b_error != 0)) {
12303 			orig_bp->b_resid = orig_bp->b_bcount;
12304 			bioerror(orig_bp, bp->b_error);
12305 			sd_range_unlock(un, bsp->mbs_wmp);
12306 			goto freebuf_done;
12307 		}
12308 	}
12309 
12310 	/*
12311 	 * Here is where we set up to copy the data from the shadow buf
12312 	 * into the space associated with the original buf.
12313 	 *
12314 	 * To deal with the conversion between block sizes, these
12315 	 * computations treat the data as an array of bytes, with the
12316 	 * first byte (byte 0) corresponding to the first byte in the
12317 	 * first block on the disk.
12318 	 */
12319 
12320 	/*
12321 	 * shadow_start and shadow_len indicate the location and size of
12322 	 * the data returned with the shadow IO request.
12323 	 */
12324 	shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
12325 	shadow_end = shadow_start + bp->b_bcount - bp->b_resid;
12326 
12327 	/*
12328 	 * copy_offset gives the offset (in bytes) from the start of the first
12329 	 * block of the READ request to the beginning of the data. We retrieve
12330 	 * this value from the layer-private data area, where it was saved
12331 	 * by sd_mapblocksize_iostart(). copy_length gives the amount of
12332 	 * data to be copied (in bytes).
12333 	 */
12334 	copy_offset = bsp->mbs_copy_offset;
12335 	ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize));
12336 	copy_length = orig_bp->b_bcount;
12337 	request_end = shadow_start + copy_offset + orig_bp->b_bcount;
12338 
12339 	/*
12340 	 * Set up the resid and error fields of orig_bp as appropriate.
12341 	 */
12342 	if (shadow_end >= request_end) {
12343 		/* We got all the requested data; set resid to zero */
12344 		orig_bp->b_resid = 0;
12345 	} else {
12346 		/*
12347 		 * We failed to get enough data to fully satisfy the original
12348 		 * request. Just copy back whatever data we got and set
12349 		 * up the residual and error code as required.
12350 		 *
12351 		 * 'shortfall' is the amount by which the data received with the
12352 		 * shadow buf has "fallen short" of the requested amount.
12353 		 */
12354 		shortfall = (size_t)(request_end - shadow_end);
12355 
12356 		if (shortfall > orig_bp->b_bcount) {
12357 			/*
12358 			 * We did not get enough data to even partially
12359 			 * fulfill the original request. The residual is
12360 			 * equal to the amount requested.
12361 			 */
12362 			orig_bp->b_resid = orig_bp->b_bcount;
12363 		} else {
12364 			/*
12365 			 * We did not get all the data that we requested
12366 			 * from the device, but we will try to return what
12367 			 * portion we did get.
12368 			 */
12369 			orig_bp->b_resid = shortfall;
12370 		}
12371 		ASSERT(copy_length >= orig_bp->b_resid);
12372 		copy_length -= orig_bp->b_resid;
12373 	}
12374 
12375 	/* Propagate the error code from the shadow buf to the original buf */
12376 	bioerror(orig_bp, bp->b_error);
12377 
12378 	if (is_write) {
12379 		goto freebuf_done;	/* No data copying for a WRITE */
12380 	}
12381 
12382 	if (has_wmap) {
12383 		/*
12384 		 * This is a READ command from the READ phase of a
12385 		 * read-modify-write request. We have to copy the data given
12386 		 * by the user OVER the data returned by the READ command,
12387 		 * then convert the command from a READ to a WRITE and send
12388 		 * it back to the target.
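		 *
		 * Editorial illustration (hypothetical numbers): on a device
		 * with 2048-byte target blocks, a 512-byte user write at
		 * byte offset 512 arrives here as a 2048-byte shadow READ
		 * of the enclosing block.  The 512 user bytes are copied
		 * over the shadow data at copy_offset == 512, the buf is
		 * flipped from a READ to a WRITE, and the full 2048-byte
		 * block is dispatched back to the target.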
12389 */ 12390 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 12391 copy_length); 12392 12393 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 12394 12395 /* 12396 * Dispatch the WRITE command to the taskq thread, which 12397 * will in turn send the command to the target. When the 12398 * WRITE command completes, we (sd_mapblocksize_iodone()) 12399 * will get called again as part of the iodone chain 12400 * processing for it. Note that we will still be dealing 12401 * with the shadow buf at that point. 12402 */ 12403 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 12404 KM_NOSLEEP) != 0) { 12405 /* 12406 * Dispatch was successful so we are done. Return 12407 * without going any higher up the iodone chain. Do 12408 * not free up any layer-private data until after the 12409 * WRITE completes. 12410 */ 12411 return; 12412 } 12413 12414 /* 12415 * Dispatch of the WRITE command failed; set up the error 12416 * condition and send this IO back up the iodone chain. 12417 */ 12418 bioerror(orig_bp, EIO); 12419 orig_bp->b_resid = orig_bp->b_bcount; 12420 12421 } else { 12422 /* 12423 * This is a regular READ request (ie, not a RMW). Copy the 12424 * data from the shadow buf into the original buf. The 12425 * copy_offset compensates for any "misalignment" between the 12426 * shadow buf (with its un->un_tgt_blocksize blocks) and the 12427 * original buf (with its un->un_sys_blocksize blocks). 12428 */ 12429 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 12430 copy_length); 12431 } 12432 12433 freebuf_done: 12434 12435 /* 12436 * At this point we still have both the shadow buf AND the original 12437 * buf to deal with, as well as the layer-private data area in each. 12438 * Local variables are as follows: 12439 * 12440 * bp -- points to shadow buf 12441 * xp -- points to xbuf of shadow buf 12442 * bsp -- points to layer-private data area of shadow buf 12443 * orig_bp -- points to original buf 12444 * 12445 * First free the shadow buf and its associated xbuf, then free the 12446 * layer-private data area from the shadow buf. There is no need to 12447 * restore xb_private in the shadow xbuf. 12448 */ 12449 sd_shadow_buf_free(bp); 12450 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12451 12452 /* 12453 * Now update the local variables to point to the original buf, xbuf, 12454 * and layer-private area. 12455 */ 12456 bp = orig_bp; 12457 xp = SD_GET_XBUF(bp); 12458 ASSERT(xp != NULL); 12459 ASSERT(xp == orig_xp); 12460 bsp = xp->xb_private; 12461 ASSERT(bsp != NULL); 12462 12463 done: 12464 /* 12465 * Restore xb_private to whatever it was set to by the next higher 12466 * layer in the chain, then free the layer-private data area. 12467 */ 12468 xp->xb_private = bsp->mbs_oprivate; 12469 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12470 12471 exit: 12472 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 12473 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 12474 12475 SD_NEXT_IODONE(index, un, bp); 12476 } 12477 12478 12479 /* 12480 * Function: sd_checksum_iostart 12481 * 12482 * Description: A stub function for a layer that's currently not used. 12483 * For now just a placeholder. 
12484  *
12485  * Context: Kernel thread context
12486  */
12487 
12488 static void
12489 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp)
12490 {
12491 	ASSERT(un != NULL);
12492 	ASSERT(bp != NULL);
12493 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12494 	SD_NEXT_IOSTART(index, un, bp);
12495 }
12496 
12497 
12498 /*
12499  * Function: sd_checksum_iodone
12500  *
12501  * Description: A stub function for a layer that's currently not used.
12502  *		For now just a placeholder.
12503  *
12504  * Context: May be called under interrupt context
12505  */
12506 
12507 static void
12508 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp)
12509 {
12510 	ASSERT(un != NULL);
12511 	ASSERT(bp != NULL);
12512 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12513 	SD_NEXT_IODONE(index, un, bp);
12514 }
12515 
12516 
12517 /*
12518  * Function: sd_checksum_uscsi_iostart
12519  *
12520  * Description: A stub function for a layer that's currently not used.
12521  *		For now just a placeholder.
12522  *
12523  * Context: Kernel thread context
12524  */
12525 
12526 static void
12527 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp)
12528 {
12529 	ASSERT(un != NULL);
12530 	ASSERT(bp != NULL);
12531 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12532 	SD_NEXT_IOSTART(index, un, bp);
12533 }
12534 
12535 
12536 /*
12537  * Function: sd_checksum_uscsi_iodone
12538  *
12539  * Description: A stub function for a layer that's currently not used.
12540  *		For now just a placeholder.
12541  *
12542  * Context: May be called under interrupt context
12543  */
12544 
12545 static void
12546 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
12547 {
12548 	ASSERT(un != NULL);
12549 	ASSERT(bp != NULL);
12550 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12551 	SD_NEXT_IODONE(index, un, bp);
12552 }
12553 
12554 
12555 /*
12556  * Function: sd_pm_iostart
12557  *
12558  * Description: iostart-side routine for power management.
12559  *
12560  * Context: Kernel thread context
12561  */
12562 
12563 static void
12564 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
12565 {
12566 	ASSERT(un != NULL);
12567 	ASSERT(bp != NULL);
12568 	ASSERT(!mutex_owned(SD_MUTEX(un)));
12569 	ASSERT(!mutex_owned(&un->un_pm_mutex));
12570 
12571 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");
12572 
12573 	if (sd_pm_entry(un) != DDI_SUCCESS) {
12574 		/*
12575 		 * Set up to return the failed buf back up the 'iodone'
12576 		 * side of the calling chain.
12577 		 */
12578 		bioerror(bp, EIO);
12579 		bp->b_resid = bp->b_bcount;
12580 
12581 		SD_BEGIN_IODONE(index, un, bp);
12582 
12583 		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
12584 		return;
12585 	}
12586 
12587 	SD_NEXT_IOSTART(index, un, bp);
12588 
12589 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
12590 }
12591 
12592 
12593 /*
12594  * Function: sd_pm_iodone
12595  *
12596  * Description: iodone-side routine for power management.
12597  *
12598  * Context: may be called from interrupt context
12599  */
12600 
12601 static void
12602 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
12603 {
12604 	ASSERT(un != NULL);
12605 	ASSERT(bp != NULL);
12606 	ASSERT(!mutex_owned(&un->un_pm_mutex));
12607 
12608 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");
12609 
12610 	/*
12611 	 * After attach the following flag is only read, so don't
12612 	 * take the penalty of acquiring a mutex for it.
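	 *
	 * (A hedged note on the pairing, inferred from these two routines
	 * rather than stated by the original authors: each IO that passes
	 * through sd_pm_iostart() makes one successful sd_pm_entry() call,
	 * and the sd_pm_exit() below is its counterpart. If the exit were
	 * skipped, the unit's power-management busy accounting would never
	 * drain and the device could be held at full power indefinitely.)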
12613 */ 12614 if (un->un_f_pm_is_enabled == TRUE) { 12615 sd_pm_exit(un); 12616 } 12617 12618 SD_NEXT_IODONE(index, un, bp); 12619 12620 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 12621 } 12622 12623 12624 /* 12625 * Function: sd_core_iostart 12626 * 12627 * Description: Primary driver function for enqueuing buf(9S) structs from 12628 * the system and initiating IO to the target device 12629 * 12630 * Context: Kernel thread context. Can sleep. 12631 * 12632 * Assumptions: - The given xp->xb_blkno is absolute 12633 * (ie, relative to the start of the device). 12634 * - The IO is to be done using the native blocksize of 12635 * the device, as specified in un->un_tgt_blocksize. 12636 */ 12637 /* ARGSUSED */ 12638 static void 12639 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 12640 { 12641 struct sd_xbuf *xp; 12642 12643 ASSERT(un != NULL); 12644 ASSERT(bp != NULL); 12645 ASSERT(!mutex_owned(SD_MUTEX(un))); 12646 ASSERT(bp->b_resid == 0); 12647 12648 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 12649 12650 xp = SD_GET_XBUF(bp); 12651 ASSERT(xp != NULL); 12652 12653 mutex_enter(SD_MUTEX(un)); 12654 12655 /* 12656 * If we are currently in the failfast state, fail any new IO 12657 * that has B_FAILFAST set, then return. 12658 */ 12659 if ((bp->b_flags & B_FAILFAST) && 12660 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 12661 mutex_exit(SD_MUTEX(un)); 12662 bioerror(bp, EIO); 12663 bp->b_resid = bp->b_bcount; 12664 SD_BEGIN_IODONE(index, un, bp); 12665 return; 12666 } 12667 12668 if (SD_IS_DIRECT_PRIORITY(xp)) { 12669 /* 12670 * Priority command -- transport it immediately. 12671 * 12672 * Note: We may want to assert that USCSI_DIAGNOSE is set, 12673 * because all direct priority commands should be associated 12674 * with error recovery actions which we don't want to retry. 12675 */ 12676 sd_start_cmds(un, bp); 12677 } else { 12678 /* 12679 * Normal command -- add it to the wait queue, then start 12680 * transporting commands from the wait queue. 12681 */ 12682 sd_add_buf_to_waitq(un, bp); 12683 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 12684 sd_start_cmds(un, NULL); 12685 } 12686 12687 mutex_exit(SD_MUTEX(un)); 12688 12689 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 12690 } 12691 12692 12693 /* 12694 * Function: sd_init_cdb_limits 12695 * 12696 * Description: This is to handle scsi_pkt initialization differences 12697 * between the driver platforms. 12698 * 12699 * Legacy behaviors: 12700 * 12701 * If the block number or the sector count exceeds the 12702 * capabilities of a Group 0 command, shift over to a 12703 * Group 1 command. We don't blindly use Group 1 12704 * commands because a) some drives (CDC Wren IVs) get a 12705 * bit confused, and b) there is probably a fair amount 12706 * of speed difference for a target to receive and decode 12707 * a 10 byte command instead of a 6 byte command. 12708 * 12709 * The xfer time difference of 6 vs 10 byte CDBs is 12710 * still significant so this code is still worthwhile. 12711 * 10 byte CDBs are very inefficient with the fas HBA driver 12712 * and older disks. Each CDB byte took 1 usec with some 12713 * popular disks. 12714 * 12715 * Context: Must be called at attach time 12716 */ 12717 12718 static void 12719 sd_init_cdb_limits(struct sd_lun *un) 12720 { 12721 /* 12722 * Use CDB_GROUP1 commands for most devices except for 12723 * parallel SCSI fixed drives in which case we get better 12724 * performance using CDB_GROUP0 commands (where applicable). 
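	 *
	 * For reference (standard SCSI CDB limits, not specific to this
	 * driver): a Group 0 (6-byte) CDB carries a 21-bit LBA and an
	 * 8-bit count (max LBA 0x1FFFFF, at most 256 blocks), while a
	 * Group 1 (10-byte) CDB carries a 32-bit LBA and a 16-bit count.
	 * Group 4 (16-byte) and Group 5 (12-byte) CDBs extend these to a
	 * 64-bit LBA and a 32-bit LBA respectively, each with a 32-bit
	 * count. sd_setup_rw_pkt() below applies these bounds, via
	 * sd_cdbtab, to pick the smallest CDB that fits a given request.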
12725 	 */
12726 	un->un_mincdb = SD_CDB_GROUP1;
12727 #if !defined(__fibre)
12728 	if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) &&
12729 	    !ISREMOVABLE(un)) {
12730 		un->un_mincdb = SD_CDB_GROUP0;
12731 	}
12732 #endif
12733 
12734 	/*
12735 	 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4
12736 	 * commands for fixed disks unless we are building for a 32 bit
12737 	 * kernel.
12738 	 */
12739 #ifdef _LP64
12740 	un->un_maxcdb = (ISREMOVABLE(un)) ? SD_CDB_GROUP5 : SD_CDB_GROUP4;
12741 #else
12742 	un->un_maxcdb = (ISREMOVABLE(un)) ? SD_CDB_GROUP5 : SD_CDB_GROUP1;
12743 #endif
12744 
12745 	/*
12746 	 * x86 systems require the PKT_DMA_PARTIAL flag
12747 	 */
12748 #if defined(__x86)
12749 	un->un_pkt_flags = PKT_DMA_PARTIAL;
12750 #else
12751 	un->un_pkt_flags = 0;
12752 #endif
12753 
12754 	un->un_status_len = (int)((un->un_f_arq_enabled == TRUE)
12755 	    ? sizeof (struct scsi_arq_status) : 1);
12756 	un->un_cmd_timeout = (ushort_t)sd_io_time;
12757 	un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout;
12758 }
12759 
12760 
12761 /*
12762  * Function: sd_initpkt_for_buf
12763  *
12764  * Description: Allocate and initialize for transport a scsi_pkt struct,
12765  *		based upon the info specified in the given buf struct.
12766  *
12767  *		Assumes the xb_blkno in the request is absolute (ie,
12768  *		relative to the start of the device, NOT the partition).
12769  *		Also assumes that the request is using the native block
12770  *		size of the device (as returned by the READ CAPACITY
12771  *		command).
12772  *
12773  * Return Code: SD_PKT_ALLOC_SUCCESS
12774  *		SD_PKT_ALLOC_FAILURE
12775  *		SD_PKT_ALLOC_FAILURE_NO_DMA
12776  *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
12777  *
12778  * Context: Kernel thread and may be called from software interrupt context
12779  *		as part of a sdrunout callback. This function may not block or
12780  *		call routines that block.
12781  */
12782 
12783 static int
12784 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
12785 {
12786 	struct sd_xbuf	*xp;
12787 	struct scsi_pkt *pktp = NULL;
12788 	struct sd_lun	*un;
12789 	size_t		blockcount;
12790 	daddr_t		startblock;
12791 	int		rval;
12792 	int		cmd_flags;
12793 
12794 	ASSERT(bp != NULL);
12795 	ASSERT(pktpp != NULL);
12796 	xp = SD_GET_XBUF(bp);
12797 	ASSERT(xp != NULL);
12798 	un = SD_GET_UN(bp);
12799 	ASSERT(un != NULL);
12800 	ASSERT(mutex_owned(SD_MUTEX(un)));
12801 	ASSERT(bp->b_resid == 0);
12802 
12803 	SD_TRACE(SD_LOG_IO_CORE, un,
12804 	    "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);
12805 
12806 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
12807 	if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
12808 		/*
12809 		 * Already have a scsi_pkt -- just need DMA resources.
12810 		 * We must recompute the CDB in case the mapping returns
12811 		 * a nonzero pkt_resid.
12812 		 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
12813 		 * that is being retried, the unmap/remap of the DMA resources
12814 		 * will result in the entire transfer starting over again
12815 		 * from the very first block.
12816 		 */
12817 		ASSERT(xp->xb_pktp != NULL);
12818 		pktp = xp->xb_pktp;
12819 	} else {
12820 		pktp = NULL;
12821 	}
12822 #endif /* __i386 || __amd64 */
12823 
12824 	startblock = xp->xb_blkno;	/* Absolute block num.
 */
12825 	blockcount  = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
12826 
12827 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
12828 
12829 	cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);
12830 
12831 #else
12832 
12833 	cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags;
12834 
12835 #endif
12836 
12837 	/*
12838 	 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
12839 	 * call scsi_init_pkt, and build the CDB.
12840 	 */
12841 	rval = sd_setup_rw_pkt(un, &pktp, bp,
12842 	    cmd_flags, sdrunout, (caddr_t)un,
12843 	    startblock, blockcount);
12844 
12845 	if (rval == 0) {
12846 		/*
12847 		 * Success.
12848 		 *
12849 		 * If partial DMA is being used and required for this transfer,
12850 		 * set it up here.
12851 		 */
12852 		if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
12853 		    (pktp->pkt_resid != 0)) {
12854 
12855 			/*
12856 			 * Save the CDB length and pkt_resid for the
12857 			 * next xfer
12858 			 */
12859 			xp->xb_dma_resid = pktp->pkt_resid;
12860 
12861 			/* rezero resid */
12862 			pktp->pkt_resid = 0;
12863 
12864 		} else {
12865 			xp->xb_dma_resid = 0;
12866 		}
12867 
12868 		pktp->pkt_flags = un->un_tagflags;
12869 		pktp->pkt_time  = un->un_cmd_timeout;
12870 		pktp->pkt_comp  = sdintr;
12871 
12872 		pktp->pkt_private = bp;
12873 		*pktpp = pktp;
12874 
12875 		SD_TRACE(SD_LOG_IO_CORE, un,
12876 		    "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);
12877 
12878 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
12879 		xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
12880 #endif
12881 
12882 		return (SD_PKT_ALLOC_SUCCESS);
12883 
12884 	}
12885 
12886 	/*
12887 	 * SD_PKT_ALLOC_FAILURE is the only expected failure code
12888 	 * from sd_setup_rw_pkt.
12889 	 */
12890 	ASSERT(rval == SD_PKT_ALLOC_FAILURE);
12891 
12892 	if (rval == SD_PKT_ALLOC_FAILURE) {
12893 		*pktpp = NULL;
12894 		/*
12895 		 * Set the driver state to RWAIT to indicate the driver
12896 		 * is waiting on resource allocations. The driver will not
12897 		 * suspend, pm_suspend, or detach while the state is RWAIT.
12898 		 */
12899 		New_state(un, SD_STATE_RWAIT);
12900 
12901 		SD_ERROR(SD_LOG_IO_CORE, un,
12902 		    "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);
12903 
12904 		if ((bp->b_flags & B_ERROR) != 0) {
12905 			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
12906 		}
12907 		return (SD_PKT_ALLOC_FAILURE);
12908 	} else {
12909 		/*
12910 		 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
12911 		 *
12912 		 * This should never happen. Maybe someone messed with the
12913 		 * kernel's minphys?
12914 		 */
12915 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
12916 		    "Request rejected: too large for CDB: "
12917 		    "lba:0x%08lx  len:0x%08lx\n", startblock, blockcount);
12918 		SD_ERROR(SD_LOG_IO_CORE, un,
12919 		    "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
12920 		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
12921 
12922 	}
12923 }
12924 
12925 
12926 /*
12927  * Function: sd_destroypkt_for_buf
12928  *
12929  * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
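 *
 *		(Lifecycle note: this is the counterpart of
 *		sd_initpkt_for_buf(); a packet created there with
 *		scsi_init_pkt(9F) is released here with
 *		scsi_destroy_pkt(9F). The two sides are selected
 *		symmetrically through the sd_initpkt_map[] and
 *		sd_destroypkt_map[] tables, indexed by xb_chain_iostart
 *		and xb_chain_iodone.)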
12930  *
12931  * Context: Kernel thread or interrupt context
12932  */
12933 
12934 static void
12935 sd_destroypkt_for_buf(struct buf *bp)
12936 {
12937 	ASSERT(bp != NULL);
12938 	ASSERT(SD_GET_UN(bp) != NULL);
12939 
12940 	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
12941 	    "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);
12942 
12943 	ASSERT(SD_GET_PKTP(bp) != NULL);
12944 	scsi_destroy_pkt(SD_GET_PKTP(bp));
12945 
12946 	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
12947 	    "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
12948 }
12949 
12950 /*
12951  * Function: sd_setup_rw_pkt
12952  *
12953  * Description: Determines appropriate CDB group for the requested LBA
12954  *		and transfer length, calls scsi_init_pkt, and builds
12955  *		the CDB. Do not use for partial DMA transfers except
12956  *		for the initial transfer since the CDB size must
12957  *		remain constant.
12958  *
12959  * Context: Kernel thread and may be called from software interrupt
12960  *		context as part of a sdrunout callback. This function may not
12961  *		block or call routines that block.
12962  */
12963 
12964 
12965 int
12966 sd_setup_rw_pkt(struct sd_lun *un,
12967     struct scsi_pkt **pktpp, struct buf *bp, int flags,
12968     int (*callback)(caddr_t), caddr_t callback_arg,
12969     diskaddr_t lba, uint32_t blockcount)
12970 {
12971 	struct scsi_pkt *return_pktp;
12972 	union scsi_cdb *cdbp;
12973 	struct sd_cdbinfo *cp = NULL;
12974 	int i;
12975 
12976 	/*
12977 	 * See which size CDB to use, based upon the request.
12978 	 */
12979 	for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {
12980 
12981 		/*
12982 		 * Check lba and block count against sd_cdbtab limits.
12983 		 * In the partial DMA case, we have to use the same size
12984 		 * CDB for all the transfers. Check lba + blockcount
12985 		 * against the max LBA so we know that segment of the
12986 		 * transfer can use the CDB we select.
12987 		 */
12988 		if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
12989 		    (blockcount <= sd_cdbtab[i].sc_maxlen)) {
12990 
12991 			/*
12992 			 * The command will fit into the CDB type
12993 			 * specified by sd_cdbtab[i].
12994 			 */
12995 			cp = sd_cdbtab + i;
12996 
12997 			/*
12998 			 * Call scsi_init_pkt so we can fill in the
12999 			 * CDB.
13000 			 */
13001 			return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
13002 			    bp, cp->sc_grpcode, un->un_status_len, 0,
13003 			    flags, callback, callback_arg);
13004 
13005 			if (return_pktp != NULL) {
13006 
13007 				/*
13008 				 * Return new value of pkt
13009 				 */
13010 				*pktpp = return_pktp;
13011 
13012 				/*
13013 				 * To be safe, zero the CDB ensuring there is
13014 				 * no leftover data from a previous command.
13015 				 */
13016 				bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);
13017 
13018 				/*
13019 				 * Handle partial DMA mapping
13020 				 */
13021 				if (return_pktp->pkt_resid != 0) {
13022 
13023 					/*
13024 					 * Not going to xfer as many blocks as
13025 					 * originally expected
13026 					 */
13027 					blockcount -=
13028 					    SD_BYTES2TGTBLOCKS(un,
13029 					    return_pktp->pkt_resid);
13030 				}
13031 
13032 				cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;
13033 
13034 				/*
13035 				 * Set command byte based on the CDB
13036 				 * type we matched.
13037 				 */
13038 				cdbp->scc_cmd = cp->sc_grpmask |
13039 				    ((bp->b_flags & B_READ) ?
13040 SCMD_READ : SCMD_WRITE); 13041 13042 sd_fill_scsi1_lun(un, return_pktp); 13043 13044 /* 13045 * Fill in LBA and length 13046 */ 13047 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 13048 (cp->sc_grpcode == CDB_GROUP4) || 13049 (cp->sc_grpcode == CDB_GROUP0) || 13050 (cp->sc_grpcode == CDB_GROUP5)); 13051 13052 if (cp->sc_grpcode == CDB_GROUP1) { 13053 FORMG1ADDR(cdbp, lba); 13054 FORMG1COUNT(cdbp, blockcount); 13055 return (0); 13056 } else if (cp->sc_grpcode == CDB_GROUP4) { 13057 FORMG4LONGADDR(cdbp, lba); 13058 FORMG4COUNT(cdbp, blockcount); 13059 return (0); 13060 } else if (cp->sc_grpcode == CDB_GROUP0) { 13061 FORMG0ADDR(cdbp, lba); 13062 FORMG0COUNT(cdbp, blockcount); 13063 return (0); 13064 } else if (cp->sc_grpcode == CDB_GROUP5) { 13065 FORMG5ADDR(cdbp, lba); 13066 FORMG5COUNT(cdbp, blockcount); 13067 return (0); 13068 } 13069 13070 /* 13071 * It should be impossible to not match one 13072 * of the CDB types above, so we should never 13073 * reach this point. Set the CDB command byte 13074 * to test-unit-ready to avoid writing 13075 * to somewhere we don't intend. 13076 */ 13077 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 13078 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13079 } else { 13080 /* 13081 * Couldn't get scsi_pkt 13082 */ 13083 return (SD_PKT_ALLOC_FAILURE); 13084 } 13085 } 13086 } 13087 13088 /* 13089 * None of the available CDB types were suitable. This really 13090 * should never happen: on a 64 bit system we support 13091 * READ16/WRITE16 which will hold an entire 64 bit disk address 13092 * and on a 32 bit system we will refuse to bind to a device 13093 * larger than 2TB so addresses will never be larger than 32 bits. 13094 */ 13095 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13096 } 13097 13098 /* 13099 * Function: sd_setup_next_rw_pkt 13100 * 13101 * Description: Setup packet for partial DMA transfers, except for the 13102 * initial transfer. sd_setup_rw_pkt should be used for 13103 * the initial transfer. 13104 * 13105 * Context: Kernel thread and may be called from interrupt context. 13106 */ 13107 13108 int 13109 sd_setup_next_rw_pkt(struct sd_lun *un, 13110 struct scsi_pkt *pktp, struct buf *bp, 13111 diskaddr_t lba, uint32_t blockcount) 13112 { 13113 uchar_t com; 13114 union scsi_cdb *cdbp; 13115 uchar_t cdb_group_id; 13116 13117 ASSERT(pktp != NULL); 13118 ASSERT(pktp->pkt_cdbp != NULL); 13119 13120 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 13121 com = cdbp->scc_cmd; 13122 cdb_group_id = CDB_GROUPID(com); 13123 13124 ASSERT((cdb_group_id == CDB_GROUPID_0) || 13125 (cdb_group_id == CDB_GROUPID_1) || 13126 (cdb_group_id == CDB_GROUPID_4) || 13127 (cdb_group_id == CDB_GROUPID_5)); 13128 13129 /* 13130 * Move pkt to the next portion of the xfer. 13131 * func is NULL_FUNC so we do not have to release 13132 * the disk mutex here. 13133 */ 13134 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 13135 NULL_FUNC, NULL) == pktp) { 13136 /* Success. 
Handle partial DMA */
13137 		if (pktp->pkt_resid != 0) {
13138 			blockcount -=
13139 			    SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
13140 		}
13141 
13142 		cdbp->scc_cmd = com;
13143 		sd_fill_scsi1_lun(un, pktp);
13144 		if (cdb_group_id == CDB_GROUPID_1) {
13145 			FORMG1ADDR(cdbp, lba);
13146 			FORMG1COUNT(cdbp, blockcount);
13147 			return (0);
13148 		} else if (cdb_group_id == CDB_GROUPID_4) {
13149 			FORMG4LONGADDR(cdbp, lba);
13150 			FORMG4COUNT(cdbp, blockcount);
13151 			return (0);
13152 		} else if (cdb_group_id == CDB_GROUPID_0) {
13153 			FORMG0ADDR(cdbp, lba);
13154 			FORMG0COUNT(cdbp, blockcount);
13155 			return (0);
13156 		} else if (cdb_group_id == CDB_GROUPID_5) {
13157 			FORMG5ADDR(cdbp, lba);
13158 			FORMG5COUNT(cdbp, blockcount);
13159 			return (0);
13160 		}
13161 
13162 		/* Unreachable */
13163 		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13164 	}
13165 
13166 	/*
13167 	 * Error setting up next portion of cmd transfer.
13168 	 * Something is definitely very wrong and this
13169 	 * should not happen.
13170 	 */
13171 	return (SD_PKT_ALLOC_FAILURE);
13172 }
13173 
13174 /*
13175  * Function: sd_initpkt_for_uscsi
13176  *
13177  * Description: Allocate and initialize for transport a scsi_pkt struct,
13178  *		based upon the info specified in the given uscsi_cmd struct.
13179  *
13180  * Return Code: SD_PKT_ALLOC_SUCCESS
13181  *		SD_PKT_ALLOC_FAILURE
13182  *		SD_PKT_ALLOC_FAILURE_NO_DMA
13183  *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13184  *
13185  * Context: Kernel thread and may be called from software interrupt context
13186  *		as part of a sdrunout callback. This function may not block or
13187  *		call routines that block.
13188  */
13189 
13190 static int
13191 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
13192 {
13193 	struct uscsi_cmd *uscmd;
13194 	struct sd_xbuf	*xp;
13195 	struct scsi_pkt	*pktp;
13196 	struct sd_lun	*un;
13197 	uint32_t	flags = 0;
13198 
13199 	ASSERT(bp != NULL);
13200 	ASSERT(pktpp != NULL);
13201 	xp = SD_GET_XBUF(bp);
13202 	ASSERT(xp != NULL);
13203 	un = SD_GET_UN(bp);
13204 	ASSERT(un != NULL);
13205 	ASSERT(mutex_owned(SD_MUTEX(un)));
13206 
13207 	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
13208 	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
13209 	ASSERT(uscmd != NULL);
13210 
13211 	SD_TRACE(SD_LOG_IO_CORE, un,
13212 	    "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);
13213 
13214 	/* Allocate the scsi_pkt for the command. */
13215 	pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
13216 	    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
13217 	    sizeof (struct scsi_arq_status), 0, un->un_pkt_flags,
13218 	    sdrunout, (caddr_t)un);
13219 
13220 	if (pktp == NULL) {
13221 		*pktpp = NULL;
13222 		/*
13223 		 * Set the driver state to RWAIT to indicate the driver
13224 		 * is waiting on resource allocations. The driver will not
13225 		 * suspend, pm_suspend, or detach while the state is RWAIT.
13226 		 */
13227 		New_state(un, SD_STATE_RWAIT);
13228 
13229 		SD_ERROR(SD_LOG_IO_CORE, un,
13230 		    "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);
13231 
13232 		if ((bp->b_flags & B_ERROR) != 0) {
13233 			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
13234 		}
13235 		return (SD_PKT_ALLOC_FAILURE);
13236 	}
13237 
13238 	/*
13239 	 * We do not do DMA breakup for USCSI commands, so return failure
13240 	 * here if all the needed DMA resources were not allocated.
13241 	 */
13242 	if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
13243 	    (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
13244 		scsi_destroy_pkt(pktp);
13245 		SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
13246 		    "No partial DMA for USCSI.
exit: buf:0x%p\n", bp);
13247 		return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
13248 	}
13249 
13250 	/* Init the cdb from the given uscsi struct */
13251 	(void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
13252 	    uscmd->uscsi_cdb[0], 0, 0, 0);
13253 
13254 	sd_fill_scsi1_lun(un, pktp);
13255 
13256 	/*
13257 	 * Set up the optional USCSI flags. See the uscsi(7I) man page
13258 	 * for a listing of the supported flags.
13259 	 */
13260 
13261 	if (uscmd->uscsi_flags & USCSI_SILENT) {
13262 		flags |= FLAG_SILENT;
13263 	}
13264 
13265 	if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
13266 		flags |= FLAG_DIAGNOSE;
13267 	}
13268 
13269 	if (uscmd->uscsi_flags & USCSI_ISOLATE) {
13270 		flags |= FLAG_ISOLATE;
13271 	}
13272 
13273 	if (un->un_f_is_fibre == FALSE) {
13274 		if (uscmd->uscsi_flags & USCSI_RENEGOT) {
13275 			flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
13276 		}
13277 	}
13278 
13279 	/*
13280 	 * Set the pkt flags here so we save time later.
13281 	 * Note: These flags are NOT in the uscsi man page!!!
13282 	 */
13283 	if (uscmd->uscsi_flags & USCSI_HEAD) {
13284 		flags |= FLAG_HEAD;
13285 	}
13286 
13287 	if (uscmd->uscsi_flags & USCSI_NOINTR) {
13288 		flags |= FLAG_NOINTR;
13289 	}
13290 
13291 	/*
13292 	 * For tagged queueing, things get a bit complicated.
13293 	 * Check first for head of queue and last for ordered queue.
13294 	 * If neither head nor order, use the default driver tag flags.
13295 	 */
13296 	if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
13297 		if (uscmd->uscsi_flags & USCSI_HTAG) {
13298 			flags |= FLAG_HTAG;
13299 		} else if (uscmd->uscsi_flags & USCSI_OTAG) {
13300 			flags |= FLAG_OTAG;
13301 		} else {
13302 			flags |= un->un_tagflags & FLAG_TAGMASK;
13303 		}
13304 	}
13305 
13306 	if (uscmd->uscsi_flags & USCSI_NODISCON) {
13307 		flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
13308 	}
13309 
13310 	pktp->pkt_flags = flags;
13311 
13312 	/* Copy the caller's CDB into the pkt... */
13313 	bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);
13314 
13315 	if (uscmd->uscsi_timeout == 0) {
13316 		pktp->pkt_time = un->un_uscsi_timeout;
13317 	} else {
13318 		pktp->pkt_time = uscmd->uscsi_timeout;
13319 	}
13320 
13321 	/* need it later to identify USCSI request in sdintr */
13322 	xp->xb_pkt_flags |= SD_XB_USCSICMD;
13323 
13324 	xp->xb_sense_resid = uscmd->uscsi_rqresid;
13325 
13326 	pktp->pkt_private = bp;
13327 	pktp->pkt_comp = sdintr;
13328 	*pktpp = pktp;
13329 
13330 	SD_TRACE(SD_LOG_IO_CORE, un,
13331 	    "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);
13332 
13333 	return (SD_PKT_ALLOC_SUCCESS);
13334 }
13335 
13336 
13337 /*
13338  * Function: sd_destroypkt_for_uscsi
13339  *
13340  * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
13341  *		IOs. Also saves relevant info into the associated uscsi_cmd
13342  *		struct.
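 *
 *		A hedged user-level sketch of the other end of this
 *		exchange (illustrative only; 'fd' is assumed to be an open
 *		descriptor for a raw disk device, and none of this is
 *		driver code):
 *
 *			struct uscsi_cmd ucmd;
 *			union scsi_cdb cdb;
 *			char rq[SENSE_LENGTH];
 *
 *			bzero(&ucmd, sizeof (ucmd));
 *			bzero(&cdb, sizeof (cdb));
 *			cdb.scc_cmd = SCMD_TEST_UNIT_READY;
 *			ucmd.uscsi_cdb = (caddr_t)&cdb;
 *			ucmd.uscsi_cdblen = CDB_GROUP0;
 *			ucmd.uscsi_flags = USCSI_SILENT | USCSI_RQENABLE;
 *			ucmd.uscsi_rqbuf = rq;
 *			ucmd.uscsi_rqlen = SENSE_LENGTH;
 *			(void) ioctl(fd, USCSICMD, &ucmd);
 *
 *		On return, uscsi_status, uscsi_resid, uscsi_rqstatus and
 *		uscsi_rqbuf have been filled in by the code below.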
13343  *
13344  * Context: May be called under interrupt context
13345  */
13346 
13347 static void
13348 sd_destroypkt_for_uscsi(struct buf *bp)
13349 {
13350 	struct uscsi_cmd *uscmd;
13351 	struct sd_xbuf	*xp;
13352 	struct scsi_pkt	*pktp;
13353 	struct sd_lun	*un;
13354 
13355 	ASSERT(bp != NULL);
13356 	xp = SD_GET_XBUF(bp);
13357 	ASSERT(xp != NULL);
13358 	un = SD_GET_UN(bp);
13359 	ASSERT(un != NULL);
13360 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13361 	pktp = SD_GET_PKTP(bp);
13362 	ASSERT(pktp != NULL);
13363 
13364 	SD_TRACE(SD_LOG_IO_CORE, un,
13365 	    "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);
13366 
13367 	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
13368 	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
13369 	ASSERT(uscmd != NULL);
13370 
13371 	/* Save the status and the residual into the uscsi_cmd struct */
13372 	uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
13373 	uscmd->uscsi_resid  = bp->b_resid;
13374 
13375 	/*
13376 	 * If enabled, copy any saved sense data into the area specified
13377 	 * by the uscsi command.
13378 	 */
13379 	if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
13380 	    (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
13381 		/*
13382 		 * Note: uscmd->uscsi_rqbuf should always point to a buffer
13383 		 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
13384 		 */
13385 		uscmd->uscsi_rqstatus = xp->xb_sense_status;
13386 		uscmd->uscsi_rqresid  = xp->xb_sense_resid;
13387 		bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, SENSE_LENGTH);
13388 	}
13389 
13390 	/* We are done with the scsi_pkt; free it now */
13391 	ASSERT(SD_GET_PKTP(bp) != NULL);
13392 	scsi_destroy_pkt(SD_GET_PKTP(bp));
13393 
13394 	SD_TRACE(SD_LOG_IO_CORE, un,
13395 	    "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
13396 }
13397 
13398 
13399 /*
13400  * Function: sd_bioclone_alloc
13401  *
13402  * Description: Allocate a buf(9S) and init it as per the given buf
13403  *		and the various arguments. The associated sd_xbuf
13404  *		struct is (nearly) duplicated. The struct buf *bp
13405  *		argument is saved in new_xp->xb_private.
13406  *
13407  * Arguments: bp - ptr to the buf(9S) to be "shadowed"
13408  *		datalen - size of data area for the shadow bp
13409  *		blkno - starting LBA
13410  *		func - function pointer for b_iodone in the shadow buf. (May
13411  *		be NULL if none.)
13412  *
13413  * Return Code: Pointer to the allocated buf(9S) struct
13414  *
13415  * Context: Can sleep.
13416  */
13417 
13418 static struct buf *
13419 sd_bioclone_alloc(struct buf *bp, size_t datalen,
13420     daddr_t blkno, int (*func)(struct buf *))
13421 {
13422 	struct sd_lun	*un;
13423 	struct sd_xbuf	*xp;
13424 	struct sd_xbuf	*new_xp;
13425 	struct buf	*new_bp;
13426 
13427 	ASSERT(bp != NULL);
13428 	xp = SD_GET_XBUF(bp);
13429 	ASSERT(xp != NULL);
13430 	un = SD_GET_UN(bp);
13431 	ASSERT(un != NULL);
13432 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13433 
13434 	new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
13435 	    NULL, KM_SLEEP);
13436 
13437 	new_bp->b_lblkno = blkno;
13438 
13439 	/*
13440 	 * Allocate an xbuf for the shadow bp and copy the contents of the
13441 	 * original xbuf into it.
13442 	 */
13443 	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
13444 	bcopy(xp, new_xp, sizeof (struct sd_xbuf));
13445 
13446 	/*
13447 	 * The given bp is automatically saved in the xb_private member
13448 	 * of the new xbuf. Callers are allowed to depend on this.
13449 	 */
13450 	new_xp->xb_private = bp;
13451 
13452 	new_bp->b_private  = new_xp;
13453 
13454 	return (new_bp);
13455 }
13456 
13457 /*
13458  * Function: sd_shadow_buf_alloc
13459  *
13460  * Description: Allocate a buf(9S) and init it as per the given buf
13461  *		and the various arguments. The associated sd_xbuf
13462  *		struct is (nearly) duplicated. The struct buf *bp
13463  *		argument is saved in new_xp->xb_private.
13464  *
13465  * Arguments: bp - ptr to the buf(9S) to be "shadowed"
13466  *		datalen - size of data area for the shadow bp
13467  *		bflags - B_READ or B_WRITE (pseudo flag)
13468  *		blkno - starting LBA
13469  *		func - function pointer for b_iodone in the shadow buf. (May
13470  *		be NULL if none.)
13471  *
13472  * Return Code: Pointer to the allocated buf(9S) struct
13473  *
13474  * Context: Can sleep.
13475  */
13476 
13477 static struct buf *
13478 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
13479     daddr_t blkno, int (*func)(struct buf *))
13480 {
13481 	struct	sd_lun	*un;
13482 	struct	sd_xbuf	*xp;
13483 	struct	sd_xbuf	*new_xp;
13484 	struct	buf	*new_bp;
13485 
13486 	ASSERT(bp != NULL);
13487 	xp = SD_GET_XBUF(bp);
13488 	ASSERT(xp != NULL);
13489 	un = SD_GET_UN(bp);
13490 	ASSERT(un != NULL);
13491 	ASSERT(!mutex_owned(SD_MUTEX(un)));
13492 
13493 	if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
13494 		bp_mapin(bp);
13495 	}
13496 
13497 	bflags &= (B_READ | B_WRITE);
13498 #if defined(__i386) || defined(__amd64)
13499 	new_bp = getrbuf(KM_SLEEP);
13500 	new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
13501 	new_bp->b_bcount = datalen;
13502 	new_bp->b_flags = bp->b_flags | bflags;
13503 #else
13504 	new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
13505 	    datalen, bflags, SLEEP_FUNC, NULL);
13506 #endif
13507 	new_bp->av_forw	= NULL;
13508 	new_bp->av_back	= NULL;
13509 	new_bp->b_dev	= bp->b_dev;
13510 	new_bp->b_blkno	= blkno;
13511 	new_bp->b_iodone = func;
13512 	new_bp->b_edev	= bp->b_edev;
13513 	new_bp->b_resid	= 0;
13514 
13515 	/* We need to preserve the B_FAILFAST flag */
13516 	if (bp->b_flags & B_FAILFAST) {
13517 		new_bp->b_flags |= B_FAILFAST;
13518 	}
13519 
13520 	/*
13521 	 * Allocate an xbuf for the shadow bp and copy the contents of the
13522 	 * original xbuf into it.
13523 	 */
13524 	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
13525 	bcopy(xp, new_xp, sizeof (struct sd_xbuf));
13526 
13527 	/* Need later to copy data between the shadow buf & original buf! */
13528 	new_xp->xb_pkt_flags |= PKT_CONSISTENT;
13529 
13530 	/*
13531 	 * The given bp is automatically saved in the xb_private member
13532 	 * of the new xbuf. Callers are allowed to depend on this.
13533 	 */
13534 	new_xp->xb_private = bp;
13535 
13536 	new_bp->b_private  = new_xp;
13537 
13538 	return (new_bp);
13539 }
13540 
13541 /*
13542  * Function: sd_bioclone_free
13543  *
13544  * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
13545  *		in the larger than partition operation.
13546  *
13547  * Context: May be called under interrupt context
13548  */
13549 
13550 static void
13551 sd_bioclone_free(struct buf *bp)
13552 {
13553 	struct sd_xbuf	*xp;
13554 
13555 	ASSERT(bp != NULL);
13556 	xp = SD_GET_XBUF(bp);
13557 	ASSERT(xp != NULL);
13558 
13559 	/*
13560 	 * Call bp_mapout() before freeing the buf, in case a lower
13561 	 * layer or HBA had done a bp_mapin(); we must do this here
13562 	 * as we are the "originator" of the shadow buf.
13563 	 */
13564 	bp_mapout(bp);
13565 
13566 	/*
13567 	 * Null out b_iodone before freeing the bp, to ensure that the driver
13568 	 * never gets confused by a stale value in this field.
(Just a little
13569 	 * extra defensiveness here.)
13570 	 */
13571 	bp->b_iodone = NULL;
13572 
13573 	freerbuf(bp);
13574 
13575 	kmem_free(xp, sizeof (struct sd_xbuf));
13576 }
13577 
13578 /*
13579  * Function: sd_shadow_buf_free
13580  *
13581  * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
13582  *
13583  * Context: May be called under interrupt context
13584  */
13585 
13586 static void
13587 sd_shadow_buf_free(struct buf *bp)
13588 {
13589 	struct sd_xbuf	*xp;
13590 
13591 	ASSERT(bp != NULL);
13592 	xp = SD_GET_XBUF(bp);
13593 	ASSERT(xp != NULL);
13594 
13595 #if defined(__sparc)
13596 	/*
13597 	 * Call bp_mapout() before freeing the buf, in case a lower
13598 	 * layer or HBA had done a bp_mapin(); we must do this here
13599 	 * as we are the "originator" of the shadow buf.
13600 	 */
13601 	bp_mapout(bp);
13602 #endif
13603 
13604 	/*
13605 	 * Null out b_iodone before freeing the bp, to ensure that the driver
13606 	 * never gets confused by a stale value in this field. (Just a little
13607 	 * extra defensiveness here.)
13608 	 */
13609 	bp->b_iodone = NULL;
13610 
13611 #if defined(__i386) || defined(__amd64)
13612 	kmem_free(bp->b_un.b_addr, bp->b_bcount);
13613 	freerbuf(bp);
13614 #else
13615 	scsi_free_consistent_buf(bp);
13616 #endif
13617 
13618 	kmem_free(xp, sizeof (struct sd_xbuf));
13619 }
13620 
13621 
13622 /*
13623  * Function: sd_print_transport_rejected_message
13624  *
13625  * Description: This implements the ludicrously complex rules for printing
13626  *		a "transport rejected" message. This is to address the
13627  *		specific problem of having a flood of this error message
13628  *		produced when a failover occurs.
13629  *
13630  * Context: Any.
13631  */
13632 
13633 static void
13634 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
13635     int code)
13636 {
13637 	ASSERT(un != NULL);
13638 	ASSERT(mutex_owned(SD_MUTEX(un)));
13639 	ASSERT(xp != NULL);
13640 
13641 	/*
13642 	 * Print the "transport rejected" message under the following
13643 	 * conditions:
13644 	 *
13645 	 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
13646 	 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
13647 	 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
13648 	 *   printed the FIRST time a TRAN_FATAL_ERROR is returned from
13649 	 *   scsi_transport(9F) (which indicates that the target might have
13650 	 *   gone off-line). This uses the un->un_tran_fatal_count
13651 	 *   counter, which is incremented whenever a TRAN_FATAL_ERROR is
13652 	 *   received, and reset to zero whenever a TRAN_ACCEPT is returned
13653 	 *   from scsi_transport().
13654 	 *
13655 	 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
13656 	 * the preceding cases in order for the message to be printed.
13657 	 */
13658 	if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) {
13659 		if ((sd_level_mask & SD_LOGMASK_DIAG) ||
13660 		    (code != TRAN_FATAL_ERROR) ||
13661 		    (un->un_tran_fatal_count == 1)) {
13662 			switch (code) {
13663 			case TRAN_BADPKT:
13664 				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13665 				    "transport rejected bad packet\n");
13666 				break;
13667 			case TRAN_FATAL_ERROR:
13668 				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13669 				    "transport rejected fatal error\n");
13670 				break;
13671 			default:
13672 				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13673 				    "transport rejected (%d)\n", code);
13674 				break;
13675 			}
13676 		}
13677 	}
13678 }
13679 
13680 
13681 /*
13682  * Function: sd_add_buf_to_waitq
13683  *
13684  * Description: Add the given buf(9S) struct to the wait queue for the
13685  *		instance.
If sorting is enabled, then the buf is added
13686  *		to the queue via an elevator sort algorithm (a la
13687  *		disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key.
13688  *		If sorting is not enabled, then the buf is just added
13689  *		to the end of the wait queue.
13690  *
13691  * Return Code: void
13692  *
13693  * Context: Does not sleep/block, therefore technically can be called
13694  *		from any context. However if sorting is enabled then the
13695  *		execution time is indeterminate, and may take a long time if
13696  *		the wait queue grows large.
13697  */
13698 
13699 static void
13700 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
13701 {
13702 	struct buf *ap;
13703 
13704 	ASSERT(bp != NULL);
13705 	ASSERT(un != NULL);
13706 	ASSERT(mutex_owned(SD_MUTEX(un)));
13707 
13708 	/* If the queue is empty, add the buf as the only entry & return. */
13709 	if (un->un_waitq_headp == NULL) {
13710 		ASSERT(un->un_waitq_tailp == NULL);
13711 		un->un_waitq_headp = un->un_waitq_tailp = bp;
13712 		bp->av_forw = NULL;
13713 		return;
13714 	}
13715 
13716 	ASSERT(un->un_waitq_tailp != NULL);
13717 
13718 	/*
13719 	 * If sorting is disabled, just add the buf to the tail end of
13720 	 * the wait queue and return.
13721 	 */
13722 	if (un->un_f_disksort_disabled) {
13723 		un->un_waitq_tailp->av_forw = bp;
13724 		un->un_waitq_tailp = bp;
13725 		bp->av_forw = NULL;
13726 		return;
13727 	}
13728 
13729 	/*
13730 	 * Sort through the list of requests currently on the wait queue
13731 	 * and add the new buf request at the appropriate position.
13732 	 *
13733 	 * The un->un_waitq_headp is an activity chain pointer on which
13734 	 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
13735 	 * first queue holds those requests which are positioned after
13736 	 * the current SD_GET_BLKNO() (in the first request); the second holds
13737 	 * requests which came in after their SD_GET_BLKNO() number was passed.
13738 	 * Thus we implement a one-way scan, retracting after reaching
13739 	 * the end of the drive to the first request on the second
13740 	 * queue, at which time it becomes the first queue.
13741 	 * A one-way scan is natural because of the way UNIX read-ahead
13742 	 * blocks are allocated.
13743 	 *
13744 	 * If we lie after the first request, then we must locate the
13745 	 * second request list and add ourselves to it.
13746 	 */
13747 	ap = un->un_waitq_headp;
13748 	if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
13749 		while (ap->av_forw != NULL) {
13750 			/*
13751 			 * Look for an "inversion" in the (normally
13752 			 * ascending) block numbers. This indicates
13753 			 * the start of the second request list.
13754 			 */
13755 			if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
13756 				/*
13757 				 * Search the second request list for the
13758 				 * first request at a larger block number.
13759 				 * We go before that; however if there is
13760 				 * no such request, we go at the end.
13761 				 */
13762 				do {
13763 					if (SD_GET_BLKNO(bp) <
13764 					    SD_GET_BLKNO(ap->av_forw)) {
13765 						goto insert;
13766 					}
13767 					ap = ap->av_forw;
13768 				} while (ap->av_forw != NULL);
13769 				goto insert;		/* after last */
13770 			}
13771 			ap = ap->av_forw;
13772 		}
13773 
13774 		/*
13775 		 * No inversions... we will go after the last, and
13776 		 * be the first request in the second request list.
13777 		 */
13778 		goto insert;
13779 	}
13780 
13781 	/*
13782 	 * Request is at/after the current request...
13783 	 * sort in the first request list.
13784 	 */
13785 	while (ap->av_forw != NULL) {
13786 		/*
13787 		 * We want to go after the current request (1) if
13788 		 * there is an inversion after it (i.e.
it is the end
13789 		 * of the first request list), or (2) if the next
13790 		 * request is a larger block no. than our request.
13791 		 */
13792 		if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
13793 		    (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
13794 			goto insert;
13795 		}
13796 		ap = ap->av_forw;
13797 	}
13798 
13799 	/*
13800 	 * Neither a second list nor a larger request, therefore
13801 	 * we go at the end of the first list (which is the same
13802 	 * as the end of the whole shebang).
13803 	 */
13804 insert:
13805 	bp->av_forw = ap->av_forw;
13806 	ap->av_forw = bp;
13807 
13808 	/*
13809 	 * If we inserted onto the tail end of the waitq, make sure the
13810 	 * tail pointer is updated.
13811 	 */
13812 	if (ap == un->un_waitq_tailp) {
13813 		un->un_waitq_tailp = bp;
13814 	}
13815 }
13816 
13817 
13818 /*
13819  * Function: sd_start_cmds
13820  *
13821  * Description: Remove and transport cmds from the driver queues.
13822  *
13823  * Arguments: un - pointer to the unit (soft state) struct for the target.
13824  *
13825  *		immed_bp - ptr to a buf to be transported immediately. Only
13826  *		the immed_bp is transported; bufs on the waitq are not
13827  *		processed and the un_retry_bp is not checked. If immed_bp is
13828  *		NULL, then normal queue processing is performed.
13829  *
13830  * Context: May be called from kernel thread context, interrupt context,
13831  *		or runout callback context. This function may not block or
13832  *		call routines that block.
13833  */
13834 
13835 static void
13836 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp)
13837 {
13838 	struct	sd_xbuf	*xp;
13839 	struct	buf	*bp;
13840 	void	(*statp)(kstat_io_t *);
13841 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
13842 	void	(*saved_statp)(kstat_io_t *);
13843 #endif
13844 	int	rval;
13845 
13846 	ASSERT(un != NULL);
13847 	ASSERT(mutex_owned(SD_MUTEX(un)));
13848 	ASSERT(un->un_ncmds_in_transport >= 0);
13849 	ASSERT(un->un_throttle >= 0);
13850 
13851 	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n");
13852 
13853 	do {
13854 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
13855 		saved_statp = NULL;
13856 #endif
13857 
13858 		/*
13859 		 * If we are syncing or dumping, fail the command to
13860 		 * avoid recursively calling back into scsi_transport().
13861 		 */
13862 		if (ddi_in_panic()) {
13863 			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13864 			    "sd_start_cmds: panicking\n");
13865 			goto exit;
13866 		}
13867 
13868 		if ((bp = immed_bp) != NULL) {
13869 			/*
13870 			 * We have a bp that must be transported immediately.
13871 			 * It's OK to transport the immed_bp here without doing
13872 			 * the throttle limit check because the immed_bp is
13873 			 * always used in a retry/recovery case. This means
13874 			 * that we know we are not at the throttle limit by
13875 			 * virtue of the fact that to get here we must have
13876 			 * already gotten a command back via sdintr(). This also
13877 			 * relies on (1) the command on un_retry_bp preventing
13878 			 * further commands from the waitq from being issued;
13879 			 * and (2) the code in sd_retry_command checking the
13880 			 * throttle limit before issuing a delayed or immediate
13881 			 * retry. This holds even if the throttle limit is
13882 			 * currently ratcheted down from its maximum value.
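			 *
			 * To illustrate with hypothetical numbers: if
			 * un_throttle is 2 and both slots are occupied, a
			 * command taken from the waitq is held back by the
			 * un_ncmds_in_transport >= un_throttle test further
			 * below, whereas an immed_bp retry is sent anyway,
			 * since the completion that triggered the retry has
			 * already vacated one of those slots.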
13883 			 */
13884 			statp = kstat_runq_enter;
13885 			if (bp == un->un_retry_bp) {
13886 				ASSERT((un->un_retry_statp == NULL) ||
13887 				    (un->un_retry_statp == kstat_waitq_enter) ||
13888 				    (un->un_retry_statp ==
13889 				    kstat_runq_back_to_waitq));
13890 				/*
13891 				 * If the waitq kstat was incremented when
13892 				 * sd_set_retry_bp() queued this bp for a retry,
13893 				 * then we must set up statp so that the waitq
13894 				 * count will get decremented correctly below.
13895 				 * Also we must clear un->un_retry_statp to
13896 				 * ensure that we do not act on a stale value
13897 				 * in this field.
13898 				 */
13899 				if ((un->un_retry_statp == kstat_waitq_enter) ||
13900 				    (un->un_retry_statp ==
13901 				    kstat_runq_back_to_waitq)) {
13902 					statp = kstat_waitq_to_runq;
13903 				}
13904 #if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
13905 				saved_statp = un->un_retry_statp;
13906 #endif
13907 				un->un_retry_statp = NULL;
13908 
13909 				SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
13910 				    "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
13911 				    "un_throttle:%d un_ncmds_in_transport:%d\n",
13912 				    un, un->un_retry_bp, un->un_throttle,
13913 				    un->un_ncmds_in_transport);
13914 			} else {
13915 				SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
13916 				    "processing priority bp:0x%p\n", bp);
13917 			}
13918 
13919 		} else if ((bp = un->un_waitq_headp) != NULL) {
13920 			/*
13921 			 * A command on the waitq is ready to go, but do not
13922 			 * send it if:
13923 			 *
13924 			 * (1) the throttle limit has been reached, or
13925 			 * (2) a retry is pending, or
13926 			 * (3) a START_STOP_UNIT callback is pending, or
13927 			 * (4) a callback for a SD_PATH_DIRECT_PRIORITY
13928 			 *     command is pending.
13929 			 *
13930 			 * For all of these conditions, IO processing will
13931 			 * restart after the condition is cleared.
13932 			 */
13933 			if (un->un_ncmds_in_transport >= un->un_throttle) {
13934 				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13935 				    "sd_start_cmds: exiting, "
13936 				    "throttle limit reached!\n");
13937 				goto exit;
13938 			}
13939 			if (un->un_retry_bp != NULL) {
13940 				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13941 				    "sd_start_cmds: exiting, retry pending!\n");
13942 				goto exit;
13943 			}
13944 			if (un->un_startstop_timeid != NULL) {
13945 				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13946 				    "sd_start_cmds: exiting, "
13947 				    "START_STOP pending!\n");
13948 				goto exit;
13949 			}
13950 			if (un->un_direct_priority_timeid != NULL) {
13951 				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
13952 				    "sd_start_cmds: exiting, "
13953 				    "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
13954 				goto exit;
13955 			}
13956 
13957 			/* Dequeue the command */
13958 			un->un_waitq_headp = bp->av_forw;
13959 			if (un->un_waitq_headp == NULL) {
13960 				un->un_waitq_tailp = NULL;
13961 			}
13962 			bp->av_forw = NULL;
13963 			statp = kstat_waitq_to_runq;
13964 			SD_TRACE(SD_LOG_IO_CORE, un,
13965 			    "sd_start_cmds: processing waitq bp:0x%p\n", bp);
13966 
13967 		} else {
13968 			/* No work to do so bail out now */
13969 			SD_TRACE(SD_LOG_IO_CORE, un,
13970 			    "sd_start_cmds: no more work, exiting!\n");
13971 			goto exit;
13972 		}
13973 
13974 		/*
13975 		 * Reset the state to normal. This is the mechanism by which
13976 		 * the state transitions from either SD_STATE_RWAIT or
13977 		 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
13978 		 * If state is SD_STATE_PM_CHANGING then this command is
13979 		 * part of the device power control and the state must
13980 		 * not be put back to normal. Doing so would
13981 		 * allow new commands to proceed when they shouldn't;
13982 		 * the device may be going off.
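		 *
		 * In summary (derived from the test below; not an exhaustive
		 * state table):
		 *
		 *	SD_STATE_RWAIT		-> SD_STATE_NORMAL
		 *	SD_STATE_OFFLINE	-> SD_STATE_NORMAL
		 *	SD_STATE_SUSPENDED	left unchanged
		 *	SD_STATE_PM_CHANGING	left unchanged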
13983 */ 13984 if ((un->un_state != SD_STATE_SUSPENDED) && 13985 (un->un_state != SD_STATE_PM_CHANGING)) { 13986 New_state(un, SD_STATE_NORMAL); 13987 } 13988 13989 xp = SD_GET_XBUF(bp); 13990 ASSERT(xp != NULL); 13991 13992 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13993 /* 13994 * Allocate the scsi_pkt if we need one, or attach DMA 13995 * resources if we have a scsi_pkt that needs them. The 13996 * latter should only occur for commands that are being 13997 * retried. 13998 */ 13999 if ((xp->xb_pktp == NULL) || 14000 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 14001 #else 14002 if (xp->xb_pktp == NULL) { 14003 #endif 14004 /* 14005 * There is no scsi_pkt allocated for this buf. Call 14006 * the initpkt function to allocate & init one. 14007 * 14008 * The scsi_init_pkt runout callback functionality is 14009 * implemented as follows: 14010 * 14011 * 1) The initpkt function always calls 14012 * scsi_init_pkt(9F) with sdrunout specified as the 14013 * callback routine. 14014 * 2) A successful packet allocation is initialized and 14015 * the I/O is transported. 14016 * 3) The I/O associated with an allocation resource 14017 * failure is left on its queue to be retried via 14018 * runout or the next I/O. 14019 * 4) The I/O associated with a DMA error is removed 14020 * from the queue and failed with EIO. Processing of 14021 * the transport queues is also halted to be 14022 * restarted via runout or the next I/O. 14023 * 5) The I/O associated with a CDB size or packet 14024 * size error is removed from the queue and failed 14025 * with EIO. Processing of the transport queues is 14026 * continued. 14027 * 14028 * Note: there is no interface for canceling a runout 14029 * callback. To prevent the driver from detaching or 14030 * suspending while a runout is pending the driver 14031 * state is set to SD_STATE_RWAIT 14032 * 14033 * Note: using the scsi_init_pkt callback facility can 14034 * result in an I/O request persisting at the head of 14035 * the list which cannot be satisfied even after 14036 * multiple retries. In the future the driver may 14037 * implement some kind of maximum runout count before 14038 * failing an I/O. 14039 * 14040 * Note: the use of funcp below may seem superfluous, 14041 * but it helps warlock figure out the correct 14042 * initpkt function calls (see [s]sd.wlcmd). 14043 */ 14044 struct scsi_pkt *pktp; 14045 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14046 14047 ASSERT(bp != un->un_rqs_bp); 14048 14049 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14050 switch ((*funcp)(bp, &pktp)) { 14051 case SD_PKT_ALLOC_SUCCESS: 14052 xp->xb_pktp = pktp; 14053 SD_TRACE(SD_LOG_IO_CORE, un, 14054 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14055 pktp); 14056 goto got_pkt; 14057 14058 case SD_PKT_ALLOC_FAILURE: 14059 /* 14060 * Temporary (hopefully) resource depletion. 14061 * Since retries and RQS commands always have a 14062 * scsi_pkt allocated, these cases should never 14063 * get here. So the only cases this needs to 14064 * handle is a bp from the waitq (which we put 14065 * back onto the waitq for sdrunout), or a bp 14066 * sent as an immed_bp (which we just fail). 14067 */ 14068 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14069 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14070 14071 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14072 14073 if (bp == immed_bp) { 14074 /* 14075 * If SD_XB_DMA_FREED is clear, then 14076 * this is a failure to allocate a 14077 * scsi_pkt, and we must fail the 14078 * command. 
14079 */ 14080 if ((xp->xb_pkt_flags & 14081 SD_XB_DMA_FREED) == 0) { 14082 break; 14083 } 14084 14085 /* 14086 * If this immediate command is NOT our 14087 * un_retry_bp, then we must fail it. 14088 */ 14089 if (bp != un->un_retry_bp) { 14090 break; 14091 } 14092 14093 /* 14094 * We get here if this cmd is our 14095 * un_retry_bp that was DMAFREED, but 14096 * scsi_init_pkt() failed to reallocate 14097 * DMA resources when we attempted to 14098 * retry it. This can happen when an 14099 * mpxio failover is in progress, but 14100 * we don't want to just fail the 14101 * command in this case. 14102 * 14103 * Use timeout(9F) to restart it after 14104 * a 100ms delay. We don't want to 14105 * let sdrunout() restart it, because 14106 * sdrunout() is just supposed to start 14107 * commands that are sitting on the 14108 * wait queue. The un_retry_bp stays 14109 * set until the command completes, but 14110 * sdrunout can be called many times 14111 * before that happens. Since sdrunout 14112 * cannot tell if the un_retry_bp is 14113 * already in the transport, it could 14114 * end up calling scsi_transport() for 14115 * the un_retry_bp multiple times. 14116 * 14117 * Also: don't schedule the callback 14118 * if some other callback is already 14119 * pending. 14120 */ 14121 if (un->un_retry_statp == NULL) { 14122 /* 14123 * restore the kstat pointer to 14124 * keep kstat counts coherent 14125 * when we do retry the command. 14126 */ 14127 un->un_retry_statp = 14128 saved_statp; 14129 } 14130 14131 if ((un->un_startstop_timeid == NULL) && 14132 (un->un_retry_timeid == NULL) && 14133 (un->un_direct_priority_timeid == 14134 NULL)) { 14135 14136 un->un_retry_timeid = 14137 timeout( 14138 sd_start_retry_command, 14139 un, SD_RESTART_TIMEOUT); 14140 } 14141 goto exit; 14142 } 14143 14144 #else 14145 if (bp == immed_bp) { 14146 break; /* Just fail the command */ 14147 } 14148 #endif 14149 14150 /* Add the buf back to the head of the waitq */ 14151 bp->av_forw = un->un_waitq_headp; 14152 un->un_waitq_headp = bp; 14153 if (un->un_waitq_tailp == NULL) { 14154 un->un_waitq_tailp = bp; 14155 } 14156 goto exit; 14157 14158 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14159 /* 14160 * HBA DMA resource failure. Fail the command 14161 * and continue processing of the queues. 14162 */ 14163 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14164 "sd_start_cmds: " 14165 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14166 break; 14167 14168 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 14169 /* 14170 * Note:x86: Partial DMA mapping not supported 14171 * for USCSI commands, and all the needed DMA 14172 * resources were not allocated. 14173 */ 14174 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14175 "sd_start_cmds: " 14176 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 14177 break; 14178 14179 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 14180 /* 14181 * Note:x86: Request cannot fit into CDB based 14182 * on lba and len. 14183 */ 14184 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14185 "sd_start_cmds: " 14186 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 14187 break; 14188 14189 default: 14190 /* Should NEVER get here! */ 14191 panic("scsi_initpkt error"); 14192 /*NOTREACHED*/ 14193 } 14194 14195 /* 14196 * Fatal error in allocating a scsi_pkt for this buf. 14197 * Update kstats & return the buf with an error code. 14198 * We must use sd_return_failed_command_no_restart() to 14199 * avoid a recursive call back into sd_start_cmds(). 14200 * However this also means that we must keep processing 14201 * the waitq here in order to avoid stalling. 
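		 *
		 * (Illustratively: the plain sd_return_failed_command() path
		 * ends by restarting command processing via sd_start_cmds(),
		 * so using it from inside this loop on an allocation failure
		 * could recurse without bound if allocations kept failing.
		 * The _no_restart() variant breaks that cycle, at the cost
		 * of this loop having to keep draining the waitq itself.)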
14202 */ 14203 if (statp == kstat_waitq_to_runq) { 14204 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 14205 } 14206 sd_return_failed_command_no_restart(un, bp, EIO); 14207 if (bp == immed_bp) { 14208 /* immed_bp is gone by now, so clear this */ 14209 immed_bp = NULL; 14210 } 14211 continue; 14212 } 14213 got_pkt: 14214 if (bp == immed_bp) { 14215 /* goto the head of the class.... */ 14216 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14217 } 14218 14219 un->un_ncmds_in_transport++; 14220 SD_UPDATE_KSTATS(un, statp, bp); 14221 14222 /* 14223 * Call scsi_transport() to send the command to the target. 14224 * According to SCSA architecture, we must drop the mutex here 14225 * before calling scsi_transport() in order to avoid deadlock. 14226 * Note that the scsi_pkt's completion routine can be executed 14227 * (from interrupt context) even before the call to 14228 * scsi_transport() returns. 14229 */ 14230 SD_TRACE(SD_LOG_IO_CORE, un, 14231 "sd_start_cmds: calling scsi_transport()\n"); 14232 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 14233 14234 mutex_exit(SD_MUTEX(un)); 14235 rval = scsi_transport(xp->xb_pktp); 14236 mutex_enter(SD_MUTEX(un)); 14237 14238 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14239 "sd_start_cmds: scsi_transport() returned %d\n", rval); 14240 14241 switch (rval) { 14242 case TRAN_ACCEPT: 14243 /* Clear this with every pkt accepted by the HBA */ 14244 un->un_tran_fatal_count = 0; 14245 break; /* Success; try the next cmd (if any) */ 14246 14247 case TRAN_BUSY: 14248 un->un_ncmds_in_transport--; 14249 ASSERT(un->un_ncmds_in_transport >= 0); 14250 14251 /* 14252 * Don't retry request sense, the sense data 14253 * is lost when another request is sent. 14254 * Free up the rqs buf and retry 14255 * the original failed cmd. Update kstat. 14256 */ 14257 if (bp == un->un_rqs_bp) { 14258 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14259 bp = sd_mark_rqs_idle(un, xp); 14260 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 14261 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, 14262 kstat_waitq_enter); 14263 goto exit; 14264 } 14265 14266 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14267 /* 14268 * Free the DMA resources for the scsi_pkt. This will 14269 * allow mpxio to select another path the next time 14270 * we call scsi_transport() with this scsi_pkt. 14271 * See sdintr() for the rationalization behind this. 14272 */ 14273 if ((un->un_f_is_fibre == TRUE) && 14274 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14275 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 14276 scsi_dmafree(xp->xb_pktp); 14277 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14278 } 14279 #endif 14280 14281 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 14282 /* 14283 * Commands that are SD_PATH_DIRECT_PRIORITY 14284 * are for error recovery situations. These do 14285 * not use the normal command waitq, so if they 14286 * get a TRAN_BUSY we cannot put them back onto 14287 * the waitq for later retry. One possible 14288 * problem is that there could already be some 14289 * other command on un_retry_bp that is waiting 14290 * for this one to complete, so we would be 14291 * deadlocked if we put this command back onto 14292 * the waitq for later retry (since un_retry_bp 14293 * must complete before the driver gets back to 14294 * commands on the waitq). 14295 * 14296 * To avoid deadlock we must schedule a callback 14297 * that will restart this command after a set 14298 * interval. 
This should keep retrying for as 14299 * long as the underlying transport keeps 14300 * returning TRAN_BUSY (just like for other 14301 * commands). Use the same timeout interval as 14302 * for the ordinary TRAN_BUSY retry. 14303 */ 14304 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14305 "sd_start_cmds: scsi_transport() returned " 14306 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 14307 14308 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14309 un->un_direct_priority_timeid = 14310 timeout(sd_start_direct_priority_command, 14311 bp, SD_BSY_TIMEOUT / 500); 14312 14313 goto exit; 14314 } 14315 14316 /* 14317 * For TRAN_BUSY, we want to reduce the throttle value, 14318 * unless we are retrying a command. 14319 */ 14320 if (bp != un->un_retry_bp) { 14321 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 14322 } 14323 14324 /* 14325 * Set up the bp to be tried again 10 ms later. 14326 * Note:x86: Is there a timeout value in the sd_lun 14327 * for this condition? 14328 */ 14329 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 14330 kstat_runq_back_to_waitq); 14331 goto exit; 14332 14333 case TRAN_FATAL_ERROR: 14334 un->un_tran_fatal_count++; 14335 /* FALLTHRU */ 14336 14337 case TRAN_BADPKT: 14338 default: 14339 un->un_ncmds_in_transport--; 14340 ASSERT(un->un_ncmds_in_transport >= 0); 14341 14342 /* 14343 * If this is our REQUEST SENSE command with a 14344 * transport error, we must get back the pointers 14345 * to the original buf, and mark the REQUEST 14346 * SENSE command as "available". 14347 */ 14348 if (bp == un->un_rqs_bp) { 14349 bp = sd_mark_rqs_idle(un, xp); 14350 xp = SD_GET_XBUF(bp); 14351 } else { 14352 /* 14353 * Legacy behavior: do not update transport 14354 * error count for request sense commands. 14355 */ 14356 SD_UPDATE_ERRSTATS(un, sd_transerrs); 14357 } 14358 14359 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14360 sd_print_transport_rejected_message(un, xp, rval); 14361 14362 /* 14363 * We must use sd_return_failed_command_no_restart() to 14364 * avoid a recursive call back into sd_start_cmds(). 14365 * However this also means that we must keep processing 14366 * the waitq here in order to avoid stalling. 14367 */ 14368 sd_return_failed_command_no_restart(un, bp, EIO); 14369 14370 /* 14371 * Notify any threads waiting in sd_ddi_suspend() that 14372 * a command completion has occurred. 14373 */ 14374 if (un->un_state == SD_STATE_SUSPENDED) { 14375 cv_broadcast(&un->un_disk_busy_cv); 14376 } 14377 14378 if (bp == immed_bp) { 14379 /* immed_bp is gone by now, so clear this */ 14380 immed_bp = NULL; 14381 } 14382 break; 14383 } 14384 14385 } while (immed_bp == NULL); 14386 14387 exit: 14388 ASSERT(mutex_owned(SD_MUTEX(un))); 14389 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 14390 } 14391 14392 14393 /* 14394 * Function: sd_return_command 14395 * 14396 * Description: Returns a command to its originator (with or without an 14397 * error). Also starts commands waiting to be transported 14398 * to the target. 
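 *		A minimal sketch of the expected call pattern (hedged;
 *		the names mirror the completion paths elsewhere in this
 *		file, and SD_MUTEX must be held by the caller):
 *
 *			mutex_enter(SD_MUTEX(un));
 *			...
 *			sd_return_command(un, bp);
 *			...
 *			mutex_exit(SD_MUTEX(un));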
14399 * 14400 * Context: May be called from interrupt, kernel, or timeout context 14401 */ 14402 14403 static void 14404 sd_return_command(struct sd_lun *un, struct buf *bp) 14405 { 14406 struct sd_xbuf *xp; 14407 #if defined(__i386) || defined(__amd64) 14408 struct scsi_pkt *pktp; 14409 #endif 14410 14411 ASSERT(bp != NULL); 14412 ASSERT(un != NULL); 14413 ASSERT(mutex_owned(SD_MUTEX(un))); 14414 ASSERT(bp != un->un_rqs_bp); 14415 xp = SD_GET_XBUF(bp); 14416 ASSERT(xp != NULL); 14417 14418 #if defined(__i386) || defined(__amd64) 14419 pktp = SD_GET_PKTP(bp); 14420 #endif 14421 14422 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 14423 14424 #if defined(__i386) || defined(__amd64) 14425 /* 14426 * Note:x86: check for the "sdrestart failed" case. 14427 */ 14428 if (((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 14429 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 14430 (xp->xb_pktp->pkt_resid == 0)) { 14431 14432 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 14433 /* 14434 * Successfully set up next portion of cmd 14435 * transfer, try sending it 14436 */ 14437 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14438 NULL, NULL, 0, (clock_t)0, NULL); 14439 sd_start_cmds(un, NULL); 14440 return; /* Note:x86: need a return here? */ 14441 } 14442 } 14443 #endif 14444 14445 /* 14446 * If this is the failfast bp, clear it from un_failfast_bp. This 14447 * can happen if upon being re-tried the failfast bp either 14448 * succeeded or encountered another error (possibly even a different 14449 * error than the one that precipitated the failfast state, but in 14450 * that case it would have had to exhaust retries as well). Regardless, 14451 * this should not occur whenever the instance is in the active 14452 * failfast state. 14453 */ 14454 if (bp == un->un_failfast_bp) { 14455 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 14456 un->un_failfast_bp = NULL; 14457 } 14458 14459 /* 14460 * Clear the failfast state upon successful completion of ANY cmd. 14461 */ 14462 if (bp->b_error == 0) { 14463 un->un_failfast_state = SD_FAILFAST_INACTIVE; 14464 } 14465 14466 /* 14467 * This is used if the command was retried one or more times. Show that 14468 * we are done with it, and allow processing of the waitq to resume. 14469 */ 14470 if (bp == un->un_retry_bp) { 14471 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14472 "sd_return_command: un:0x%p: " 14473 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 14474 un->un_retry_bp = NULL; 14475 un->un_retry_statp = NULL; 14476 } 14477 14478 SD_UPDATE_RDWR_STATS(un, bp); 14479 SD_UPDATE_PARTITION_STATS(un, bp); 14480 14481 switch (un->un_state) { 14482 case SD_STATE_SUSPENDED: 14483 /* 14484 * Notify any threads waiting in sd_ddi_suspend() that 14485 * a command completion has occurred. 14486 */ 14487 cv_broadcast(&un->un_disk_busy_cv); 14488 break; 14489 default: 14490 sd_start_cmds(un, NULL); 14491 break; 14492 } 14493 14494 /* Return this command up the iodone chain to its originator. */ 14495 mutex_exit(SD_MUTEX(un)); 14496 14497 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 14498 xp->xb_pktp = NULL; 14499 14500 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 14501 14502 ASSERT(!mutex_owned(SD_MUTEX(un))); 14503 mutex_enter(SD_MUTEX(un)); 14504 14505 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 14506 } 14507 14508 14509 /* 14510 * Function: sd_return_failed_command 14511 * 14512 * Description: Command completion when an error occurred. 
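 *		For illustration (EIO shown as a typical errcode, as
 *		used by the callers in this file):
 *
 *			sd_return_failed_command(un, bp, EIO);
 *
 *		This sets the error in the buf via SD_BIOERROR() and
 *		then funnels into sd_return_command().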
14513 * 14514 * Context: May be called from interrupt context 14515 */ 14516 14517 static void 14518 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 14519 { 14520 ASSERT(bp != NULL); 14521 ASSERT(un != NULL); 14522 ASSERT(mutex_owned(SD_MUTEX(un))); 14523 14524 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14525 "sd_return_failed_command: entry\n"); 14526 14527 /* 14528 * b_resid could already be nonzero due to a partial data 14529 * transfer, so do not change it here. 14530 */ 14531 SD_BIOERROR(bp, errcode); 14532 14533 sd_return_command(un, bp); 14534 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14535 "sd_return_failed_command: exit\n"); 14536 } 14537 14538 14539 /* 14540 * Function: sd_return_failed_command_no_restart 14541 * 14542 * Description: Same as sd_return_failed_command, but ensures that no 14543 * call back into sd_start_cmds will be issued. 14544 * 14545 * Context: May be called from interrupt context 14546 */ 14547 14548 static void 14549 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 14550 int errcode) 14551 { 14552 struct sd_xbuf *xp; 14553 14554 ASSERT(bp != NULL); 14555 ASSERT(un != NULL); 14556 ASSERT(mutex_owned(SD_MUTEX(un))); 14557 xp = SD_GET_XBUF(bp); 14558 ASSERT(xp != NULL); 14559 ASSERT(errcode != 0); 14560 14561 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14562 "sd_return_failed_command_no_restart: entry\n"); 14563 14564 /* 14565 * b_resid could already be nonzero due to a partial data 14566 * transfer, so do not change it here. 14567 */ 14568 SD_BIOERROR(bp, errcode); 14569 14570 /* 14571 * If this is the failfast bp, clear it. This can happen if the 14572 * failfast bp encountered a fatal error when we attempted to 14573 * re-try it (such as a scsi_transport(9F) failure). However 14574 * we should NOT be in an active failfast state if the failfast 14575 * bp is not NULL. 14576 */ 14577 if (bp == un->un_failfast_bp) { 14578 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 14579 un->un_failfast_bp = NULL; 14580 } 14581 14582 if (bp == un->un_retry_bp) { 14583 /* 14584 * This command was retried one or more times. Show that we are 14585 * done with it, and allow processing of the waitq to resume. 14586 */ 14587 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14588 "sd_return_failed_command_no_restart: " 14589 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 14590 un->un_retry_bp = NULL; 14591 un->un_retry_statp = NULL; 14592 } 14593 14594 SD_UPDATE_RDWR_STATS(un, bp); 14595 SD_UPDATE_PARTITION_STATS(un, bp); 14596 14597 mutex_exit(SD_MUTEX(un)); 14598 14599 if (xp->xb_pktp != NULL) { 14600 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 14601 xp->xb_pktp = NULL; 14602 } 14603 14604 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 14605 14606 mutex_enter(SD_MUTEX(un)); 14607 14608 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14609 "sd_return_failed_command_no_restart: exit\n"); 14610 } 14611 14612 14613 /* 14614 * Function: sd_retry_command 14615 * 14616 * Description: Queue up a command for retry, or (optionally) fail it 14617 * if retry counts are exhausted. 14618 * 14619 * Arguments: un - Pointer to the sd_lun struct for the target. 14620 * 14621 * bp - Pointer to the buf for the command to be retried. 14622 * 14623 * retry_check_flag - Flag to see which (if any) of the retry 14624 * counts should be decremented/checked. If the indicated 14625 * retry count is exhausted, then the command will not be 14626 * retried; it will be failed instead.
This should use a 14627 * value equal to one of the following: 14628 * 14629 * SD_RETRIES_NOCHECK 14630 * SD_RETRIES_STANDARD 14631 * SD_RETRIES_VICTIM 14632 * 14633 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 14634 * if the check should be made to see if FLAG_ISOLATE is set 14635 * in the pkt. If FLAG_ISOLATE is set, then the command is 14636 * not retried; it is simply failed. 14637 * 14638 * user_funcp - Ptr to function to call before dispatching the 14639 * command. May be NULL if no action needs to be performed. 14640 * (Primarily intended for printing messages.) 14641 * 14642 * user_arg - Optional argument to be passed along to 14643 * the user_funcp call. 14644 * 14645 * failure_code - errno return code to set in the bp if the 14646 * command is going to be failed. 14647 * 14648 * retry_delay - Retry delay interval in (clock_t) units. May 14649 * be zero which indicates that the retry should be attempted 14650 * immediately (ie, without an intervening delay). 14651 * 14652 * statp - Ptr to kstat function to be updated if the command 14653 * is queued for a delayed retry. May be NULL if no kstat 14654 * update is desired. 14655 * 14656 * Context: May be called from interrupt context. 14657 */ 14658 14659 static void 14660 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 14661 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 14662 code), void *user_arg, int failure_code, clock_t retry_delay, 14663 void (*statp)(kstat_io_t *)) 14664 { 14665 struct sd_xbuf *xp; 14666 struct scsi_pkt *pktp; 14667 14668 ASSERT(un != NULL); 14669 ASSERT(mutex_owned(SD_MUTEX(un))); 14670 ASSERT(bp != NULL); 14671 xp = SD_GET_XBUF(bp); 14672 ASSERT(xp != NULL); 14673 pktp = SD_GET_PKTP(bp); 14674 ASSERT(pktp != NULL); 14675 14676 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14677 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 14678 14679 /* 14680 * If we are syncing or dumping, fail the command to avoid 14681 * recursively calling back into scsi_transport(). 14682 */ 14683 if (ddi_in_panic()) { 14684 goto fail_command_no_log; 14685 } 14686 14687 /* 14688 * We should never be retrying a command with FLAG_DIAGNOSE set, so 14689 * log an error and fail the command. 14690 */ 14691 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14692 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 14693 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 14694 sd_dump_memory(un, SD_LOG_IO, "CDB", 14695 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 14696 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 14697 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 14698 goto fail_command; 14699 } 14700 14701 /* 14702 * If we are suspended, then put the command onto the head of the 14703 * wait queue since we don't want to start more commands. 14704 */ 14705 switch (un->un_state) { 14706 case SD_STATE_SUSPENDED: 14707 case SD_STATE_DUMPING: 14708 bp->av_forw = un->un_waitq_headp; 14709 un->un_waitq_headp = bp; 14710 if (un->un_waitq_tailp == NULL) { 14711 un->un_waitq_tailp = bp; 14712 } 14713 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 14714 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 14715 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 14716 return; 14717 default: 14718 break; 14719 } 14720 14721 /* 14722 * If the caller wants us to check FLAG_ISOLATE, then see if that 14723 * is set; if it is then we do not want to retry the command. 14724 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
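 *
 * Condensed, the two nested checks below are equivalent to:
 *
 *	if ((retry_check_flag & SD_RETRIES_ISOLATE) &&
 *	    (pktp->pkt_flags & FLAG_ISOLATE)) {
 *		goto fail_command;
 *	}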
14725 */ 14726 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 14727 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 14728 goto fail_command; 14729 } 14730 } 14731 14732 14733 /* 14734 * If SD_RETRIES_FAILFAST is set, it indicates that either a 14735 * command timeout or a selection timeout has occurred. This means 14736 * that we were unable to establish any kind of communication with 14737 * the target, and subsequent retries and/or commands are likely 14738 * to encounter similar results and take a long time to complete. 14739 * 14740 * If this is a failfast error condition, we need to update the 14741 * failfast state, even if this bp does not have B_FAILFAST set. 14742 */ 14743 if (retry_check_flag & SD_RETRIES_FAILFAST) { 14744 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 14745 ASSERT(un->un_failfast_bp == NULL); 14746 /* 14747 * If we are already in the active failfast state, and 14748 * another failfast error condition has been detected, 14749 * then fail this command if it has B_FAILFAST set. 14750 * If B_FAILFAST is clear, then maintain the legacy 14751 * behavior of retrying heroically, even though this will 14752 * take a lot more time to fail the command. 14753 */ 14754 if (bp->b_flags & B_FAILFAST) { 14755 goto fail_command; 14756 } 14757 } else { 14758 /* 14759 * We're not in the active failfast state, but we 14760 * have a failfast error condition, so we must begin 14761 * transition to the next state. We do this regardless 14762 * of whether or not this bp has B_FAILFAST set. 14763 */ 14764 if (un->un_failfast_bp == NULL) { 14765 /* 14766 * This is the first bp to meet a failfast 14767 * condition so save it on un_failfast_bp & 14768 * do normal retry processing. Do not enter 14769 * active failfast state yet. This marks 14770 * entry into the "failfast pending" state. 14771 */ 14772 un->un_failfast_bp = bp; 14773 14774 } else if (un->un_failfast_bp == bp) { 14775 /* 14776 * This is the second time *this* bp has 14777 * encountered a failfast error condition, 14778 * so enter active failfast state & flush 14779 * queues as appropriate. 14780 */ 14781 un->un_failfast_state = SD_FAILFAST_ACTIVE; 14782 un->un_failfast_bp = NULL; 14783 sd_failfast_flushq(un); 14784 14785 /* 14786 * Fail this bp now if B_FAILFAST set; 14787 * otherwise continue with retries. (It would 14788 * be pretty ironic if this bp succeeded on a 14789 * subsequent retry after we just flushed all 14790 * the queues). 14791 */ 14792 if (bp->b_flags & B_FAILFAST) { 14793 goto fail_command; 14794 } 14795 14796 #if !defined(lint) && !defined(__lint) 14797 } else { 14798 /* 14799 * If neither of the preceding conditionals 14800 * was true, it means that there is some 14801 * *other* bp that has met an initial failfast 14802 * condition and is currently either being 14803 * retried or is waiting to be retried. In 14804 * that case we should perform normal retry 14805 * processing on *this* bp, since there is a 14806 * chance that the current failfast condition 14807 * is transient and recoverable. If that does 14808 * not turn out to be the case, then retries 14809 * will be cleared when the wait queue is 14810 * flushed anyway.
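 *
 * To summarize, the failfast logic above moves through three
 * states (a restatement of the code, not additional behavior):
 *
 *	INACTIVE: un_failfast_state == SD_FAILFAST_INACTIVE and
 *		un_failfast_bp == NULL
 *	PENDING:  un_failfast_bp == bp after the first failfast
 *		error condition is seen
 *	ACTIVE:   the same bp fails again; un_failfast_state is
 *		set to SD_FAILFAST_ACTIVE and sd_failfast_flushq()
 *		flushes the queues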
14811 */ 14812 #endif 14813 } 14814 } 14815 } else { 14816 /* 14817 * SD_RETRIES_FAILFAST is clear, which indicates that we 14818 * likely were able to at least establish some level of 14819 * communication with the target and subsequent commands 14820 * and/or retries are likely to get through to the target. 14821 * In this case we want to be aggressive about clearing 14822 * the failfast state. Note that this does not affect 14823 * the "failfast pending" condition. 14824 */ 14825 un->un_failfast_state = SD_FAILFAST_INACTIVE; 14826 } 14827 14828 14829 /* 14830 * Check the specified retry count to see if we can still do 14831 * any retries with this pkt before we should fail it. 14832 */ 14833 switch (retry_check_flag & SD_RETRIES_MASK) { 14834 case SD_RETRIES_VICTIM: 14835 /* 14836 * Check the victim retry count. If exhausted, then fall 14837 * thru & check against the standard retry count. 14838 */ 14839 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 14840 /* Increment count & proceed with the retry */ 14841 xp->xb_victim_retry_count++; 14842 break; 14843 } 14844 /* Victim retries exhausted, fall back to std. retries... */ 14845 /* FALLTHRU */ 14846 14847 case SD_RETRIES_STANDARD: 14848 if (xp->xb_retry_count >= un->un_retry_count) { 14849 /* Retries exhausted, fail the command */ 14850 SD_TRACE(SD_LOG_IO_CORE, un, 14851 "sd_retry_command: retries exhausted!\n"); 14852 /* 14853 * Update b_resid for failed SCMD_READ & SCMD_WRITE 14854 * commands with nonzero pkt_resid. 14855 */ 14856 if ((pktp->pkt_reason == CMD_CMPLT) && 14857 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 14858 (pktp->pkt_resid != 0)) { 14859 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 14860 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 14861 SD_UPDATE_B_RESID(bp, pktp); 14862 } 14863 } 14864 goto fail_command; 14865 } 14866 xp->xb_retry_count++; 14867 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14868 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 14869 break; 14870 14871 case SD_RETRIES_UA: 14872 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 14873 /* Retries exhausted, fail the command */ 14874 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14875 "Unit Attention retries exhausted. " 14876 "Check the target.\n"); 14877 goto fail_command; 14878 } 14879 xp->xb_ua_retry_count++; 14880 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14881 "sd_retry_command: retry count:%d\n", 14882 xp->xb_ua_retry_count); 14883 break; 14884 14885 case SD_RETRIES_BUSY: 14886 if (xp->xb_retry_count >= un->un_busy_retry_count) { 14887 /* Retries exhausted, fail the command */ 14888 SD_TRACE(SD_LOG_IO_CORE, un, 14889 "sd_retry_command: retries exhausted!\n"); 14890 goto fail_command; 14891 } 14892 xp->xb_retry_count++; 14893 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14894 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 14895 break; 14896 14897 case SD_RETRIES_NOCHECK: 14898 default: 14899 /* No retry count to check. Just proceed with the retry */ 14900 break; 14901 } 14902 14903 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14904 14905 /* 14906 * If we were given a zero timeout, we must attempt to retry the 14907 * command immediately (ie, without a delay). 14908 */ 14909 if (retry_delay == 0) { 14910 /* 14911 * Check some limiting conditions to see if we can actually 14912 * do the immediate retry. If we cannot, then we must 14913 * fall back to queueing up a delayed retry.
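 *
 * For example (illustrative numbers): with un_throttle == 8 and
 * eight commands already in transport, an immediate retry would
 * exceed the throttle, so the code below converts the request
 * into a delayed retry after SD_BSY_TIMEOUT, with the buf
 * accounted back onto the waitq via kstat_waitq_enter.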
14914 */ 14915 if (un->un_ncmds_in_transport >= un->un_throttle) { 14916 /* 14917 * We are at the throttle limit for the target, 14918 * fall back to delayed retry. 14919 */ 14920 retry_delay = SD_BSY_TIMEOUT; 14921 statp = kstat_waitq_enter; 14922 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14923 "sd_retry_command: immed. retry hit throttle!\n"); 14924 } else { 14925 /* 14926 * We're clear to proceed with the immediate retry. 14927 * First call the user-provided function (if any) 14928 */ 14929 if (user_funcp != NULL) { 14930 (*user_funcp)(un, bp, user_arg, 14931 SD_IMMEDIATE_RETRY_ISSUED); 14932 } 14933 14934 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14935 "sd_retry_command: issuing immediate retry\n"); 14936 14937 /* 14938 * Call sd_start_cmds() to transport the command to 14939 * the target. 14940 */ 14941 sd_start_cmds(un, bp); 14942 14943 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14944 "sd_retry_command exit\n"); 14945 return; 14946 } 14947 } 14948 14949 /* 14950 * Set up to retry the command after a delay. 14951 * First call the user-provided function (if any) 14952 */ 14953 if (user_funcp != NULL) { 14954 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 14955 } 14956 14957 sd_set_retry_bp(un, bp, retry_delay, statp); 14958 14959 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 14960 return; 14961 14962 fail_command: 14963 14964 if (user_funcp != NULL) { 14965 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 14966 } 14967 14968 fail_command_no_log: 14969 14970 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14971 "sd_retry_command: returning failed command\n"); 14972 14973 sd_return_failed_command(un, bp, failure_code); 14974 14975 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 14976 } 14977 14978 14979 /* 14980 * Function: sd_set_retry_bp 14981 * 14982 * Description: Set up the given bp for retry. 14983 * 14984 * Arguments: un - ptr to associated softstate 14985 * bp - ptr to buf(9S) for the command 14986 * retry_delay - time interval before issuing retry (may be 0) 14987 * statp - optional pointer to kstat function 14988 * 14989 * Context: May be called under interrupt context 14990 */ 14991 14992 static void 14993 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 14994 void (*statp)(kstat_io_t *)) 14995 { 14996 ASSERT(un != NULL); 14997 ASSERT(mutex_owned(SD_MUTEX(un))); 14998 ASSERT(bp != NULL); 14999 15000 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15001 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 15002 15003 /* 15004 * Indicate that the command is being retried. This will not allow any 15005 * other commands on the wait queue to be transported to the target 15006 * until this command has been completed (success or failure). The 15007 * "retry command" is not transported to the target until the given 15008 * time delay expires, unless the user specified a 0 retry_delay. 15009 * 15010 * Note: the timeout(9F) callback routine is what actually calls 15011 * sd_start_cmds() to transport the command, with the exception of a 15012 * zero retry_delay. The only current implementor of a zero retry delay 15013 * is the case where a START_STOP_UNIT is sent to spin-up a device. 15014 */ 15015 if (un->un_retry_bp == NULL) { 15016 ASSERT(un->un_retry_statp == NULL); 15017 un->un_retry_bp = bp; 15018 15019 /* 15020 * If the user has not specified a delay the command should 15021 * be queued and no timeout should be scheduled. 
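 *
 * (As noted above, the only current zero-delay caller is the
 * START_STOP_UNIT spin-up path; its completion callback restarts
 * the waitq itself, so no timeout(9F) is scheduled in that case.)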
15022 */ 15023 if (retry_delay == 0) { 15024 /* 15025 * Save the kstat pointer that will be used in the 15026 * call to SD_UPDATE_KSTATS() below, so that 15027 * sd_start_cmds() can correctly decrement the waitq 15028 * count when it is time to transport this command. 15029 */ 15030 un->un_retry_statp = statp; 15031 goto done; 15032 } 15033 } 15034 15035 if (un->un_retry_bp == bp) { 15036 /* 15037 * Save the kstat pointer that will be used in the call to 15038 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 15039 * correctly decrement the waitq count when it is time to 15040 * transport this command. 15041 */ 15042 un->un_retry_statp = statp; 15043 15044 /* 15045 * Schedule a timeout if: 15046 * 1) The user has specified a delay. 15047 * 2) There is not a START_STOP_UNIT callback pending. 15048 * 15049 * If no delay has been specified, then it is up to the caller 15050 * to ensure that IO processing continues without stalling. 15051 * Effectively, this means that the caller will issue the 15052 * required call to sd_start_cmds(). The START_STOP_UNIT 15053 * callback does this after the START STOP UNIT command has 15054 * completed. In either of these cases we should not schedule 15055 * a timeout callback here. Also don't schedule the timeout if 15056 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 15057 */ 15058 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 15059 (un->un_direct_priority_timeid == NULL)) { 15060 un->un_retry_timeid = 15061 timeout(sd_start_retry_command, un, retry_delay); 15062 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15063 "sd_set_retry_bp: setting timeout: un: 0x%p" 15064 " bp:0x%p un_retry_timeid:0x%p\n", 15065 un, bp, un->un_retry_timeid); 15066 } 15067 } else { 15068 /* 15069 * We only get in here if there is already another command 15070 * waiting to be retried. In this case, we just put the 15071 * given command onto the wait queue, so it can be transported 15072 * after the current retry command has completed. 15073 * 15074 * Also we have to make sure that if the command at the head 15075 * of the wait queue is the un_failfast_bp, we do not 15076 * put ahead of it any other commands that are to be retried. 15077 */ 15078 if ((un->un_failfast_bp != NULL) && 15079 (un->un_failfast_bp == un->un_waitq_headp)) { 15080 /* 15081 * Enqueue this command AFTER the first command on 15082 * the wait queue (which is also un_failfast_bp). 15083 */ 15084 bp->av_forw = un->un_waitq_headp->av_forw; 15085 un->un_waitq_headp->av_forw = bp; 15086 if (un->un_waitq_headp == un->un_waitq_tailp) { 15087 un->un_waitq_tailp = bp; 15088 } 15089 } else { 15090 /* Enqueue this command at the head of the waitq. */ 15091 bp->av_forw = un->un_waitq_headp; 15092 un->un_waitq_headp = bp; 15093 if (un->un_waitq_tailp == NULL) { 15094 un->un_waitq_tailp = bp; 15095 } 15096 } 15097 15098 if (statp == NULL) { 15099 statp = kstat_waitq_enter; 15100 } 15101 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15102 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 15103 } 15104 15105 done: 15106 if (statp != NULL) { 15107 SD_UPDATE_KSTATS(un, statp, bp); 15108 } 15109 15110 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15111 "sd_set_retry_bp: exit un:0x%p\n", un); 15112 } 15113 15114 15115 /* 15116 * Function: sd_start_retry_command 15117 * 15118 * Description: Start the command that has been waiting on the target's 15119 * retry queue. Called from timeout(9F) context after the 15120 * retry delay interval has expired.
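 *		It is scheduled from sd_set_retry_bp() with a call of
 *		the form (retry_delay is in clock ticks):
 *
 *			un->un_retry_timeid =
 *			    timeout(sd_start_retry_command, un, retry_delay);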
15121 * 15122 * Arguments: arg - pointer to associated softstate for the device. 15123 * 15124 * Context: timeout(9F) thread context. May not sleep. 15125 */ 15126 15127 static void 15128 sd_start_retry_command(void *arg) 15129 { 15130 struct sd_lun *un = arg; 15131 15132 ASSERT(un != NULL); 15133 ASSERT(!mutex_owned(SD_MUTEX(un))); 15134 15135 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15136 "sd_start_retry_command: entry\n"); 15137 15138 mutex_enter(SD_MUTEX(un)); 15139 15140 un->un_retry_timeid = NULL; 15141 15142 if (un->un_retry_bp != NULL) { 15143 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15144 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 15145 un, un->un_retry_bp); 15146 sd_start_cmds(un, un->un_retry_bp); 15147 } 15148 15149 mutex_exit(SD_MUTEX(un)); 15150 15151 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15152 "sd_start_retry_command: exit\n"); 15153 } 15154 15155 15156 /* 15157 * Function: sd_start_direct_priority_command 15158 * 15159 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 15160 * received TRAN_BUSY when we called scsi_transport() to send it 15161 * to the underlying HBA. This function is called from timeout(9F) 15162 * context after the delay interval has expired. 15163 * 15164 * Arguments: arg - pointer to associated buf(9S) to be restarted. 15165 * 15166 * Context: timeout(9F) thread context. May not sleep. 15167 */ 15168 15169 static void 15170 sd_start_direct_priority_command(void *arg) 15171 { 15172 struct buf *priority_bp = arg; 15173 struct sd_lun *un; 15174 15175 ASSERT(priority_bp != NULL); 15176 un = SD_GET_UN(priority_bp); 15177 ASSERT(un != NULL); 15178 ASSERT(!mutex_owned(SD_MUTEX(un))); 15179 15180 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15181 "sd_start_direct_priority_command: entry\n"); 15182 15183 mutex_enter(SD_MUTEX(un)); 15184 un->un_direct_priority_timeid = NULL; 15185 sd_start_cmds(un, priority_bp); 15186 mutex_exit(SD_MUTEX(un)); 15187 15188 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15189 "sd_start_direct_priority_command: exit\n"); 15190 } 15191 15192 15193 /* 15194 * Function: sd_send_request_sense_command 15195 * 15196 * Description: Sends a REQUEST SENSE command to the target 15197 * 15198 * Context: May be called from interrupt context. 15199 */ 15200 15201 static void 15202 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 15203 struct scsi_pkt *pktp) 15204 { 15205 ASSERT(bp != NULL); 15206 ASSERT(un != NULL); 15207 ASSERT(mutex_owned(SD_MUTEX(un))); 15208 15209 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 15210 "entry: buf:0x%p\n", bp); 15211 15212 /* 15213 * If we are syncing or dumping, then fail the command to avoid a 15214 * recursive callback into scsi_transport(). Also fail the command 15215 * if we are suspended (legacy behavior). 15216 */ 15217 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 15218 (un->un_state == SD_STATE_DUMPING)) { 15219 sd_return_failed_command(un, bp, EIO); 15220 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15221 "sd_send_request_sense_command: syncing/dumping, exit\n"); 15222 return; 15223 } 15224 15225 /* 15226 * Retry the failed command and don't issue the request sense if: 15227 * 1) the sense buf is busy 15228 * 2) we have 1 or more outstanding commands on the target 15229 * (the sense data will be cleared or invalidated anyway) 15230 * 15231 * Note: There could be an issue with not checking a retry limit here; 15232 * the problem is determining which retry limit to check.
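 *
 * In outline, the code below does (a restatement, not new logic):
 *
 *	if ((un->un_sense_isbusy != 0) ||
 *	    (un->un_ncmds_in_transport > 0)) {
 *		retry or fail the original command;
 *	} else {
 *		sd_mark_rqs_busy(un, bp);
 *		sd_start_cmds(un, un->un_rqs_bp);
 *	}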
15233 */ 15234 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 15235 /* Don't retry if the command is flagged as non-retryable */ 15236 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15237 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 15238 NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter); 15239 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15240 "sd_send_request_sense_command: " 15241 "at full throttle, retrying exit\n"); 15242 } else { 15243 sd_return_failed_command(un, bp, EIO); 15244 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15245 "sd_send_request_sense_command: " 15246 "at full throttle, non-retryable exit\n"); 15247 } 15248 return; 15249 } 15250 15251 sd_mark_rqs_busy(un, bp); 15252 sd_start_cmds(un, un->un_rqs_bp); 15253 15254 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15255 "sd_send_request_sense_command: exit\n"); 15256 } 15257 15258 15259 /* 15260 * Function: sd_mark_rqs_busy 15261 * 15262 * Description: Indicate that the request sense bp for this instance is 15263 * in use. 15264 * 15265 * Context: May be called under interrupt context 15266 */ 15267 15268 static void 15269 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 15270 { 15271 struct sd_xbuf *sense_xp; 15272 15273 ASSERT(un != NULL); 15274 ASSERT(bp != NULL); 15275 ASSERT(mutex_owned(SD_MUTEX(un))); 15276 ASSERT(un->un_sense_isbusy == 0); 15277 15278 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 15279 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 15280 15281 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 15282 ASSERT(sense_xp != NULL); 15283 15284 SD_INFO(SD_LOG_IO, un, 15285 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 15286 15287 ASSERT(sense_xp->xb_pktp != NULL); 15288 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 15289 == (FLAG_SENSING | FLAG_HEAD)); 15290 15291 un->un_sense_isbusy = 1; 15292 un->un_rqs_bp->b_resid = 0; 15293 sense_xp->xb_pktp->pkt_resid = 0; 15294 sense_xp->xb_pktp->pkt_reason = 0; 15295 15296 /* So we can get back the bp at interrupt time! */ 15297 sense_xp->xb_sense_bp = bp; 15298 15299 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 15300 15301 /* 15302 * Mark this buf as awaiting sense data. (This is already set in 15303 * the pkt_flags for the RQS packet.) 15304 */ 15305 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 15306 15307 sense_xp->xb_retry_count = 0; 15308 sense_xp->xb_victim_retry_count = 0; 15309 sense_xp->xb_ua_retry_count = 0; 15310 sense_xp->xb_dma_resid = 0; 15311 15312 /* Clean up the fields for auto-request sense */ 15313 sense_xp->xb_sense_status = 0; 15314 sense_xp->xb_sense_state = 0; 15315 sense_xp->xb_sense_resid = 0; 15316 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 15317 15318 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 15319 } 15320 15321 15322 /* 15323 * Function: sd_mark_rqs_idle 15324 * 15325 * Description: SD_MUTEX must be held continuously through this routine 15326 * to prevent reuse of the rqs struct before the caller can 15327 * complete its processing.
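 *		Typical usage, as in the transport-error paths of
 *		sd_start_cmds(), with SD_MUTEX held across the call
 *		and the use of the result:
 *
 *			bp = sd_mark_rqs_idle(un, xp);
 *			xp = SD_GET_XBUF(bp);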
15328 * 15329 * Return Code: Pointer to the RQS buf 15330 * 15331 * Context: May be called under interrupt context 15332 */ 15333 15334 static struct buf * 15335 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 15336 { 15337 struct buf *bp; 15338 ASSERT(un != NULL); 15339 ASSERT(sense_xp != NULL); 15340 ASSERT(mutex_owned(SD_MUTEX(un))); 15341 ASSERT(un->un_sense_isbusy != 0); 15342 15343 un->un_sense_isbusy = 0; 15344 bp = sense_xp->xb_sense_bp; 15345 sense_xp->xb_sense_bp = NULL; 15346 15347 /* This pkt is no longer interested in getting sense data */ 15348 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 15349 15350 return (bp); 15351 } 15352 15353 15354 15355 /* 15356 * Function: sd_alloc_rqs 15357 * 15358 * Description: Set up the unit to receive auto request sense data 15359 * 15360 * Return Code: DDI_SUCCESS or DDI_FAILURE 15361 * 15362 * Context: Called under attach(9E) context 15363 */ 15364 15365 static int 15366 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 15367 { 15368 struct sd_xbuf *xp; 15369 15370 ASSERT(un != NULL); 15371 ASSERT(!mutex_owned(SD_MUTEX(un))); 15372 ASSERT(un->un_rqs_bp == NULL); 15373 ASSERT(un->un_rqs_pktp == NULL); 15374 15375 /* 15376 * First allocate the required buf and scsi_pkt structs, then set up 15377 * the CDB in the scsi_pkt for a REQUEST SENSE command. 15378 */ 15379 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 15380 SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 15381 if (un->un_rqs_bp == NULL) { 15382 return (DDI_FAILURE); 15383 } 15384 15385 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 15386 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 15387 15388 if (un->un_rqs_pktp == NULL) { 15389 sd_free_rqs(un); 15390 return (DDI_FAILURE); 15391 } 15392 15393 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 15394 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 15395 SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0); 15396 15397 sd_fill_scsi1_lun(un, un->un_rqs_pktp); 15398 15399 /* Set up the other needed members in the ARQ scsi_pkt. */ 15400 un->un_rqs_pktp->pkt_comp = sdintr; 15401 un->un_rqs_pktp->pkt_time = sd_io_time; 15402 un->un_rqs_pktp->pkt_flags |= 15403 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 15404 15405 /* 15406 * Allocate & init the sd_xbuf struct for the RQS command. Do not 15407 * provide any initpkt, destroypkt routines as we take care of 15408 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 15409 */ 15410 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 15411 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 15412 xp->xb_pktp = un->un_rqs_pktp; 15413 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15414 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 15415 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 15416 15417 /* 15418 * Save the pointer to the request sense private bp so it can 15419 * be retrieved in sdintr. 15420 */ 15421 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 15422 ASSERT(un->un_rqs_bp->b_private == xp); 15423 15424 /* 15425 * See if the HBA supports auto-request sense for the specified 15426 * target/lun. If it does, then try to enable it (if not already 15427 * enabled). 15428 * 15429 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 15430 * failure, while for other HBAs (pln) scsi_ifsetcap will always 15431 * return success. However, in both of these cases ARQ is always 15432 * enabled and scsi_ifgetcap will always return true.
The best approach 15433 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 15434 * 15435 * The 3rd case is an HBA (adp) that always returns enabled on 15436 * scsi_ifgetcap even when ARQ is not enabled; the best approach 15437 * is to issue a scsi_ifsetcap and then a scsi_ifgetcap. 15438 * Note: this case is to circumvent the Adaptec bug. (x86 only) 15439 */ 15440 15441 if (un->un_f_is_fibre == TRUE) { 15442 un->un_f_arq_enabled = TRUE; 15443 } else { 15444 #if defined(__i386) || defined(__amd64) 15445 /* 15446 * Circumvent the Adaptec bug, remove this code when 15447 * the bug is fixed 15448 */ 15449 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 15450 #endif 15451 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 15452 case 0: 15453 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15454 "sd_alloc_rqs: HBA supports ARQ\n"); 15455 /* 15456 * ARQ is supported by this HBA but currently is not 15457 * enabled. Attempt to enable it and if successful then 15458 * mark this instance as ARQ enabled. 15459 */ 15460 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 15461 == 1) { 15462 /* Successfully enabled ARQ in the HBA */ 15463 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15464 "sd_alloc_rqs: ARQ enabled\n"); 15465 un->un_f_arq_enabled = TRUE; 15466 } else { 15467 /* Could not enable ARQ in the HBA */ 15468 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15469 "sd_alloc_rqs: failed ARQ enable\n"); 15470 un->un_f_arq_enabled = FALSE; 15471 } 15472 break; 15473 case 1: 15474 /* 15475 * ARQ is supported by this HBA and is already enabled. 15476 * Just mark ARQ as enabled for this instance. 15477 */ 15478 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15479 "sd_alloc_rqs: ARQ already enabled\n"); 15480 un->un_f_arq_enabled = TRUE; 15481 break; 15482 default: 15483 /* 15484 * ARQ is not supported by this HBA; disable it for this 15485 * instance. 15486 */ 15487 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15488 "sd_alloc_rqs: HBA does not support ARQ\n"); 15489 un->un_f_arq_enabled = FALSE; 15490 break; 15491 } 15492 } 15493 15494 return (DDI_SUCCESS); 15495 } 15496 15497 15498 /* 15499 * Function: sd_free_rqs 15500 * 15501 * Description: Cleanup for the per-instance RQS command. 15502 * 15503 * Context: Kernel thread context 15504 */ 15505 15506 static void 15507 sd_free_rqs(struct sd_lun *un) 15508 { 15509 ASSERT(un != NULL); 15510 15511 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 15512 15513 /* 15514 * If consistent memory is bound to a scsi_pkt, the pkt 15515 * has to be destroyed *before* freeing the consistent memory. 15516 * Don't change the sequence of these operations. 15517 * scsi_destroy_pkt() might access memory after it was freed 15518 * in scsi_free_consistent_buf(), which isn't allowed. 15519 */ 15520 if (un->un_rqs_pktp != NULL) { 15521 scsi_destroy_pkt(un->un_rqs_pktp); 15522 un->un_rqs_pktp = NULL; 15523 } 15524 15525 if (un->un_rqs_bp != NULL) { 15526 kmem_free(SD_GET_XBUF(un->un_rqs_bp), sizeof (struct sd_xbuf)); 15527 scsi_free_consistent_buf(un->un_rqs_bp); 15528 un->un_rqs_bp = NULL; 15529 } 15530 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 15531 } 15532 15533 15534 15535 /* 15536 * Function: sd_reduce_throttle 15537 * 15538 * Description: Reduces the maximum # of outstanding commands on a 15539 * target to the current number of outstanding commands. 15540 * Queues a timeout(9F) callback to restore the limit 15541 * after a specified interval has elapsed. 15542 * Typically used when we get a TRAN_BUSY return code 15543 * back from scsi_transport().
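 *		Worked example (illustrative numbers only): if
 *		un_throttle is 32 and 10 commands are outstanding when
 *		a TRAN_BUSY arrives, un_busy_throttle records 32 and
 *		un_throttle drops to 10; sd_restore_throttle() later
 *		restores the old value.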
15544 * 15545 * Arguments: un - ptr to the sd_lun softstate struct 15546 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 15547 * 15548 * Context: May be called from interrupt context 15549 */ 15550 15551 static void 15552 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 15553 { 15554 ASSERT(un != NULL); 15555 ASSERT(mutex_owned(SD_MUTEX(un))); 15556 ASSERT(un->un_ncmds_in_transport >= 0); 15557 15558 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 15559 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 15560 un, un->un_throttle, un->un_ncmds_in_transport); 15561 15562 if (un->un_throttle > 1) { 15563 if (un->un_f_use_adaptive_throttle == TRUE) { 15564 switch (throttle_type) { 15565 case SD_THROTTLE_TRAN_BUSY: 15566 if (un->un_busy_throttle == 0) { 15567 un->un_busy_throttle = un->un_throttle; 15568 } 15569 break; 15570 case SD_THROTTLE_QFULL: 15571 un->un_busy_throttle = 0; 15572 break; 15573 default: 15574 ASSERT(FALSE); 15575 } 15576 15577 if (un->un_ncmds_in_transport > 0) { 15578 un->un_throttle = un->un_ncmds_in_transport; 15579 } 15580 } else { 15581 if (un->un_ncmds_in_transport == 0) { 15582 un->un_throttle = 1; 15583 } else { 15584 un->un_throttle = un->un_ncmds_in_transport; 15585 } 15586 } 15587 } 15588 15589 /* Reschedule the timeout if none is currently active */ 15590 if (un->un_reset_throttle_timeid == NULL) { 15591 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 15592 un, sd_reset_throttle_timeout); 15593 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15594 "sd_reduce_throttle: timeout scheduled!\n"); 15595 } 15596 15597 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 15598 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 15599 } 15600 15601 15602 15603 /* 15604 * Function: sd_restore_throttle 15605 * 15606 * Description: Callback function for timeout(9F). Resets the current 15607 * value of un->un_throttle to its default. 15608 * 15609 * Arguments: arg - pointer to associated softstate for the device. 15610 * 15611 * Context: May be called from interrupt context 15612 */ 15613 15614 static void 15615 sd_restore_throttle(void *arg) 15616 { 15617 struct sd_lun *un = arg; 15618 15619 ASSERT(un != NULL); 15620 ASSERT(!mutex_owned(SD_MUTEX(un))); 15621 15622 mutex_enter(SD_MUTEX(un)); 15623 15624 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 15625 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 15626 15627 un->un_reset_throttle_timeid = NULL; 15628 15629 if (un->un_f_use_adaptive_throttle == TRUE) { 15630 /* 15631 * If un_busy_throttle is nonzero, then it contains the 15632 * value that un_throttle was when we got a TRAN_BUSY back 15633 * from scsi_transport(). We want to revert back to this 15634 * value. 15635 */ 15636 if (un->un_busy_throttle > 0) { 15637 un->un_throttle = un->un_busy_throttle; 15638 un->un_busy_throttle = 0; 15639 } 15640 15641 /* 15642 * If un_throttle has fallen below the low-water mark, we 15643 * restore the maximum value here (and allow it to ratchet 15644 * down again if necessary). 
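 *
 * Continuing the example in sd_reduce_throttle(): if repeated
 * TRAN_BUSY returns ratcheted un_throttle down below
 * un_min_throttle, the check below snaps it back up to
 * un_saved_throttle rather than leaving the target nearly
 * single-threaded.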
15645 */ 15646 if (un->un_throttle < un->un_min_throttle) { 15647 un->un_throttle = un->un_saved_throttle; 15648 } 15649 } else { 15650 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 15651 "restoring limit from 0x%x to 0x%x\n", 15652 un->un_throttle, un->un_saved_throttle); 15653 un->un_throttle = un->un_saved_throttle; 15654 } 15655 15656 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15657 "sd_restore_throttle: calling sd_start_cmds!\n"); 15658 15659 sd_start_cmds(un, NULL); 15660 15661 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15662 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 15663 un, un->un_throttle); 15664 15665 mutex_exit(SD_MUTEX(un)); 15666 15667 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 15668 } 15669 15670 /* 15671 * Function: sdrunout 15672 * 15673 * Description: Callback routine for scsi_init_pkt when a resource allocation 15674 * fails. 15675 * 15676 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 15677 * soft state instance. 15678 * 15679 * Return Code: The scsi_init_pkt routine allows for the callback function to 15680 * return a 0 indicating the callback should be rescheduled or a 1 15681 * indicating not to reschedule. This routine always returns 1 15682 * because the driver always provides a callback function to 15683 * scsi_init_pkt. This results in a callback always being scheduled 15684 * (via the scsi_init_pkt callback implementation) if a resource 15685 * failure occurs. 15686 * 15687 * Context: This callback function may not block or call routines that block 15688 * 15689 * Note: Using the scsi_init_pkt callback facility can result in an I/O 15690 * request persisting at the head of the list which cannot be 15691 * satisfied even after multiple retries. In the future the driver 15692 * may implement some type of maximum runout count before failing 15693 * an I/O. 15694 */ 15695 15696 static int 15697 sdrunout(caddr_t arg) 15698 { 15699 struct sd_lun *un = (struct sd_lun *)arg; 15700 15701 ASSERT(un != NULL); 15702 ASSERT(!mutex_owned(SD_MUTEX(un))); 15703 15704 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 15705 15706 mutex_enter(SD_MUTEX(un)); 15707 sd_start_cmds(un, NULL); 15708 mutex_exit(SD_MUTEX(un)); 15709 /* 15710 * This callback routine always returns 1 (i.e. do not reschedule) 15711 * because we always specify sdrunout as the callback handler for 15712 * scsi_init_pkt inside the call to sd_start_cmds. 15713 */ 15714 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 15715 return (1); 15716 } 15717 15718 15719 /* 15720 * Function: sdintr 15721 * 15722 * Description: Completion callback routine for scsi_pkt(9S) structs 15723 * sent to the HBA driver via scsi_transport(9F).
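 *		The pkt carries its originating buf in pkt_private
 *		(see, e.g., sd_alloc_rqs() for the RQS pkt), so the
 *		entry sequence is essentially:
 *
 *			bp = (struct buf *)pktp->pkt_private;
 *			xp = SD_GET_XBUF(bp);
 *			un = SD_GET_UN(bp);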
15724 * 15725 * Context: Interrupt context 15726 */ 15727 15728 static void 15729 sdintr(struct scsi_pkt *pktp) 15730 { 15731 struct buf *bp; 15732 struct sd_xbuf *xp; 15733 struct sd_lun *un; 15734 15735 ASSERT(pktp != NULL); 15736 bp = (struct buf *)pktp->pkt_private; 15737 ASSERT(bp != NULL); 15738 xp = SD_GET_XBUF(bp); 15739 ASSERT(xp != NULL); 15740 ASSERT(xp->xb_pktp != NULL); 15741 un = SD_GET_UN(bp); 15742 ASSERT(un != NULL); 15743 ASSERT(!mutex_owned(SD_MUTEX(un))); 15744 15745 #ifdef SD_FAULT_INJECTION 15746 15747 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 15748 /* SD FaultInjection */ 15749 sd_faultinjection(pktp); 15750 15751 #endif /* SD_FAULT_INJECTION */ 15752 15753 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 15754 " xp:0x%p, un:0x%p\n", bp, xp, un); 15755 15756 mutex_enter(SD_MUTEX(un)); 15757 15758 /* Reduce the count of the #commands currently in transport */ 15759 un->un_ncmds_in_transport--; 15760 ASSERT(un->un_ncmds_in_transport >= 0); 15761 15762 /* Increment counter to indicate that the callback routine is active */ 15763 un->un_in_callback++; 15764 15765 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15766 15767 #ifdef SDDEBUG 15768 if (bp == un->un_retry_bp) { 15769 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 15770 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 15771 un, un->un_retry_bp, un->un_ncmds_in_transport); 15772 } 15773 #endif 15774 15775 /* 15776 * If pkt_reason is CMD_DEV_GONE, just fail the command 15777 */ 15778 if (pktp->pkt_reason == CMD_DEV_GONE) { 15779 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15780 "Device is gone\n"); 15781 sd_return_failed_command(un, bp, EIO); 15782 goto exit; 15783 } 15784 15785 /* 15786 * First see if the pkt has auto-request sense data with it.... 15787 * Look at the packet state first so we don't take a performance 15788 * hit looking at the arq enabled flag unless absolutely necessary. 15789 */ 15790 if ((pktp->pkt_state & STATE_ARQ_DONE) && 15791 (un->un_f_arq_enabled == TRUE)) { 15792 /* 15793 * The HBA did an auto request sense for this command so check 15794 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15795 * driver command that should not be retried. 15796 */ 15797 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15798 /* 15799 * Save the relevant sense info into the xp for the 15800 * original cmd. 15801 */ 15802 struct scsi_arq_status *asp; 15803 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15804 xp->xb_sense_status = 15805 *((uchar_t *)(&(asp->sts_rqpkt_status))); 15806 xp->xb_sense_state = asp->sts_rqpkt_state; 15807 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15808 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15809 min(sizeof (struct scsi_extended_sense), 15810 SENSE_LENGTH)); 15811 15812 /* fail the command */ 15813 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15814 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 15815 sd_return_failed_command(un, bp, EIO); 15816 goto exit; 15817 } 15818 15819 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15820 /* 15821 * We want to either retry or fail this command, so free 15822 * the DMA resources here. If we retry the command then 15823 * the DMA resources will be reallocated in sd_start_cmds(). 15824 * Note that when PKT_DMA_PARTIAL is used, this reallocation 15825 * causes the *entire* transfer to start over again from the 15826 * beginning of the request, even for PARTIAL chunks that 15827 * have already transferred successfully. 
15828 */ 15829 if ((un->un_f_is_fibre == TRUE) && 15830 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15831 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15832 scsi_dmafree(pktp); 15833 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15834 } 15835 #endif 15836 15837 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15838 "sdintr: arq done, sd_handle_auto_request_sense\n"); 15839 15840 sd_handle_auto_request_sense(un, bp, xp, pktp); 15841 goto exit; 15842 } 15843 15844 /* Next see if this is the REQUEST SENSE pkt for the instance */ 15845 if (pktp->pkt_flags & FLAG_SENSING) { 15846 /* This pktp is from the unit's REQUEST_SENSE command */ 15847 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15848 "sdintr: sd_handle_request_sense\n"); 15849 sd_handle_request_sense(un, bp, xp, pktp); 15850 goto exit; 15851 } 15852 15853 /* 15854 * Check to see if the command successfully completed as requested; 15855 * this is the most common case (and also the hot performance path). 15856 * 15857 * Requirements for successful completion are: 15858 * pkt_reason is CMD_CMPLT and packet status is status good. 15859 * In addition: 15860 * - A residual of zero indicates successful completion no matter what 15861 * the command is. 15862 * - If the residual is not zero and the command is not a read or 15863 * write, then it's still defined as successful completion. In other 15864 * words, if the command is a read or write the residual must be 15865 * zero for successful completion. 15866 * - If the residual is not zero and the command is a read or 15867 * write, and it's a USCSICMD, then it's still defined as 15868 * successful completion. 15869 */ 15870 if ((pktp->pkt_reason == CMD_CMPLT) && 15871 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 15872 15873 /* 15874 * Since this command is returned with a good status, we 15875 * can reset the count for Sonoma failover. 15876 */ 15877 un->un_sonoma_failure_count = 0; 15878 15879 /* 15880 * Return all USCSI commands on good status 15881 */ 15882 if (pktp->pkt_resid == 0) { 15883 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15884 "sdintr: returning command for resid == 0\n"); 15885 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 15886 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 15887 SD_UPDATE_B_RESID(bp, pktp); 15888 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15889 "sdintr: returning command for resid != 0\n"); 15890 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15891 SD_UPDATE_B_RESID(bp, pktp); 15892 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15893 "sdintr: returning uscsi command\n"); 15894 } else { 15895 goto not_successful; 15896 } 15897 sd_return_command(un, bp); 15898 15899 /* 15900 * Decrement counter to indicate that the callback routine 15901 * is done. 15902 */ 15903 un->un_in_callback--; 15904 ASSERT(un->un_in_callback >= 0); 15905 mutex_exit(SD_MUTEX(un)); 15906 15907 return; 15908 } 15909 15910 not_successful: 15911 15912 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15913 /* 15914 * The following is based upon knowledge of the underlying transport 15915 * and its use of DMA resources. This code should be removed when 15916 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 15917 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 15918 * and sd_start_cmds(). 15919 * 15920 * Free any DMA resources associated with this command if there 15921 * is a chance it could be retried or enqueued for later retry. 
15922 * If we keep the DMA binding then mpxio cannot reissue the 15923 * command on another path whenever a path failure occurs. 15924 * 15925 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 15926 * causes the *entire* transfer to start over again from the 15927 * beginning of the request, even for PARTIAL chunks that 15928 * have already transferred successfully. 15929 * 15930 * This is only done for non-uscsi commands (and also skipped for the 15931 * driver's internal RQS command). Also just do this for Fibre Channel 15932 * devices as these are the only ones that support mpxio. 15933 */ 15934 if ((un->un_f_is_fibre == TRUE) && 15935 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15936 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15937 scsi_dmafree(pktp); 15938 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15939 } 15940 #endif 15941 15942 /* 15943 * The command did not successfully complete as requested so check 15944 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15945 * driver command that should not be retried so just return. If 15946 * FLAG_DIAGNOSE is not set the error will be processed below. 15947 */ 15948 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15949 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15950 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 15951 /* 15952 * Issue a request sense if a check condition caused the error 15953 * (we handle the auto request sense case above), otherwise 15954 * just fail the command. 15955 */ 15956 if ((pktp->pkt_reason == CMD_CMPLT) && 15957 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 15958 sd_send_request_sense_command(un, bp, pktp); 15959 } else { 15960 sd_return_failed_command(un, bp, EIO); 15961 } 15962 goto exit; 15963 } 15964 15965 /* 15966 * The command did not successfully complete as requested so process 15967 * the error, retry, and/or attempt recovery. 
15968 */ 15969 switch (pktp->pkt_reason) { 15970 case CMD_CMPLT: 15971 switch (SD_GET_PKT_STATUS(pktp)) { 15972 case STATUS_GOOD: 15973 /* 15974 * The command completed successfully with a non-zero 15975 * residual 15976 */ 15977 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15978 "sdintr: STATUS_GOOD \n"); 15979 sd_pkt_status_good(un, bp, xp, pktp); 15980 break; 15981 15982 case STATUS_CHECK: 15983 case STATUS_TERMINATED: 15984 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15985 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 15986 sd_pkt_status_check_condition(un, bp, xp, pktp); 15987 break; 15988 15989 case STATUS_BUSY: 15990 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15991 "sdintr: STATUS_BUSY\n"); 15992 sd_pkt_status_busy(un, bp, xp, pktp); 15993 break; 15994 15995 case STATUS_RESERVATION_CONFLICT: 15996 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15997 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 15998 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 15999 break; 16000 16001 case STATUS_QFULL: 16002 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16003 "sdintr: STATUS_QFULL\n"); 16004 sd_pkt_status_qfull(un, bp, xp, pktp); 16005 break; 16006 16007 case STATUS_MET: 16008 case STATUS_INTERMEDIATE: 16009 case STATUS_SCSI2: 16010 case STATUS_INTERMEDIATE_MET: 16011 case STATUS_ACA_ACTIVE: 16012 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 16013 "Unexpected SCSI status received: 0x%x\n", 16014 SD_GET_PKT_STATUS(pktp)); 16015 sd_return_failed_command(un, bp, EIO); 16016 break; 16017 16018 default: 16019 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 16020 "Invalid SCSI status received: 0x%x\n", 16021 SD_GET_PKT_STATUS(pktp)); 16022 sd_return_failed_command(un, bp, EIO); 16023 break; 16024 16025 } 16026 break; 16027 16028 case CMD_INCOMPLETE: 16029 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16030 "sdintr: CMD_INCOMPLETE\n"); 16031 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 16032 break; 16033 case CMD_TRAN_ERR: 16034 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16035 "sdintr: CMD_TRAN_ERR\n"); 16036 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 16037 break; 16038 case CMD_RESET: 16039 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16040 "sdintr: CMD_RESET \n"); 16041 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 16042 break; 16043 case CMD_ABORTED: 16044 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16045 "sdintr: CMD_ABORTED \n"); 16046 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 16047 break; 16048 case CMD_TIMEOUT: 16049 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16050 "sdintr: CMD_TIMEOUT\n"); 16051 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 16052 break; 16053 case CMD_UNX_BUS_FREE: 16054 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16055 "sdintr: CMD_UNX_BUS_FREE \n"); 16056 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 16057 break; 16058 case CMD_TAG_REJECT: 16059 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16060 "sdintr: CMD_TAG_REJECT\n"); 16061 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 16062 break; 16063 default: 16064 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16065 "sdintr: default\n"); 16066 sd_pkt_reason_default(un, bp, xp, pktp); 16067 break; 16068 } 16069 16070 exit: 16071 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 16072 16073 /* Decrement counter to indicate that the callback routine is done. */ 16074 un->un_in_callback--; 16075 ASSERT(un->un_in_callback >= 0); 16076 16077 /* 16078 * At this point, the pkt has been dispatched, ie, it is either 16079 * being re-tried or has been returned to its caller and should 16080 * not be referenced. 
16081 */ 16082 16083 mutex_exit(SD_MUTEX(un)); 16084 } 16085 16086 16087 /* 16088 * Function: sd_print_incomplete_msg 16089 * 16090 * Description: Prints the error message for a CMD_INCOMPLETE error. 16091 * 16092 * Arguments: un - ptr to associated softstate for the device. 16093 * bp - ptr to the buf(9S) for the command. 16094 * arg - message string ptr 16095 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 16096 * or SD_NO_RETRY_ISSUED. 16097 * 16098 * Context: May be called under interrupt context 16099 */ 16100 16101 static void 16102 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 16103 { 16104 struct scsi_pkt *pktp; 16105 char *msgp; 16106 char *cmdp = arg; 16107 16108 ASSERT(un != NULL); 16109 ASSERT(mutex_owned(SD_MUTEX(un))); 16110 ASSERT(bp != NULL); 16111 ASSERT(arg != NULL); 16112 pktp = SD_GET_PKTP(bp); 16113 ASSERT(pktp != NULL); 16114 16115 switch (code) { 16116 case SD_DELAYED_RETRY_ISSUED: 16117 case SD_IMMEDIATE_RETRY_ISSUED: 16118 msgp = "retrying"; 16119 break; 16120 case SD_NO_RETRY_ISSUED: 16121 default: 16122 msgp = "giving up"; 16123 break; 16124 } 16125 16126 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16127 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16128 "incomplete %s - %s\n", cmdp, msgp); 16129 } 16130 } 16131 16132 16133 16134 /* 16135 * Function: sd_pkt_status_good 16136 * 16137 * Description: Processing for a STATUS_GOOD code in pkt_status. 16138 * 16139 * Context: May be called under interrupt context 16140 */ 16141 16142 static void 16143 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 16144 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16145 { 16146 char *cmdp; 16147 16148 ASSERT(un != NULL); 16149 ASSERT(mutex_owned(SD_MUTEX(un))); 16150 ASSERT(bp != NULL); 16151 ASSERT(xp != NULL); 16152 ASSERT(pktp != NULL); 16153 ASSERT(pktp->pkt_reason == CMD_CMPLT); 16154 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 16155 ASSERT(pktp->pkt_resid != 0); 16156 16157 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 16158 16159 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16160 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 16161 case SCMD_READ: 16162 cmdp = "read"; 16163 break; 16164 case SCMD_WRITE: 16165 cmdp = "write"; 16166 break; 16167 default: 16168 SD_UPDATE_B_RESID(bp, pktp); 16169 sd_return_command(un, bp); 16170 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 16171 return; 16172 } 16173 16174 /* 16175 * See if we can retry the read/write, preferably immediately. 16176 * If retries are exhausted, then sd_retry_command() will update 16177 * the b_resid count. 16178 */ 16179 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 16180 cmdp, EIO, (clock_t)0, NULL); 16181 16182 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 16183 } 16184 16185 16186 16187 16188 16189 /* 16190 * Function: sd_handle_request_sense 16191 * 16192 * Description: Processing for non-auto Request Sense command.
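 *
 * (Editor's sketch, derived from the code below) The RQS packet and
 * the original command are cross-linked as:
 *
 *   un->un_rqs_bp (sense_bp) --xb_sense_bp--> cmd_bp
 *   cmd_bp --SD_GET_XBUF()--> cmd_xp
 *   cmd_bp --SD_GET_PKTP()--> cmd_pktp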
16193 * 16194 * Arguments: un - ptr to associated softstate 16195 * sense_bp - ptr to buf(9S) for the RQS command 16196 * sense_xp - ptr to the sd_xbuf for the RQS command 16197 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 16198 * 16199 * Context: May be called under interrupt context 16200 */ 16201 16202 static void 16203 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 16204 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 16205 { 16206 struct buf *cmd_bp; /* buf for the original command */ 16207 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 16208 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 16209 16210 ASSERT(un != NULL); 16211 ASSERT(mutex_owned(SD_MUTEX(un))); 16212 ASSERT(sense_bp != NULL); 16213 ASSERT(sense_xp != NULL); 16214 ASSERT(sense_pktp != NULL); 16215 16216 /* 16217 * Note the sense_bp, sense_xp, and sense_pktp here are for the 16218 * RQS command and not the original command. 16219 */ 16220 ASSERT(sense_pktp == un->un_rqs_pktp); 16221 ASSERT(sense_bp == un->un_rqs_bp); 16222 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 16223 (FLAG_SENSING | FLAG_HEAD)); 16224 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 16225 FLAG_SENSING) == FLAG_SENSING); 16226 16227 /* These are the bp, xp, and pktp for the original command */ 16228 cmd_bp = sense_xp->xb_sense_bp; 16229 cmd_xp = SD_GET_XBUF(cmd_bp); 16230 cmd_pktp = SD_GET_PKTP(cmd_bp); 16231 16232 if (sense_pktp->pkt_reason != CMD_CMPLT) { 16233 /* 16234 * The REQUEST SENSE command failed. Release the REQUEST 16235 * SENSE command for re-use, get back the bp for the original 16236 * command, and attempt to re-try the original command if 16237 * FLAG_DIAGNOSE is not set in the original packet. 16238 */ 16239 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16240 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16241 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 16242 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 16243 NULL, NULL, EIO, (clock_t)0, NULL); 16244 return; 16245 } 16246 } 16247 16248 /* 16249 * Save the relevant sense info into the xp for the original cmd. 16250 * 16251 * Note: if the request sense failed the state info will be zero 16252 * as set in sd_mark_rqs_busy() 16253 */ 16254 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 16255 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 16256 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 16257 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, SENSE_LENGTH); 16258 16259 /* 16260 * Free up the RQS command.... 16261 * NOTE: 16262 * Must do this BEFORE calling sd_validate_sense_data! 16263 * sd_validate_sense_data may return the original command in 16264 * which case the pkt will be freed and the flags can no 16265 * longer be touched. 16266 * SD_MUTEX is held through this process until the command 16267 * is dispatched based upon the sense data, so there are 16268 * no race conditions. 16269 */ 16270 (void) sd_mark_rqs_idle(un, sense_xp); 16271 16272 /* 16273 * For a retryable command see if we have valid sense data, if so then 16274 * turn it over to sd_decode_sense() to figure out the right course of 16275 * action. Just fail a non-retryable command. 
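 *
 * (Editor's note) "Retryable" here means FLAG_DIAGNOSE is clear in
 * the original packet: uscsi/internal commands that set FLAG_DIAGNOSE
 * take the else branch below, which dumps the CDB and sense bytes
 * and fails the command with EIO.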
16276 */ 16277 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16278 if (sd_validate_sense_data(un, cmd_bp, cmd_xp) == 16279 SD_SENSE_DATA_IS_VALID) { 16280 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 16281 } 16282 } else { 16283 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 16284 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 16285 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 16286 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 16287 sd_return_failed_command(un, cmd_bp, EIO); 16288 } 16289 } 16290 16291 16292 16293 16294 /* 16295 * Function: sd_handle_auto_request_sense 16296 * 16297 * Description: Processing for auto-request sense information. 16298 * 16299 * Arguments: un - ptr to associated softstate 16300 * bp - ptr to buf(9S) for the command 16301 * xp - ptr to the sd_xbuf for the command 16302 * pktp - ptr to the scsi_pkt(9S) for the command 16303 * 16304 * Context: May be called under interrupt context 16305 */ 16306 16307 static void 16308 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 16309 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16310 { 16311 struct scsi_arq_status *asp; 16312 16313 ASSERT(un != NULL); 16314 ASSERT(mutex_owned(SD_MUTEX(un))); 16315 ASSERT(bp != NULL); 16316 ASSERT(xp != NULL); 16317 ASSERT(pktp != NULL); 16318 ASSERT(pktp != un->un_rqs_pktp); 16319 ASSERT(bp != un->un_rqs_bp); 16320 16321 /* 16322 * For auto-request sense, we get a scsi_arq_status back from 16323 * the HBA, with the sense data in the sts_sensedata member. 16324 * The pkt_scbp of the packet points to this scsi_arq_status. 16325 */ 16326 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16327 16328 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 16329 /* 16330 * The auto REQUEST SENSE failed; see if we can re-try 16331 * the original command. 16332 */ 16333 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16334 "auto request sense failed (reason=%s)\n", 16335 scsi_rname(asp->sts_rqpkt_reason)); 16336 16337 sd_reset_target(un, pktp); 16338 16339 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16340 NULL, NULL, EIO, (clock_t)0, NULL); 16341 return; 16342 } 16343 16344 /* Save the relevant sense info into the xp for the original cmd. */ 16345 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 16346 xp->xb_sense_state = asp->sts_rqpkt_state; 16347 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16348 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16349 min(sizeof (struct scsi_extended_sense), SENSE_LENGTH)); 16350 16351 /* 16352 * See if we have valid sense data, if so then turn it over to 16353 * sd_decode_sense() to figure out the right course of action. 16354 */ 16355 if (sd_validate_sense_data(un, bp, xp) == SD_SENSE_DATA_IS_VALID) { 16356 sd_decode_sense(un, bp, xp, pktp); 16357 } 16358 } 16359 16360 16361 /* 16362 * Function: sd_print_sense_failed_msg 16363 * 16364 * Description: Print log message when RQS has failed. 
16365 * 16366 * Arguments: un - ptr to associated softstate 16367 * bp - ptr to buf(9S) for the command 16368 * arg - generic message string ptr 16369 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16370 * or SD_NO_RETRY_ISSUED 16371 * 16372 * Context: May be called from interrupt context 16373 */ 16374 16375 static void 16376 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 16377 int code) 16378 { 16379 char *msgp = arg; 16380 16381 ASSERT(un != NULL); 16382 ASSERT(mutex_owned(SD_MUTEX(un))); 16383 ASSERT(bp != NULL); 16384 16385 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 16386 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 16387 } 16388 } 16389 16390 16391 /* 16392 * Function: sd_validate_sense_data 16393 * 16394 * Description: Check the given sense data for validity. 16395 * If the sense data is not valid, the command will 16396 * be either failed or retried! 16397 * 16398 * Return Code: SD_SENSE_DATA_IS_INVALID 16399 * SD_SENSE_DATA_IS_VALID 16400 * 16401 * Context: May be called from interrupt context 16402 */ 16403 16404 static int 16405 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp) 16406 { 16407 struct scsi_extended_sense *esp; 16408 struct scsi_pkt *pktp; 16409 size_t actual_len; 16410 char *msgp = NULL; 16411 16412 ASSERT(un != NULL); 16413 ASSERT(mutex_owned(SD_MUTEX(un))); 16414 ASSERT(bp != NULL); 16415 ASSERT(bp != un->un_rqs_bp); 16416 ASSERT(xp != NULL); 16417 16418 pktp = SD_GET_PKTP(bp); 16419 ASSERT(pktp != NULL); 16420 16421 /* 16422 * Check the status of the RQS command (auto or manual). 16423 */ 16424 switch (xp->xb_sense_status & STATUS_MASK) { 16425 case STATUS_GOOD: 16426 break; 16427 16428 case STATUS_RESERVATION_CONFLICT: 16429 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16430 return (SD_SENSE_DATA_IS_INVALID); 16431 16432 case STATUS_BUSY: 16433 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16434 "Busy Status on REQUEST SENSE\n"); 16435 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 16436 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 16437 return (SD_SENSE_DATA_IS_INVALID); 16438 16439 case STATUS_QFULL: 16440 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16441 "QFULL Status on REQUEST SENSE\n"); 16442 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 16443 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 16444 return (SD_SENSE_DATA_IS_INVALID); 16445 16446 case STATUS_CHECK: 16447 case STATUS_TERMINATED: 16448 msgp = "Check Condition on REQUEST SENSE\n"; 16449 goto sense_failed; 16450 16451 default: 16452 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 16453 goto sense_failed; 16454 } 16455 16456 /* 16457 * See if we got the minimum required amount of sense data. 16458 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 16459 * or less. 
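 *
 * (Editor's example, hypothetical numbers) actual_len below is
 * SENSE_LENGTH minus the residual: if SENSE_LENGTH were 32 and
 * xb_sense_resid were 12, actual_len would be 20 and would pass the
 * SUN_MIN_SENSE_LENGTH check (assuming the usual small minimum); a
 * residual equal to SENSE_LENGTH would yield actual_len == 0 and
 * fail validation.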
16460 */ 16461 actual_len = (int)(SENSE_LENGTH - xp->xb_sense_resid); 16462 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 16463 (actual_len == 0)) { 16464 msgp = "Request Sense couldn't get sense data\n"; 16465 goto sense_failed; 16466 } 16467 16468 if (actual_len < SUN_MIN_SENSE_LENGTH) { 16469 msgp = "Not enough sense information\n"; 16470 goto sense_failed; 16471 } 16472 16473 /* 16474 * We require the extended sense data 16475 */ 16476 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 16477 if (esp->es_class != CLASS_EXTENDED_SENSE) { 16478 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16479 static char tmp[8]; 16480 static char buf[148]; 16481 char *p = (char *)(xp->xb_sense_data); 16482 int i; 16483 16484 mutex_enter(&sd_sense_mutex); 16485 (void) strcpy(buf, "undecodable sense information:"); 16486 for (i = 0; i < actual_len; i++) { 16487 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 16488 (void) strcpy(&buf[strlen(buf)], tmp); 16489 } 16490 i = strlen(buf); 16491 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 16492 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 16493 mutex_exit(&sd_sense_mutex); 16494 } 16495 /* Note: Legacy behavior, fail the command with no retry */ 16496 sd_return_failed_command(un, bp, EIO); 16497 return (SD_SENSE_DATA_IS_INVALID); 16498 } 16499 16500 /* 16501 * Check that es_code is valid (es_class concatenated with es_code 16502 * makes up the "response code" field). es_class will always be 7, so 16503 * make sure es_code is 0, 1, 2, 3, or 0xf. es_code indicates the 16504 * format. 16505 */ 16506 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 16507 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 16508 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 16509 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 16510 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 16511 goto sense_failed; 16512 } 16513 16514 return (SD_SENSE_DATA_IS_VALID); 16515 16516 sense_failed: 16517 /* 16518 * If the request sense failed (for whatever reason), attempt 16519 * to retry the original command. 16520 */ 16521 #if defined(__i386) || defined(__amd64) 16522 /* 16523 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 16524 * sddef.h for the SPARC platform, while x86 uses one binary 16525 * for both SCSI and FC. 16526 * The SD_RETRY_DELAY value needs to be adjusted here 16527 * whenever SD_RETRY_DELAY changes in sddef.h. 16528 */ 16529 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16530 sd_print_sense_failed_msg, msgp, EIO, 16531 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 16532 #else 16533 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16534 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 16535 #endif 16536 16537 return (SD_SENSE_DATA_IS_INVALID); 16538 } 16539 16540 16541 16542 /* 16543 * Function: sd_decode_sense 16544 * 16545 * Description: Take recovery action(s) when SCSI Sense Data is received. 16546 * 16547 * Context: Interrupt context.
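 *
 * (Editor's note) The es_code values switched on below select the
 * sense format: CODE_FMT_DESCR_CURRENT/CODE_FMT_DESCR_DEFERRED use
 * the descriptor-format header fields (ds_key, ds_add_code,
 * ds_qual_code), while the fixed and vendor-specific formats fall
 * back to the fixed-format fields (es_key, es_add_code,
 * es_qual_code).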
16548 */ 16549 16550 static void 16551 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 16552 struct scsi_pkt *pktp) 16553 { 16554 struct scsi_extended_sense *esp; 16555 struct scsi_descr_sense_hdr *sdsp; 16556 uint8_t asc, ascq, sense_key; 16557 16558 ASSERT(un != NULL); 16559 ASSERT(mutex_owned(SD_MUTEX(un))); 16560 ASSERT(bp != NULL); 16561 ASSERT(bp != un->un_rqs_bp); 16562 ASSERT(xp != NULL); 16563 ASSERT(pktp != NULL); 16564 16565 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 16566 16567 switch (esp->es_code) { 16568 case CODE_FMT_DESCR_CURRENT: 16569 case CODE_FMT_DESCR_DEFERRED: 16570 sdsp = (struct scsi_descr_sense_hdr *)xp->xb_sense_data; 16571 sense_key = sdsp->ds_key; 16572 asc = sdsp->ds_add_code; 16573 ascq = sdsp->ds_qual_code; 16574 break; 16575 case CODE_FMT_VENDOR_SPECIFIC: 16576 case CODE_FMT_FIXED_CURRENT: 16577 case CODE_FMT_FIXED_DEFERRED: 16578 default: 16579 sense_key = esp->es_key; 16580 asc = esp->es_add_code; 16581 ascq = esp->es_qual_code; 16582 break; 16583 } 16584 16585 switch (sense_key) { 16586 case KEY_NO_SENSE: 16587 sd_sense_key_no_sense(un, bp, xp, pktp); 16588 break; 16589 case KEY_RECOVERABLE_ERROR: 16590 sd_sense_key_recoverable_error(un, asc, bp, xp, pktp); 16591 break; 16592 case KEY_NOT_READY: 16593 sd_sense_key_not_ready(un, asc, ascq, bp, xp, pktp); 16594 break; 16595 case KEY_MEDIUM_ERROR: 16596 case KEY_HARDWARE_ERROR: 16597 sd_sense_key_medium_or_hardware_error(un, 16598 sense_key, asc, bp, xp, pktp); 16599 break; 16600 case KEY_ILLEGAL_REQUEST: 16601 sd_sense_key_illegal_request(un, bp, xp, pktp); 16602 break; 16603 case KEY_UNIT_ATTENTION: 16604 sd_sense_key_unit_attention(un, asc, bp, xp, pktp); 16605 break; 16606 case KEY_WRITE_PROTECT: 16607 case KEY_VOLUME_OVERFLOW: 16608 case KEY_MISCOMPARE: 16609 sd_sense_key_fail_command(un, bp, xp, pktp); 16610 break; 16611 case KEY_BLANK_CHECK: 16612 sd_sense_key_blank_check(un, bp, xp, pktp); 16613 break; 16614 case KEY_ABORTED_COMMAND: 16615 sd_sense_key_aborted_command(un, bp, xp, pktp); 16616 break; 16617 case KEY_VENDOR_UNIQUE: 16618 case KEY_COPY_ABORTED: 16619 case KEY_EQUAL: 16620 case KEY_RESERVED: 16621 default: 16622 sd_sense_key_default(un, sense_key, bp, xp, pktp); 16623 break; 16624 } 16625 } 16626 16627 16628 /* 16629 * Function: sd_dump_memory 16630 * 16631 * Description: Debug logging routine to print the contents of a user-provided 16632 * buffer. The output of the buffer is broken up into 256-byte 16633 * segments due to a size constraint of the scsi_log 16634 * implementation.
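 *
 * (Editor's example, hypothetical bytes) With fmt == SD_LOG_HEX and
 * title "Failed CDB", one emitted segment would look like:
 *
 *   Failed CDB: 0x28 0x00 0x00 0x12 0x34 0x56 0x00 0x00 0x10 0x00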
16635 * 16636 * Arguments: un - ptr to softstate 16637 * comp - component mask 16638 * title - "title" string to precede data when printed 16639 * data - ptr to data block to be printed 16640 * len - size of data block to be printed 16641 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 16642 * 16643 * Context: May be called from interrupt context 16644 */ 16645 16646 #define SD_DUMP_MEMORY_BUF_SIZE 256 16647 16648 static char *sd_dump_format_string[] = { 16649 " 0x%02x", 16650 " %c" 16651 }; 16652 16653 static void 16654 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 16655 int len, int fmt) 16656 { 16657 int i, j; 16658 int avail_count; 16659 int start_offset; 16660 int end_offset; 16661 size_t entry_len; 16662 char *bufp; 16663 char *local_buf; 16664 char *format_string; 16665 16666 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 16667 16668 /* 16669 * In the debug version of the driver, this function is called from a 16670 * number of places which are NOPs in the release driver. 16671 * The debug driver therefore has additional methods of filtering 16672 * debug output. 16673 */ 16674 #ifdef SDDEBUG 16675 /* 16676 * In the debug version of the driver we can reduce the amount of debug 16677 * messages by setting sd_error_level to something other than 16678 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 16679 * sd_component_mask. 16680 */ 16681 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 16682 (sd_error_level != SCSI_ERR_ALL)) { 16683 return; 16684 } 16685 if (((sd_component_mask & comp) == 0) || 16686 (sd_error_level != SCSI_ERR_ALL)) { 16687 return; 16688 } 16689 #else 16690 if (sd_error_level != SCSI_ERR_ALL) { 16691 return; 16692 } 16693 #endif 16694 16695 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 16696 bufp = local_buf; 16697 /* 16698 * Available length is the length of local_buf[], minus the 16699 * length of the title string, minus one for the ":", minus 16700 * one for the newline, minus one for the NULL terminator. 16701 * This gives the #bytes available for holding the printed 16702 * values from the given data buffer. 16703 */ 16704 if (fmt == SD_LOG_HEX) { 16705 format_string = sd_dump_format_string[0]; 16706 } else /* SD_LOG_CHAR */ { 16707 format_string = sd_dump_format_string[1]; 16708 } 16709 /* 16710 * Available count is the number of elements from the given 16711 * data buffer that we can fit into the available length. 16712 * This is based upon the size of the format string used. 16713 * Make one entry and find its size. 16714 */ 16715 (void) sprintf(bufp, format_string, data[0]); 16716 entry_len = strlen(bufp); 16717 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 16718 16719 j = 0; 16720 while (j < len) { 16721 bufp = local_buf; 16722 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 16723 start_offset = j; 16724 16725 end_offset = start_offset + avail_count; 16726 16727 (void) sprintf(bufp, "%s:", title); 16728 bufp += strlen(bufp); 16729 for (i = start_offset; ((i < end_offset) && (j < len)); 16730 i++, j++) { 16731 (void) sprintf(bufp, format_string, data[i]); 16732 bufp += entry_len; 16733 } 16734 (void) sprintf(bufp, "\n"); 16735 16736 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 16737 } 16738 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 16739 } 16740 16741 /* 16742 * Function: sd_print_sense_msg 16743 * 16744 * Description: Log a message based upon the given sense data.
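 *
 * (Editor's note) When code indicates that a retry was issued, the
 * severity supplied in the sd_sense_info argument is overridden to
 * SCSI_ERR_RETRYABLE below, so retried commands always log at
 * retryable severity.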
16745 * 16746 * Arguments: un - ptr to associated softstate 16747 * bp - ptr to buf(9S) for the command 16748 * arg - ptr to associated sd_sense_info struct 16749 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16750 * or SD_NO_RETRY_ISSUED 16751 * 16752 * Context: May be called from interrupt context 16753 */ 16754 16755 static void 16756 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 16757 { 16758 struct sd_xbuf *xp; 16759 struct scsi_pkt *pktp; 16760 struct scsi_extended_sense *sensep; 16761 daddr_t request_blkno; 16762 diskaddr_t err_blkno; 16763 int severity; 16764 int pfa_flag; 16765 int fixed_format = TRUE; 16766 extern struct scsi_key_strings scsi_cmds[]; 16767 16768 ASSERT(un != NULL); 16769 ASSERT(mutex_owned(SD_MUTEX(un))); 16770 ASSERT(bp != NULL); 16771 xp = SD_GET_XBUF(bp); 16772 ASSERT(xp != NULL); 16773 pktp = SD_GET_PKTP(bp); 16774 ASSERT(pktp != NULL); 16775 ASSERT(arg != NULL); 16776 16777 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 16778 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 16779 16780 if ((code == SD_DELAYED_RETRY_ISSUED) || 16781 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 16782 severity = SCSI_ERR_RETRYABLE; 16783 } 16784 16785 /* Use absolute block number for the request block number */ 16786 request_blkno = xp->xb_blkno; 16787 16788 /* 16789 * Now try to get the error block number from the sense data 16790 */ 16791 sensep = (struct scsi_extended_sense *)xp->xb_sense_data; 16792 switch (sensep->es_code) { 16793 case CODE_FMT_DESCR_CURRENT: 16794 case CODE_FMT_DESCR_DEFERRED: 16795 err_blkno = 16796 sd_extract_sense_info_descr( 16797 (struct scsi_descr_sense_hdr *)sensep); 16798 fixed_format = FALSE; 16799 break; 16800 case CODE_FMT_FIXED_CURRENT: 16801 case CODE_FMT_FIXED_DEFERRED: 16802 case CODE_FMT_VENDOR_SPECIFIC: 16803 default: 16804 /* 16805 * With the es_valid bit set, we assume that the error 16806 * blkno is in the sense data. Also, if xp->xb_blkno is 16807 * greater than 0xffffffff then the target *should* have used 16808 * a descriptor sense format (or it shouldn't have set 16809 * the es_valid bit), and we may as well ignore the 16810 * 32-bit value. 16811 */ 16812 if ((sensep->es_valid != 0) && (xp->xb_blkno <= 0xffffffff)) { 16813 err_blkno = (diskaddr_t) 16814 ((sensep->es_info_1 << 24) | 16815 (sensep->es_info_2 << 16) | 16816 (sensep->es_info_3 << 8) | 16817 (sensep->es_info_4)); 16818 } else { 16819 err_blkno = (diskaddr_t)-1; 16820 } 16821 break; 16822 } 16823 16824 if (err_blkno == (diskaddr_t)-1) { 16825 /* 16826 * Without the es_valid bit set (for fixed format) or an 16827 * information descriptor (for descriptor format) we cannot 16828 * be certain of the error blkno, so just use the 16829 * request_blkno. 16830 */ 16831 err_blkno = (diskaddr_t)request_blkno; 16832 } else { 16833 /* 16834 * We retrieved the error block number from the information 16835 * portion of the sense data. 16836 * 16837 * For USCSI commands we are better off using the error 16838 * block no. as the requested block no. (This is the best 16839 * we can estimate.) 16840 */ 16841 if ((SD_IS_BUFIO(xp) == FALSE) && 16842 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 16843 request_blkno = err_blkno; 16844 } 16845 } 16846 16847 /* 16848 * The following will log the buffer contents for the release driver 16849 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 16850 * level is set to verbose.
16851 */ 16852 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 16853 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 16854 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 16855 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 16856 16857 if (pfa_flag == FALSE) { 16858 /* This is normally only set for USCSI */ 16859 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 16860 return; 16861 } 16862 16863 if ((SD_IS_BUFIO(xp) == TRUE) && 16864 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 16865 (severity < sd_error_level))) { 16866 return; 16867 } 16868 } 16869 16870 /* 16871 * If the data is fixed format then check for Sonoma Failover, 16872 * and keep a count of how many failed I/Os. We should not have 16873 * to worry about Sonoma returning descriptor format sense data; 16874 * asc/ascq are in a different location in descriptor format. 16875 */ 16876 if (fixed_format && 16877 (SD_IS_LSI(un)) && (sensep->es_key == KEY_ILLEGAL_REQUEST) && 16878 (sensep->es_add_code == 0x94) && (sensep->es_qual_code == 0x01)) { 16879 un->un_sonoma_failure_count++; 16880 if (un->un_sonoma_failure_count > 1) { 16881 return; 16882 } 16883 } 16884 16885 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 16886 request_blkno, err_blkno, scsi_cmds, sensep, 16887 un->un_additional_codes, NULL); 16888 } 16889 16890 /* 16891 * Function: sd_extract_sense_info_descr 16892 * 16893 * Description: Retrieve "information" field from descriptor format 16894 * sense data. Iterates through each sense descriptor 16895 * looking for the information descriptor and returns 16896 * the information field from that descriptor. 16897 * 16898 * Context: May be called from interrupt context 16899 */ 16900 16901 static diskaddr_t 16902 sd_extract_sense_info_descr(struct scsi_descr_sense_hdr *sdsp) 16903 { 16904 diskaddr_t result; 16905 uint8_t *descr_offset; 16906 int valid_sense_length; 16907 struct scsi_information_sense_descr *isd; 16908 16909 /* 16910 * Initialize result to -1 indicating there is no information 16911 * descriptor 16912 */ 16913 result = (diskaddr_t)-1; 16914 16915 /* 16916 * The first descriptor will immediately follow the header 16917 */ 16918 descr_offset = (uint8_t *)(sdsp+1); /* Pointer arithmetic */ 16919 16920 /* 16921 * Calculate the amount of valid sense data 16922 */ 16923 valid_sense_length = 16924 min((sizeof (struct scsi_descr_sense_hdr) + 16925 sdsp->ds_addl_sense_length), 16926 SENSE_LENGTH); 16927 16928 /* 16929 * Iterate through the list of descriptors, stopping when we 16930 * run out of sense data 16931 */ 16932 while ((descr_offset + sizeof (struct scsi_information_sense_descr)) <= 16933 (uint8_t *)sdsp + valid_sense_length) { 16934 /* 16935 * Check if this is an information descriptor. We can 16936 * use the scsi_information_sense_descr structure as a 16937 * template since the first two fields are always the 16938 * same 16939 */ 16940 isd = (struct scsi_information_sense_descr *)descr_offset; 16941 if (isd->isd_descr_type == DESCR_INFORMATION) { 16942 /* 16943 * Found an information descriptor. Copy the 16944 * information field. There will only be one 16945 * information descriptor so we can stop looking.
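 *
 * (Editor's note) The information field is an 8-byte
 * big-endian value; the assembly below shifts
 * isd_information[0..7] into a diskaddr_t starting from
 * the most significant byte.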
16946 */ 16947 result = 16948 (((diskaddr_t)isd->isd_information[0] << 56) | 16949 ((diskaddr_t)isd->isd_information[1] << 48) | 16950 ((diskaddr_t)isd->isd_information[2] << 40) | 16951 ((diskaddr_t)isd->isd_information[3] << 32) | 16952 ((diskaddr_t)isd->isd_information[4] << 24) | 16953 ((diskaddr_t)isd->isd_information[5] << 16) | 16954 ((diskaddr_t)isd->isd_information[6] << 8) | 16955 ((diskaddr_t)isd->isd_information[7])); 16956 break; 16957 } 16958 16959 /* 16960 * Get pointer to the next descriptor. The "additional 16961 * length" field holds the length of the descriptor except 16962 * for the "type" and "additional length" fields, so 16963 * we need to add 2 to get the total length. 16964 */ 16965 descr_offset += (isd->isd_addl_length + 2); 16966 } 16967 16968 return (result); 16969 } 16970 16971 /* 16972 * Function: sd_sense_key_no_sense 16973 * 16974 * Description: Recovery action when sense data was not received. 16975 * 16976 * Context: May be called from interrupt context 16977 */ 16978 16979 static void 16980 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 16981 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16982 { 16983 struct sd_sense_info si; 16984 16985 ASSERT(un != NULL); 16986 ASSERT(mutex_owned(SD_MUTEX(un))); 16987 ASSERT(bp != NULL); 16988 ASSERT(xp != NULL); 16989 ASSERT(pktp != NULL); 16990 16991 si.ssi_severity = SCSI_ERR_FATAL; 16992 si.ssi_pfa_flag = FALSE; 16993 16994 SD_UPDATE_ERRSTATS(un, sd_softerrs); 16995 16996 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 16997 &si, EIO, (clock_t)0, NULL); 16998 } 16999 17000 17001 /* 17002 * Function: sd_sense_key_recoverable_error 17003 * 17004 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 17005 * 17006 * Context: May be called from interrupt context 17007 */ 17008 17009 static void 17010 sd_sense_key_recoverable_error(struct sd_lun *un, 17011 uint8_t asc, 17012 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17013 { 17014 struct sd_sense_info si; 17015 17016 ASSERT(un != NULL); 17017 ASSERT(mutex_owned(SD_MUTEX(un))); 17018 ASSERT(bp != NULL); 17019 ASSERT(xp != NULL); 17020 ASSERT(pktp != NULL); 17021 17022 /* 17023 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 17024 */ 17025 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 17026 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17027 si.ssi_severity = SCSI_ERR_INFO; 17028 si.ssi_pfa_flag = TRUE; 17029 } else { 17030 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17031 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 17032 si.ssi_severity = SCSI_ERR_RECOVERED; 17033 si.ssi_pfa_flag = FALSE; 17034 } 17035 17036 if (pktp->pkt_resid == 0) { 17037 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17038 sd_return_command(un, bp); 17039 return; 17040 } 17041 17042 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17043 &si, EIO, (clock_t)0, NULL); 17044 } 17045 17046 17047 17048 17049 /* 17050 * Function: sd_sense_key_not_ready 17051 * 17052 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
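 *
 * (Editor's note) The handler below keys off ASC/ASCQ: 0x04/0x01
 * (becoming ready) retries right away, 0x04/0x03 (manual
 * intervention required) fails immediately, 0x3A (medium not
 * present) marks the media ejected and fails, and most remaining
 * 0x04 variants schedule a START STOP UNIT before retrying.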
17053 * 17054 * Context: May be called from interrupt context 17055 */ 17056 17057 static void 17058 sd_sense_key_not_ready(struct sd_lun *un, 17059 uint8_t asc, uint8_t ascq, 17060 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17061 { 17062 struct sd_sense_info si; 17063 17064 ASSERT(un != NULL); 17065 ASSERT(mutex_owned(SD_MUTEX(un))); 17066 ASSERT(bp != NULL); 17067 ASSERT(xp != NULL); 17068 ASSERT(pktp != NULL); 17069 17070 si.ssi_severity = SCSI_ERR_FATAL; 17071 si.ssi_pfa_flag = FALSE; 17072 17073 /* 17074 * Update error stats after first NOT READY error. Disks may have 17075 * been powered down and may need to be restarted. For CDROMs, 17076 * report NOT READY errors only if media is present. 17077 */ 17078 if ((ISCD(un) && (un->un_f_geometry_is_valid == TRUE)) || 17079 (xp->xb_retry_count > 0)) { 17080 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17081 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 17082 } 17083 17084 /* 17085 * Just fail if the "not ready" retry limit has been reached. 17086 */ 17087 if (xp->xb_retry_count >= un->un_notready_retry_count) { 17088 /* Special check for error message printing for removables. */ 17089 if ((ISREMOVABLE(un)) && (asc == 0x04) && 17090 (ascq >= 0x04)) { 17091 si.ssi_severity = SCSI_ERR_ALL; 17092 } 17093 goto fail_command; 17094 } 17095 17096 /* 17097 * Check the ASC and ASCQ in the sense data as needed, to determine 17098 * what to do. 17099 */ 17100 switch (asc) { 17101 case 0x04: /* LOGICAL UNIT NOT READY */ 17102 /* 17103 * Disk drives that don't spin up result in a very long delay 17104 * in format without warning messages. We will log a message 17105 * if the error level is set to verbose. 17106 */ 17107 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17108 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17109 "logical unit not ready, resetting disk\n"); 17110 } 17111 17112 /* 17113 * There are different requirements for CDROMs and disks for 17114 * the number of retries. If a CD-ROM is reporting this, it is 17115 * probably reading TOC and is in the process of getting 17116 * ready, so we should keep on trying for a long time to make 17117 * sure that all types of media are taken into account (for 17118 * some media the drive takes a long time to read TOC). For 17119 * disks we do not want to retry this too many times as this 17120 * can cause a long hang in format when the drive refuses to 17121 * spin up (a very common failure). 17122 */ 17123 switch (ascq) { 17124 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 17125 /* 17126 * Disk drives frequently refuse to spin up which 17127 * results in a very long hang in format without 17128 * warning messages. 17129 * 17130 * Note: This code preserves the legacy behavior of 17131 * comparing xb_retry_count against zero for fibre 17132 * channel targets instead of comparing against the 17133 * un_reset_retry_count value. The reason for this 17134 * discrepancy has been so utterly lost beneath the 17135 * Sands of Time that even Indiana Jones could not 17136 * find it.
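 *
 * (Editor's note) Concretely: both branches below reset the
 * target only when no START STOP UNIT timeout is pending;
 * the fibre branch triggers once xb_retry_count is nonzero
 * (or SD_LOGMASK_DIAG is set), while the parallel SCSI
 * branch waits until the count exceeds un_reset_retry_count.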
17137 */ 17138 if (un->un_f_is_fibre == TRUE) { 17139 if (((sd_level_mask & SD_LOGMASK_DIAG) || 17140 (xp->xb_retry_count > 0)) && 17141 (un->un_startstop_timeid == NULL)) { 17142 scsi_log(SD_DEVINFO(un), sd_label, 17143 CE_WARN, "logical unit not ready, " 17144 "resetting disk\n"); 17145 sd_reset_target(un, pktp); 17146 } 17147 } else { 17148 if (((sd_level_mask & SD_LOGMASK_DIAG) || 17149 (xp->xb_retry_count > 17150 un->un_reset_retry_count)) && 17151 (un->un_startstop_timeid == NULL)) { 17152 scsi_log(SD_DEVINFO(un), sd_label, 17153 CE_WARN, "logical unit not ready, " 17154 "resetting disk\n"); 17155 sd_reset_target(un, pktp); 17156 } 17157 } 17158 break; 17159 17160 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 17161 /* 17162 * If the target is in the process of becoming 17163 * ready, just proceed with the retry. This can 17164 * happen with CD-ROMs that take a long time to 17165 * read TOC after a power cycle or reset. 17166 */ 17167 goto do_retry; 17168 17169 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 17170 break; 17171 17172 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 17173 /* 17174 * Retries cannot help here so just fail right away. 17175 */ 17176 goto fail_command; 17177 17178 case 0x88: 17179 /* 17180 * Vendor-unique code for T3/T4: it indicates a 17181 * path problem in a multipathed config, but as far as 17182 * the target driver is concerned it equates to a fatal 17183 * error, so we should just fail the command right away 17184 * (without printing anything to the console). If this 17185 * is not a T3/T4, fall through to the default recovery 17186 * action. 17187 * T3/T4 is FC only; no need to check is_fibre 17188 */ 17189 if (SD_IS_T3(un) || SD_IS_T4(un)) { 17190 sd_return_failed_command(un, bp, EIO); 17191 return; 17192 } 17193 /* FALLTHRU */ 17194 17195 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 17196 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 17197 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 17198 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 17199 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 17200 default: /* Possible future codes in SCSI spec? */ 17201 /* 17202 * For removable-media devices, do not retry if 17203 * ASCQ > 2 as these result mostly from USCSI commands 17204 * on MMC devices issued to check status of an 17205 * operation initiated in immediate mode. Also for 17206 * ASCQ >= 4 do not print console messages as these 17207 * mainly represent a user-initiated operation 17208 * instead of a system failure. 17209 */ 17210 if (ISREMOVABLE(un)) { 17211 si.ssi_severity = SCSI_ERR_ALL; 17212 goto fail_command; 17213 } 17214 break; 17215 } 17216 17217 /* 17218 * As part of our recovery attempt for the NOT READY 17219 * condition, we issue a START STOP UNIT command. However, 17220 * we want to wait for a short delay before attempting this 17221 * as there may still be more commands coming back from the 17222 * target with the check condition. To do this we use 17223 * timeout(9F) to call sd_start_stop_unit_callback() after 17224 * the delay interval expires. (sd_start_stop_unit_callback() 17225 * dispatches sd_start_stop_unit_task(), which will issue 17226 * the actual START STOP UNIT command.) The delay interval 17227 * is one-half of the delay that we will use to retry the 17228 * command that generated the NOT READY condition.
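 *
 * (Editor's note) The sequence below is: timeout(9F) arms
 * sd_start_stop_unit_callback for SD_BSY_TIMEOUT / 2 ticks; that
 * callback dispatches sd_start_stop_unit_task onto a taskq, which
 * issues the START STOP UNIT; the retry queued via
 * sd_set_retry_bp() is restarted when that task completes.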
17229 * 17230 * Note that we could just dispatch sd_start_stop_unit_task() 17231 * from here and allow it to sleep for the delay interval, 17232 * but then we would be tying up the taskq thread 17233 * unnecessarily for the duration of the delay. 17234 * 17235 * Do not issue the START STOP UNIT if the current command 17236 * is already a START STOP UNIT. 17237 */ 17238 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 17239 break; 17240 } 17241 17242 /* 17243 * Do not schedule the timeout if one is already pending. 17244 */ 17245 if (un->un_startstop_timeid != NULL) { 17246 SD_INFO(SD_LOG_ERROR, un, 17247 "sd_sense_key_not_ready: restart already issued to" 17248 " 0x%x : 0x%x\n", SD_TARGET(un), SD_LUN(un)); 17249 break; 17250 } 17251 17252 /* 17253 * Schedule the START STOP UNIT command, then queue the command 17254 * for a retry. 17255 * 17256 * Note: A timeout is not scheduled for this retry because we 17257 * want the retry to be serial with the START_STOP_UNIT. The 17258 * retry will be started when the START_STOP_UNIT is completed 17259 * in sd_start_stop_unit_task. 17260 */ 17261 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 17262 un, SD_BSY_TIMEOUT / 2); 17263 xp->xb_retry_count++; 17264 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 17265 return; 17266 17267 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 17268 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17269 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17270 "unit does not respond to selection\n"); 17271 } 17272 break; 17273 17274 case 0x3A: /* MEDIUM NOT PRESENT */ 17275 if (sd_error_level >= SCSI_ERR_FATAL) { 17276 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17277 "Caddy not inserted in drive\n"); 17278 } 17279 17280 sr_ejected(un); 17281 un->un_mediastate = DKIO_EJECTED; 17282 /* The state has changed; inform the media watch routines */ 17283 cv_broadcast(&un->un_state_cv); 17284 /* Just fail if no media is present in the drive. */ 17285 goto fail_command; 17286 17287 default: 17288 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17289 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 17290 "Unit not Ready. Additional sense code 0x%x\n", 17291 asc); 17292 } 17293 break; 17294 } 17295 17296 do_retry: 17297 17298 /* 17299 * Retry the command, as some targets may report NOT READY for 17300 * several seconds after being reset. 17301 */ 17302 xp->xb_retry_count++; 17303 si.ssi_severity = SCSI_ERR_RETRYABLE; 17304 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17305 &si, EIO, SD_BSY_TIMEOUT, NULL); 17306 17307 return; 17308 17309 fail_command: 17310 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17311 sd_return_failed_command(un, bp, EIO); 17312 } 17313 17314 17315 17316 /* 17317 * Function: sd_sense_key_medium_or_hardware_error 17318 * 17319 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 17320 * sense key.
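 *
 * (Editor's note) Once xb_retry_count reaches un_reset_retry_count,
 * the code below attempts RESET_LUN first (when
 * un_f_lun_reset_enabled is set) and falls back to RESET_TARGET if
 * that fails, except for LSI arrays reporting ASC 0x84 (LUN dead),
 * where a target reset would needlessly victimize the other LUNs.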
17321 * 17322 * Context: May be called from interrupt context 17323 */ 17324 17325 static void 17326 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 17327 int sense_key, uint8_t asc, 17328 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17329 { 17330 struct sd_sense_info si; 17331 17332 ASSERT(un != NULL); 17333 ASSERT(mutex_owned(SD_MUTEX(un))); 17334 ASSERT(bp != NULL); 17335 ASSERT(xp != NULL); 17336 ASSERT(pktp != NULL); 17337 17338 si.ssi_severity = SCSI_ERR_FATAL; 17339 si.ssi_pfa_flag = FALSE; 17340 17341 if (sense_key == KEY_MEDIUM_ERROR) { 17342 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 17343 } 17344 17345 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17346 17347 if ((un->un_reset_retry_count != 0) && 17348 (xp->xb_retry_count == un->un_reset_retry_count)) { 17349 mutex_exit(SD_MUTEX(un)); 17350 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 17351 if (un->un_f_allow_bus_device_reset == TRUE) { 17352 17353 boolean_t try_resetting_target = B_TRUE; 17354 17355 /* 17356 * We need to be able to handle specific ASC values when we 17357 * are handling a KEY_HARDWARE_ERROR. In particular, 17358 * taking the default action of resetting the target may 17359 * not be the appropriate way to attempt recovery. 17360 * Resetting a target because of a single LUN failure 17361 * victimizes all LUNs on that target. 17362 * 17363 * This is true for LSI arrays: if an LSI 17364 * array controller returns an ASC of 0x84 (LUN Dead), we 17365 * should trust it. 17366 */ 17367 17368 if (sense_key == KEY_HARDWARE_ERROR) { 17369 switch (asc) { 17370 case 0x84: 17371 if (SD_IS_LSI(un)) { 17372 try_resetting_target = B_FALSE; 17373 } 17374 break; 17375 default: 17376 break; 17377 } 17378 } 17379 17380 if (try_resetting_target == B_TRUE) { 17381 int reset_retval = 0; 17382 if (un->un_f_lun_reset_enabled == TRUE) { 17383 SD_TRACE(SD_LOG_IO_CORE, un, 17384 "sd_sense_key_medium_or_hardware_" 17385 "error: issuing RESET_LUN\n"); 17386 reset_retval = 17387 scsi_reset(SD_ADDRESS(un), 17388 RESET_LUN); 17389 } 17390 if (reset_retval == 0) { 17391 SD_TRACE(SD_LOG_IO_CORE, un, 17392 "sd_sense_key_medium_or_hardware_" 17393 "error: issuing RESET_TARGET\n"); 17394 (void) scsi_reset(SD_ADDRESS(un), 17395 RESET_TARGET); 17396 } 17397 } 17398 } 17399 mutex_enter(SD_MUTEX(un)); 17400 } 17401 17402 /* 17403 * This really ought to be a fatal error, but we will retry anyway 17404 * as some drives report this as a spurious error. 17405 */ 17406 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17407 &si, EIO, (clock_t)0, NULL); 17408 } 17409 17410 17411 17412 /* 17413 * Function: sd_sense_key_illegal_request 17414 * 17415 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
17416 * 17417 * Context: May be called from interrupt context 17418 */ 17419 17420 static void 17421 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 17422 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17423 { 17424 struct sd_sense_info si; 17425 17426 ASSERT(un != NULL); 17427 ASSERT(mutex_owned(SD_MUTEX(un))); 17428 ASSERT(bp != NULL); 17429 ASSERT(xp != NULL); 17430 ASSERT(pktp != NULL); 17431 17432 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17433 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 17434 17435 si.ssi_severity = SCSI_ERR_INFO; 17436 si.ssi_pfa_flag = FALSE; 17437 17438 /* Pointless to retry if the target thinks it's an illegal request */ 17439 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17440 sd_return_failed_command(un, bp, EIO); 17441 } 17442 17443 17444 17445 17446 /* 17447 * Function: sd_sense_key_unit_attention 17448 * 17449 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 17450 * 17451 * Context: May be called from interrupt context 17452 */ 17453 17454 static void 17455 sd_sense_key_unit_attention(struct sd_lun *un, 17456 uint8_t asc, 17457 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17458 { 17459 /* 17460 * For UNIT ATTENTION we allow retries for one minute. Devices 17461 * like Sonoma can return UNIT ATTENTION for close to a minute 17462 * under certain conditions. 17463 */ 17464 int retry_check_flag = SD_RETRIES_UA; 17465 struct sd_sense_info si; 17466 17467 ASSERT(un != NULL); 17468 ASSERT(mutex_owned(SD_MUTEX(un))); 17469 ASSERT(bp != NULL); 17470 ASSERT(xp != NULL); 17471 ASSERT(pktp != NULL); 17472 17473 si.ssi_severity = SCSI_ERR_INFO; 17474 si.ssi_pfa_flag = FALSE; 17475 17476 17477 switch (asc) { 17478 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 17479 if (sd_report_pfa != 0) { 17480 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17481 si.ssi_pfa_flag = TRUE; 17482 retry_check_flag = SD_RETRIES_STANDARD; 17483 goto do_retry; 17484 } 17485 break; 17486 17487 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 17488 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 17489 un->un_resvd_status |= 17490 (SD_LOST_RESERVE | SD_WANT_RESERVE); 17491 } 17492 /* FALLTHRU */ 17493 17494 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 17495 if (!ISREMOVABLE(un)) { 17496 break; 17497 } 17498 17499 /* 17500 * When we get a unit attention from a removable-media device, 17501 * it may be in a state that will take a long time to recover 17502 * (e.g., from a reset). Since we are executing in interrupt 17503 * context here, we cannot wait around for the device to come 17504 * back. So hand this command off to sd_media_change_task() 17505 * for deferred processing under taskq thread context. (Note 17506 * that the command still may be failed if a problem is 17507 * encountered at a later time.) 17508 */ 17509 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 17510 KM_NOSLEEP) == 0) { 17511 /* 17512 * Cannot dispatch the request so fail the command. 17513 */ 17514 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17515 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17516 si.ssi_severity = SCSI_ERR_FATAL; 17517 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17518 sd_return_failed_command(un, bp, EIO); 17519 } 17520 /* 17521 * Either the command has been successfully dispatched to a 17522 * taskq for retrying, or the dispatch failed. In either case 17523 * do NOT retry again by calling sd_retry_command.
This sets up 17524 * two retries of the same command, and when one completes and 17525 * frees the resources, the other will access freed memory, 17526 * a bad thing. 17527 */ 17528 return; 17529 17530 default: 17531 break; 17532 } 17533 17534 if (!ISREMOVABLE(un)) { 17535 /* 17536 * Do not update these here for removables. For removables 17537 * these stats are updated (1) above if we failed to dispatch 17538 * sd_media_change_task(), or (2) sd_media_change_task() may 17539 * update these later if it encounters an error. 17540 */ 17541 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17542 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17543 } 17544 17545 do_retry: 17546 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 17547 EIO, SD_UA_RETRY_DELAY, NULL); 17548 } 17549 17550 17551 17552 /* 17553 * Function: sd_sense_key_fail_command 17554 * 17555 * Description: Used to fail a command when we don't like the sense key that 17556 * was returned. 17557 * 17558 * Context: May be called from interrupt context 17559 */ 17560 17561 static void 17562 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 17563 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17564 { 17565 struct sd_sense_info si; 17566 17567 ASSERT(un != NULL); 17568 ASSERT(mutex_owned(SD_MUTEX(un))); 17569 ASSERT(bp != NULL); 17570 ASSERT(xp != NULL); 17571 ASSERT(pktp != NULL); 17572 17573 si.ssi_severity = SCSI_ERR_FATAL; 17574 si.ssi_pfa_flag = FALSE; 17575 17576 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17577 sd_return_failed_command(un, bp, EIO); 17578 } 17579 17580 17581 17582 /* 17583 * Function: sd_sense_key_blank_check 17584 * 17585 * Description: Recovery actions for a SCSI "Blank Check" sense key. 17586 * Has no monetary connotation. 17587 * 17588 * Context: May be called from interrupt context 17589 */ 17590 17591 static void 17592 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 17593 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17594 { 17595 struct sd_sense_info si; 17596 17597 ASSERT(un != NULL); 17598 ASSERT(mutex_owned(SD_MUTEX(un))); 17599 ASSERT(bp != NULL); 17600 ASSERT(xp != NULL); 17601 ASSERT(pktp != NULL); 17602 17603 /* 17604 * Blank check is not fatal for removable devices; therefore 17605 * it does not require a console message. 17606 */ 17607 si.ssi_severity = (ISREMOVABLE(un)) ? SCSI_ERR_ALL : SCSI_ERR_FATAL; 17608 si.ssi_pfa_flag = FALSE; 17609 17610 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17611 sd_return_failed_command(un, bp, EIO); 17612 } 17613 17614 17615 17616 17617 /* 17618 * Function: sd_sense_key_aborted_command 17619 * 17620 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 17621 * 17622 * Context: May be called from interrupt context 17623 */ 17624 17625 static void 17626 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 17627 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17628 { 17629 struct sd_sense_info si; 17630 17631 ASSERT(un != NULL); 17632 ASSERT(mutex_owned(SD_MUTEX(un))); 17633 ASSERT(bp != NULL); 17634 ASSERT(xp != NULL); 17635 ASSERT(pktp != NULL); 17636 17637 si.ssi_severity = SCSI_ERR_FATAL; 17638 si.ssi_pfa_flag = FALSE; 17639 17640 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17641 17642 /* 17643 * This really ought to be a fatal error, but we will retry anyway 17644 * as some drives report this as a spurious error.
17645 */ 17646 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17647 &si, EIO, (clock_t)0, NULL); 17648 } 17649 17650 17651 17652 /* 17653 * Function: sd_sense_key_default 17654 * 17655 * Description: Default recovery action for several SCSI sense keys (basically 17656 * attempts a retry). 17657 * 17658 * Context: May be called from interrupt context 17659 */ 17660 17661 static void 17662 sd_sense_key_default(struct sd_lun *un, 17663 int sense_key, 17664 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17665 { 17666 struct sd_sense_info si; 17667 17668 ASSERT(un != NULL); 17669 ASSERT(mutex_owned(SD_MUTEX(un))); 17670 ASSERT(bp != NULL); 17671 ASSERT(xp != NULL); 17672 ASSERT(pktp != NULL); 17673 17674 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17675 17676 /* 17677 * Undecoded sense key. Attempt retries and hope that will fix 17678 * the problem. Otherwise, we're dead. 17679 */ 17680 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17681 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17682 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 17683 } 17684 17685 si.ssi_severity = SCSI_ERR_FATAL; 17686 si.ssi_pfa_flag = FALSE; 17687 17688 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17689 &si, EIO, (clock_t)0, NULL); 17690 } 17691 17692 17693 17694 /* 17695 * Function: sd_print_retry_msg 17696 * 17697 * Description: Print a message indicating the retry action being taken. 17698 * 17699 * Arguments: un - ptr to associated softstate 17700 * bp - ptr to buf(9S) for the command 17701 * arg - not used. 17702 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17703 * or SD_NO_RETRY_ISSUED 17704 * 17705 * Context: May be called from interrupt context 17706 */ 17707 /* ARGSUSED */ 17708 static void 17709 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 17710 { 17711 struct sd_xbuf *xp; 17712 struct scsi_pkt *pktp; 17713 char *reasonp; 17714 char *msgp; 17715 17716 ASSERT(un != NULL); 17717 ASSERT(mutex_owned(SD_MUTEX(un))); 17718 ASSERT(bp != NULL); 17719 pktp = SD_GET_PKTP(bp); 17720 ASSERT(pktp != NULL); 17721 xp = SD_GET_XBUF(bp); 17722 ASSERT(xp != NULL); 17723 17724 ASSERT(!mutex_owned(&un->un_pm_mutex)); 17725 mutex_enter(&un->un_pm_mutex); 17726 if ((un->un_state == SD_STATE_SUSPENDED) || 17727 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 17728 (pktp->pkt_flags & FLAG_SILENT)) { 17729 mutex_exit(&un->un_pm_mutex); 17730 goto update_pkt_reason; 17731 } 17732 mutex_exit(&un->un_pm_mutex); 17733 17734 /* 17735 * Suppress messages if they are all the same pkt_reason; with 17736 * TQ, many (up to 256) are returned with the same pkt_reason. 17737 * If we are in panic, then suppress the retry messages. 17738 */ 17739 switch (flag) { 17740 case SD_NO_RETRY_ISSUED: 17741 msgp = "giving up"; 17742 break; 17743 case SD_IMMEDIATE_RETRY_ISSUED: 17744 case SD_DELAYED_RETRY_ISSUED: 17745 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 17746 ((pktp->pkt_reason == un->un_last_pkt_reason) && 17747 (sd_error_level != SCSI_ERR_ALL))) { 17748 return; 17749 } 17750 msgp = "retrying command"; 17751 break; 17752 default: 17753 goto update_pkt_reason; 17754 } 17755 17756 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 17757 scsi_rname(pktp->pkt_reason)); 17758 17759 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17760 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 17761 17762 update_pkt_reason: 17763 /* 17764 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 
17765 * This is to prevent multiple console messages for the same failure 17766 * condition. Note that un->un_last_pkt_reason is NOT restored if & 17767 * when the command is retried successfully because there still may be 17768 * more commands coming back with the same value of pktp->pkt_reason. 17769 */ 17770 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 17771 un->un_last_pkt_reason = pktp->pkt_reason; 17772 } 17773 } 17774 17775 17776 /* 17777 * Function: sd_print_cmd_incomplete_msg 17778 * 17779 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 17780 * 17781 * Arguments: un - ptr to associated softstate 17782 * bp - ptr to buf(9S) for the command 17783 * arg - passed to sd_print_retry_msg() 17784 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17785 * or SD_NO_RETRY_ISSUED 17786 * 17787 * Context: May be called from interrupt context 17788 */ 17789 17790 static void 17791 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 17792 int code) 17793 { 17794 dev_info_t *dip; 17795 17796 ASSERT(un != NULL); 17797 ASSERT(mutex_owned(SD_MUTEX(un))); 17798 ASSERT(bp != NULL); 17799 17800 switch (code) { 17801 case SD_NO_RETRY_ISSUED: 17802 /* Command was failed. Someone turned off this target? */ 17803 if (un->un_state != SD_STATE_OFFLINE) { 17804 /* 17805 * Suppress message if we are detaching and 17806 * device has been disconnected 17807 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 17808 * private interface and not part of the DDI 17809 */ 17810 dip = un->un_sd->sd_dev; 17811 if (!(DEVI_IS_DETACHING(dip) && 17812 DEVI_IS_DEVICE_REMOVED(dip))) { 17813 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17814 "disk not responding to selection\n"); 17815 } 17816 New_state(un, SD_STATE_OFFLINE); 17817 } 17818 break; 17819 17820 case SD_DELAYED_RETRY_ISSUED: 17821 case SD_IMMEDIATE_RETRY_ISSUED: 17822 default: 17823 /* Command was successfully queued for retry */ 17824 sd_print_retry_msg(un, bp, arg, code); 17825 break; 17826 } 17827 } 17828 17829 17830 /* 17831 * Function: sd_pkt_reason_cmd_incomplete 17832 * 17833 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 17834 * 17835 * Context: May be called from interrupt context 17836 */ 17837 17838 static void 17839 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 17840 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17841 { 17842 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 17843 17844 ASSERT(un != NULL); 17845 ASSERT(mutex_owned(SD_MUTEX(un))); 17846 ASSERT(bp != NULL); 17847 ASSERT(xp != NULL); 17848 ASSERT(pktp != NULL); 17849 17850 /* Do not do a reset if selection did not complete */ 17851 /* Note: Should this not just check the bit? */ 17852 if (pktp->pkt_state != STATE_GOT_BUS) { 17853 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17854 sd_reset_target(un, pktp); 17855 } 17856 17857 /* 17858 * If the target was not successfully selected, then set 17859 * SD_RETRIES_FAILFAST to indicate that we lost communication 17860 * with the target, and further retries and/or commands are 17861 * likely to take a long time. 
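 *
 * (Editor's note) Selection is judged from the pkt_state bit mask:
 * STATE_GOT_TARGET clear means the target never answered selection,
 * which is the case the FAILFAST flag below is meant to catch.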
17862 */ 17863 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 17864 flag |= SD_RETRIES_FAILFAST; 17865 } 17866 17867 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17868 17869 sd_retry_command(un, bp, flag, 17870 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17871 } 17872 17873 17874 17875 /* 17876 * Function: sd_pkt_reason_cmd_tran_err 17877 * 17878 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 17879 * 17880 * Context: May be called from interrupt context 17881 */ 17882 17883 static void 17884 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 17885 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17886 { 17887 ASSERT(un != NULL); 17888 ASSERT(mutex_owned(SD_MUTEX(un))); 17889 ASSERT(bp != NULL); 17890 ASSERT(xp != NULL); 17891 ASSERT(pktp != NULL); 17892 17893 /* 17894 * Do not reset if we got a parity error, or if 17895 * selection did not complete. 17896 */ 17897 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17898 /* Note: Should this not just check the bit for pkt_state? */ 17899 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 17900 (pktp->pkt_state != STATE_GOT_BUS)) { 17901 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17902 sd_reset_target(un, pktp); 17903 } 17904 17905 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17906 17907 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17908 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17909 } 17910 17911 17912 17913 /* 17914 * Function: sd_pkt_reason_cmd_reset 17915 * 17916 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 17917 * 17918 * Context: May be called from interrupt context 17919 */ 17920 17921 static void 17922 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 17923 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17924 { 17925 ASSERT(un != NULL); 17926 ASSERT(mutex_owned(SD_MUTEX(un))); 17927 ASSERT(bp != NULL); 17928 ASSERT(xp != NULL); 17929 ASSERT(pktp != NULL); 17930 17931 /* The target may still be running the command, so try to reset. */ 17932 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17933 sd_reset_target(un, pktp); 17934 17935 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17936 17937 /* 17938 * If pkt_reason is CMD_RESET chances are that this pkt got 17939 * reset because another target on this bus caused it. The target 17940 * that caused it should get CMD_TIMEOUT with pkt_statistics 17941 * of STAT_TIMEOUT/STAT_DEV_RESET. 17942 */ 17943 17944 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17945 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17946 } 17947 17948 17949 17950 17951 /* 17952 * Function: sd_pkt_reason_cmd_aborted 17953 * 17954 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 17955 * 17956 * Context: May be called from interrupt context 17957 */ 17958 17959 static void 17960 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 17961 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17962 { 17963 ASSERT(un != NULL); 17964 ASSERT(mutex_owned(SD_MUTEX(un))); 17965 ASSERT(bp != NULL); 17966 ASSERT(xp != NULL); 17967 ASSERT(pktp != NULL); 17968 17969 /* The target may still be running the command, so try to reset. */ 17970 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17971 sd_reset_target(un, pktp); 17972 17973 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17974 17975 /* 17976 * If pkt_reason is CMD_ABORTED chances are that this pkt got 17977 * aborted because another target on this bus caused it. The target 17978 * that caused it should get CMD_TIMEOUT with pkt_statistics 17979 * of STAT_TIMEOUT/STAT_DEV_RESET. 
17980 */ 17981 17982 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17983 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17984 } 17985 17986 17987 17988 /* 17989 * Function: sd_pkt_reason_cmd_timeout 17990 * 17991 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 17992 * 17993 * Context: May be called from interrupt context 17994 */ 17995 17996 static void 17997 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 17998 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17999 { 18000 ASSERT(un != NULL); 18001 ASSERT(mutex_owned(SD_MUTEX(un))); 18002 ASSERT(bp != NULL); 18003 ASSERT(xp != NULL); 18004 ASSERT(pktp != NULL); 18005 18006 18007 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18008 sd_reset_target(un, pktp); 18009 18010 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18011 18012 /* 18013 * A command timeout indicates that we could not establish 18014 * communication with the target, so set SD_RETRIES_FAILFAST 18015 * as further retries/commands are likely to take a long time. 18016 */ 18017 sd_retry_command(un, bp, 18018 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 18019 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18020 } 18021 18022 18023 18024 /* 18025 * Function: sd_pkt_reason_cmd_unx_bus_free 18026 * 18027 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 18028 * 18029 * Context: May be called from interrupt context 18030 */ 18031 18032 static void 18033 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 18034 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18035 { 18036 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 18037 18038 ASSERT(un != NULL); 18039 ASSERT(mutex_owned(SD_MUTEX(un))); 18040 ASSERT(bp != NULL); 18041 ASSERT(xp != NULL); 18042 ASSERT(pktp != NULL); 18043 18044 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18045 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18046 18047 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 18048 sd_print_retry_msg : NULL; 18049 18050 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18051 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18052 } 18053 18054 18055 /* 18056 * Function: sd_pkt_reason_cmd_tag_reject 18057 * 18058 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 18059 * 18060 * Context: May be called from interrupt context 18061 */ 18062 18063 static void 18064 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 18065 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18066 { 18067 ASSERT(un != NULL); 18068 ASSERT(mutex_owned(SD_MUTEX(un))); 18069 ASSERT(bp != NULL); 18070 ASSERT(xp != NULL); 18071 ASSERT(pktp != NULL); 18072 18073 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18074 pktp->pkt_flags = 0; 18075 un->un_tagflags = 0; 18076 if (un->un_f_opt_queueing == TRUE) { 18077 un->un_throttle = min(un->un_throttle, 3); 18078 } else { 18079 un->un_throttle = 1; 18080 } 18081 mutex_exit(SD_MUTEX(un)); 18082 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 18083 mutex_enter(SD_MUTEX(un)); 18084 18085 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18086 18087 /* Legacy behavior not to check retry counts here. */ 18088 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 18089 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18090 } 18091 18092 18093 /* 18094 * Function: sd_pkt_reason_default 18095 * 18096 * Description: Default recovery actions for SCSA pkt_reason values that 18097 * do not have more explicit recovery actions. 
18098  *
18099  * Context: May be called from interrupt context
18100  */
18101 
18102 static void
18103 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
18104     struct sd_xbuf *xp, struct scsi_pkt *pktp)
18105 {
18106     ASSERT(un != NULL);
18107     ASSERT(mutex_owned(SD_MUTEX(un)));
18108     ASSERT(bp != NULL);
18109     ASSERT(xp != NULL);
18110     ASSERT(pktp != NULL);
18111 
18112     SD_UPDATE_ERRSTATS(un, sd_transerrs);
18113     sd_reset_target(un, pktp);
18114 
18115     SD_UPDATE_RESERVATION_STATUS(un, pktp);
18116 
18117     sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
18118         sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
18119 }
18120 
18121 
18122 
18123 /*
18124  * Function: sd_pkt_status_check_condition
18125  *
18126  * Description: Recovery actions for a "STATUS_CHECK" SCSI command status.
18127  *
18128  * Context: May be called from interrupt context
18129  */
18130 
18131 static void
18132 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
18133     struct sd_xbuf *xp, struct scsi_pkt *pktp)
18134 {
18135     ASSERT(un != NULL);
18136     ASSERT(mutex_owned(SD_MUTEX(un)));
18137     ASSERT(bp != NULL);
18138     ASSERT(xp != NULL);
18139     ASSERT(pktp != NULL);
18140 
18141     SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
18142         "entry: buf:0x%p xp:0x%p\n", bp, xp);
18143 
18144     /*
18145      * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
18146      * command will be retried after the request sense). Otherwise, retry
18147      * the command. Note: we are issuing the request sense even though the
18148      * retry limit may have been reached for the failed command.
18149      */
18150     if (un->un_f_arq_enabled == FALSE) {
18151         SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
18152             "no ARQ, sending request sense command\n");
18153         sd_send_request_sense_command(un, bp, pktp);
18154     } else {
18155         SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
18156             "ARQ, retrying request sense command\n");
18157 #if defined(__i386) || defined(__amd64)
18158         /*
18159          * The SD_RETRY_DELAY value needs to be adjusted here
18160          * whenever SD_RETRY_DELAY changes in sddef.h
18161          */
18162         sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 0,
18163             un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
18164             NULL);
18165 #else
18166         sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
18167             0, SD_RETRY_DELAY, NULL);
18168 #endif
18169     }
18170 
18171     SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
18172 }
18173 
18174 
18175 /*
18176  * Function: sd_pkt_status_busy
18177  *
18178  * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
18179  *
18180  * Context: May be called from interrupt context
18181  */
18182 
18183 static void
18184 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
18185     struct scsi_pkt *pktp)
18186 {
18187     ASSERT(un != NULL);
18188     ASSERT(mutex_owned(SD_MUTEX(un)));
18189     ASSERT(bp != NULL);
18190     ASSERT(xp != NULL);
18191     ASSERT(pktp != NULL);
18192 
18193     SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18194         "sd_pkt_status_busy: entry\n");
18195 
18196     /* If retries are exhausted, just fail the command. */
18197     if (xp->xb_retry_count >= un->un_busy_retry_count) {
18198         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18199             "device busy too long\n");
18200         sd_return_failed_command(un, bp, EIO);
18201         SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18202             "sd_pkt_status_busy: exit\n");
18203         return;
18204     }
18205     xp->xb_retry_count++;
18206 
18207     /*
18208      * Try to reset the target.
However, we do not want to perform 18209 * more than one reset if the device continues to fail. The reset 18210 * will be performed when the retry count reaches the reset 18211 * threshold. This threshold should be set such that at least 18212 * one retry is issued before the reset is performed. 18213 */ 18214 if (xp->xb_retry_count == 18215 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 18216 int rval = 0; 18217 mutex_exit(SD_MUTEX(un)); 18218 if (un->un_f_allow_bus_device_reset == TRUE) { 18219 /* 18220 * First try to reset the LUN; if we cannot then 18221 * try to reset the target. 18222 */ 18223 if (un->un_f_lun_reset_enabled == TRUE) { 18224 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18225 "sd_pkt_status_busy: RESET_LUN\n"); 18226 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18227 } 18228 if (rval == 0) { 18229 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18230 "sd_pkt_status_busy: RESET_TARGET\n"); 18231 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18232 } 18233 } 18234 if (rval == 0) { 18235 /* 18236 * If the RESET_LUN and/or RESET_TARGET failed, 18237 * try RESET_ALL 18238 */ 18239 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18240 "sd_pkt_status_busy: RESET_ALL\n"); 18241 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 18242 } 18243 mutex_enter(SD_MUTEX(un)); 18244 if (rval == 0) { 18245 /* 18246 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 18247 * At this point we give up & fail the command. 18248 */ 18249 sd_return_failed_command(un, bp, EIO); 18250 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18251 "sd_pkt_status_busy: exit (failed cmd)\n"); 18252 return; 18253 } 18254 } 18255 18256 /* 18257 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 18258 * we have already checked the retry counts above. 18259 */ 18260 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 18261 EIO, SD_BSY_TIMEOUT, NULL); 18262 18263 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18264 "sd_pkt_status_busy: exit\n"); 18265 } 18266 18267 18268 /* 18269 * Function: sd_pkt_status_reservation_conflict 18270 * 18271 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 18272 * command status. 18273 * 18274 * Context: May be called from interrupt context 18275 */ 18276 18277 static void 18278 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 18279 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18280 { 18281 ASSERT(un != NULL); 18282 ASSERT(mutex_owned(SD_MUTEX(un))); 18283 ASSERT(bp != NULL); 18284 ASSERT(xp != NULL); 18285 ASSERT(pktp != NULL); 18286 18287 /* 18288 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 18289 * conflict could be due to various reasons like incorrect keys, not 18290 * registered or not reserved etc. So, we return EACCES to the caller. 18291 */ 18292 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 18293 int cmd = SD_GET_PKT_OPCODE(pktp); 18294 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 18295 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 18296 sd_return_failed_command(un, bp, EACCES); 18297 return; 18298 } 18299 } 18300 18301 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 18302 18303 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 18304 if (sd_failfast_enable != 0) { 18305 /* By definition, we must panic here.... 
 */
18306             panic("Reservation Conflict");
18307             /*NOTREACHED*/
18308         }
18309         SD_ERROR(SD_LOG_IO, un,
18310             "sd_handle_resv_conflict: Disk Reserved\n");
18311         sd_return_failed_command(un, bp, EACCES);
18312         return;
18313     }
18314 
18315     /*
18316      * 1147670: retry only if sd_retry_on_reservation_conflict
18317      * property is set (default is 1). Retries will not succeed
18318      * on a disk reserved by another initiator. HA systems
18319      * may reset this via sd.conf to avoid these retries.
18320      *
18321      * Note: The legacy return code for this failure is EIO, however EACCES
18322      * seems more appropriate for a reservation conflict.
18323      */
18324     if (sd_retry_on_reservation_conflict == 0) {
18325         SD_ERROR(SD_LOG_IO, un,
18326             "sd_handle_resv_conflict: Device Reserved\n");
18327         sd_return_failed_command(un, bp, EIO);
18328         return;
18329     }
18330 
18331     /*
18332      * Retry the command if we can.
18333      *
18334      * Note: The legacy return code for this failure is EIO, however EACCES
18335      * seems more appropriate for a reservation conflict.
18336      */
18337     sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
18338         (clock_t)2, NULL);
18339 }
18340 
18341 
18342 
18343 /*
18344  * Function: sd_pkt_status_qfull
18345  *
18346  * Description: Handle a QUEUE FULL condition from the target. This can
18347  *		occur if the HBA does not handle the queue full condition.
18348  *		(Basically this means third-party HBAs, since Sun HBAs will
18349  *		handle the queue full condition themselves.) Note that if
18350  *		there are some commands already in the transport, then the
18351  *		queue full has occurred because the queue for this nexus is
18352  *		actually full. If there are no commands in the transport,
18353  *		then the queue full results from some other initiator or lun
18354  *		consuming all the resources at the target.
18355  *
18356  * Context: May be called from interrupt context
18357  */
18358 
18359 static void
18360 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
18361     struct sd_xbuf *xp, struct scsi_pkt *pktp)
18362 {
18363     ASSERT(un != NULL);
18364     ASSERT(mutex_owned(SD_MUTEX(un)));
18365     ASSERT(bp != NULL);
18366     ASSERT(xp != NULL);
18367     ASSERT(pktp != NULL);
18368 
18369     SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18370         "sd_pkt_status_qfull: entry\n");
18371 
18372     /*
18373      * Just lower the QFULL throttle and retry the command. Note that
18374      * we do not limit the number of retries here.
18375      */
18376     sd_reduce_throttle(un, SD_THROTTLE_QFULL);
18377     sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0,
18378         SD_RESTART_TIMEOUT, NULL);
18379 
18380     SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18381         "sd_pkt_status_qfull: exit\n");
18382 }
18383 
18384 
18385 /*
18386  * Function: sd_reset_target
18387  *
18388  * Description: Issue a scsi_reset(9F), with either RESET_LUN,
18389  *		RESET_TARGET, or RESET_ALL.
18390  *
18391  * Context: May be called under interrupt context.
18392  */
18393 
18394 static void
18395 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp)
18396 {
18397     int rval = 0;
18398 
18399     ASSERT(un != NULL);
18400     ASSERT(mutex_owned(SD_MUTEX(un)));
18401     ASSERT(pktp != NULL);
18402 
18403     SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n");
18404 
18405     /*
18406      * No need to reset if the transport layer has already done so.
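     * Note: scsi_reset(9F) returns 1 on success and 0 on failure; that
     * is why each "rval == 0" check below falls through to the next,
     * wider reset scope (first the LUN, then the target, and finally
     * RESET_ALL as a last resort).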
18407      */
18408     if ((pktp->pkt_statistics &
18409         (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) {
18410         SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18411             "sd_reset_target: no reset\n");
18412         return;
18413     }
18414 
18415     mutex_exit(SD_MUTEX(un));
18416 
18417     if (un->un_f_allow_bus_device_reset == TRUE) {
18418         if (un->un_f_lun_reset_enabled == TRUE) {
18419             SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18420                 "sd_reset_target: RESET_LUN\n");
18421             rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
18422         }
18423         if (rval == 0) {
18424             SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18425                 "sd_reset_target: RESET_TARGET\n");
18426             rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
18427         }
18428     }
18429 
18430     if (rval == 0) {
18431         SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
18432             "sd_reset_target: RESET_ALL\n");
18433         (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
18434     }
18435 
18436     mutex_enter(SD_MUTEX(un));
18437 
18438     SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n");
18439 }
18440 
18441 
18442 /*
18443  * Function: sd_media_change_task
18444  *
18445  * Description: Recovery action for a CDROM to become available.
18446  *
18447  * Context: Executes in a taskq() thread context
18448  */
18449 
18450 static void
18451 sd_media_change_task(void *arg)
18452 {
18453     struct scsi_pkt	*pktp = arg;
18454     struct sd_lun	*un;
18455     struct buf	*bp;
18456     struct sd_xbuf	*xp;
18457     int		err = 0;
18458     int		retry_count = 0;
18459     int		retry_limit = SD_UNIT_ATTENTION_RETRY / 10;
18460     struct sd_sense_info	si;
18461 
18462     ASSERT(pktp != NULL);
18463     bp = (struct buf *)pktp->pkt_private;
18464     ASSERT(bp != NULL);
18465     xp = SD_GET_XBUF(bp);
18466     ASSERT(xp != NULL);
18467     un = SD_GET_UN(bp);
18468     ASSERT(un != NULL);
18469     ASSERT(!mutex_owned(SD_MUTEX(un)));
18470     ASSERT(ISREMOVABLE(un));
18471 
18472     si.ssi_severity = SCSI_ERR_INFO;
18473     si.ssi_pfa_flag = FALSE;
18474 
18475     /*
18476      * When a reset is issued on a CDROM, it takes a long time to
18477      * recover. The first few attempts to read the capacity and other
18478      * things related to handling the unit attention fail (with an ASC
18479      * of 0x04 and an ASCQ of 0x01). In that case we want to do enough
18480      * retries, while still limiting the retries in other cases of
18481      * genuine failures, such as no media in the drive.
18482      */
18483     while (retry_count++ < retry_limit) {
18484         if ((err = sd_handle_mchange(un)) == 0) {
18485             break;
18486         }
18487         if (err == EAGAIN) {
18488             retry_limit = SD_UNIT_ATTENTION_RETRY;
18489         }
18490         /* Sleep for 0.5 sec. & try again */
18491         delay(drv_usectohz(500000));
18492     }
18493 
18494     /*
18495      * Dispatch (retry or fail) the original command here,
18496      * along with appropriate console messages....
18497      *
18498      * Must grab the mutex before calling sd_retry_command,
18499      * sd_print_sense_msg and sd_return_failed_command.
18500      */
18501     mutex_enter(SD_MUTEX(un));
18502     if (err != SD_CMD_SUCCESS) {
18503         SD_UPDATE_ERRSTATS(un, sd_harderrs);
18504         SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
18505         si.ssi_severity = SCSI_ERR_FATAL;
18506         sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18507         sd_return_failed_command(un, bp, EIO);
18508     } else {
18509         sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
18510             &si, EIO, (clock_t)0, NULL);
18511     }
18512     mutex_exit(SD_MUTEX(un));
18513 }
18514 
18515 
18516 
18517 /*
18518  * Function: sd_handle_mchange
18519  *
18520  * Description: Perform geometry validation & other recovery when CDROM
18521  *		has been removed from drive.
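 *		The recovery sequence below is, in order: re-read the
 *		capacity, update the cached block info and the capacity
 *		kstat, re-validate the geometry, and finally attempt to
 *		re-lock the door.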
18522 * 18523 * Return Code: 0 for success 18524 * errno-type return code of either sd_send_scsi_DOORLOCK() or 18525 * sd_send_scsi_READ_CAPACITY() 18526 * 18527 * Context: Executes in a taskq() thread context 18528 */ 18529 18530 static int 18531 sd_handle_mchange(struct sd_lun *un) 18532 { 18533 uint64_t capacity; 18534 uint32_t lbasize; 18535 int rval; 18536 18537 ASSERT(!mutex_owned(SD_MUTEX(un))); 18538 ASSERT(ISREMOVABLE(un)); 18539 18540 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 18541 SD_PATH_DIRECT_PRIORITY)) != 0) { 18542 return (rval); 18543 } 18544 18545 mutex_enter(SD_MUTEX(un)); 18546 sd_update_block_info(un, lbasize, capacity); 18547 18548 if (un->un_errstats != NULL) { 18549 struct sd_errstats *stp = 18550 (struct sd_errstats *)un->un_errstats->ks_data; 18551 stp->sd_capacity.value.ui64 = (uint64_t) 18552 ((uint64_t)un->un_blockcount * 18553 (uint64_t)un->un_tgt_blocksize); 18554 } 18555 18556 /* 18557 * Note: Maybe let the strategy/partitioning chain worry about getting 18558 * valid geometry. 18559 */ 18560 un->un_f_geometry_is_valid = FALSE; 18561 (void) sd_validate_geometry(un, SD_PATH_DIRECT_PRIORITY); 18562 if (un->un_f_geometry_is_valid == FALSE) { 18563 mutex_exit(SD_MUTEX(un)); 18564 return (EIO); 18565 } 18566 18567 mutex_exit(SD_MUTEX(un)); 18568 18569 /* 18570 * Try to lock the door 18571 */ 18572 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 18573 SD_PATH_DIRECT_PRIORITY)); 18574 } 18575 18576 18577 /* 18578 * Function: sd_send_scsi_DOORLOCK 18579 * 18580 * Description: Issue the scsi DOOR LOCK command 18581 * 18582 * Arguments: un - pointer to driver soft state (unit) structure for 18583 * this target. 18584 * flag - SD_REMOVAL_ALLOW 18585 * SD_REMOVAL_PREVENT 18586 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18587 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18588 * to use the USCSI "direct" chain and bypass the normal 18589 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18590 * command is issued as part of an error recovery action. 18591 * 18592 * Return Code: 0 - Success 18593 * errno return code from sd_send_scsi_cmd() 18594 * 18595 * Context: Can sleep. 
18596  */
18597 
18598 static int
18599 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag)
18600 {
18601     uchar_t	cdb_buf[CDB_GROUP0];
18602     struct uscsi_cmd	ucmd_buf;
18603     struct scsi_extended_sense	sense_buf;
18604     int	status;
18605 
18606     ASSERT(un != NULL);
18607     ASSERT(!mutex_owned(SD_MUTEX(un)));
18608 
18609     SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un);
18610 
18611     /* already determined doorlock is not supported, fake success */
18612     if (un->un_f_doorlock_supported == FALSE) {
18613         return (0);
18614     }
18615 
18616     bzero(cdb_buf, sizeof (cdb_buf));
18617     bzero(&ucmd_buf, sizeof (ucmd_buf));
18618 
18619     cdb_buf[0] = SCMD_DOORLOCK;
18620     cdb_buf[4] = (uchar_t)flag;
18621 
18622     ucmd_buf.uscsi_cdb = (char *)cdb_buf;
18623     ucmd_buf.uscsi_cdblen = sizeof (cdb_buf);
18624     ucmd_buf.uscsi_bufaddr = NULL;
18625     ucmd_buf.uscsi_buflen = 0;
18626     ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
18627     ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
18628     ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
18629     ucmd_buf.uscsi_timeout = 15;
18630 
18631     SD_TRACE(SD_LOG_IO, un,
18632         "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n");
18633 
18634     status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
18635         UIO_SYSSPACE, UIO_SYSSPACE, path_flag);
18636 
18637     if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
18638         (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
18639         (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) {
18640         /* fake success and skip subsequent doorlock commands */
18641         un->un_f_doorlock_supported = FALSE;
18642         return (0);
18643     }
18644 
18645     return (status);
18646 }
18647 
18648 
18649 /*
18650  * Function: sd_send_scsi_READ_CAPACITY
18651  *
18652  * Description: This routine uses the scsi READ CAPACITY command to determine
18653  *		the device capacity in number of blocks and the device native
18654  *		block size. If this function returns a failure, then the
18655  *		values in *capp and *lbap are undefined. If the capacity
18656  *		returned is 0xffffffff then the lun is too large for a
18657  *		normal READ CAPACITY command and the results of a
18658  *		READ CAPACITY 16 will be used instead.
18659  *
18660  * Arguments: un - ptr to soft state struct for the target
18661  *		capp - ptr to unsigned 64-bit variable to receive the
18662  *			capacity value from the command.
18663  *		lbap - ptr to unsigned 32-bit variable to receive the
18664  *			block size value from the command
18665  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
18666  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
18667  *			to use the USCSI "direct" chain and bypass the normal
18668  *			command waitq. SD_PATH_DIRECT_PRIORITY is used when this
18669  *			command is issued as part of an error recovery action.
18670  *
18671  * Return Code: 0 - Success
18672  *		EIO - IO error
18673  *		EACCES - Reservation conflict detected
18674  *		EAGAIN - Device is becoming ready
18675  *		errno return code from sd_send_scsi_cmd()
18676  *
18677  * Context: Can sleep. Blocks until command completes.
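 *
 * Worked example (hypothetical device, assuming an un_sys_blocksize
 * of 512): if the returned data holds a last LBA of 0x003FFFFF in
 * bytes 0-3 and a block length of 512 in bytes 4-7, the code below
 * decodes
 *
 *	capacity = BE_32(capacity_buf[0]) = 0x003FFFFF
 *	lbasize  = BE_32(capacity_buf[1]) = 512
 *
 * and later scales the block count to (0x003FFFFF + 1) * (512 / 512),
 * i.e. 0x00400000 blocks in units of un_sys_blocksize.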
18678 */ 18679 18680 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 18681 18682 static int 18683 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap, 18684 int path_flag) 18685 { 18686 struct scsi_extended_sense sense_buf; 18687 struct uscsi_cmd ucmd_buf; 18688 uchar_t cdb_buf[CDB_GROUP1]; 18689 uint32_t *capacity_buf; 18690 uint64_t capacity; 18691 uint32_t lbasize; 18692 int status; 18693 18694 ASSERT(un != NULL); 18695 ASSERT(!mutex_owned(SD_MUTEX(un))); 18696 ASSERT(capp != NULL); 18697 ASSERT(lbap != NULL); 18698 18699 SD_TRACE(SD_LOG_IO, un, 18700 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 18701 18702 /* 18703 * First send a READ_CAPACITY command to the target. 18704 * (This command is mandatory under SCSI-2.) 18705 * 18706 * Set up the CDB for the READ_CAPACITY command. The Partial 18707 * Medium Indicator bit is cleared. The address field must be 18708 * zero if the PMI bit is zero. 18709 */ 18710 bzero(cdb_buf, sizeof (cdb_buf)); 18711 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18712 18713 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 18714 18715 cdb_buf[0] = SCMD_READ_CAPACITY; 18716 18717 ucmd_buf.uscsi_cdb = (char *)cdb_buf; 18718 ucmd_buf.uscsi_cdblen = sizeof (cdb_buf); 18719 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 18720 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 18721 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18722 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 18723 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18724 ucmd_buf.uscsi_timeout = 60; 18725 18726 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 18727 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 18728 18729 switch (status) { 18730 case 0: 18731 /* Return failure if we did not get valid capacity data. */ 18732 if (ucmd_buf.uscsi_resid != 0) { 18733 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18734 return (EIO); 18735 } 18736 18737 /* 18738 * Read capacity and block size from the READ CAPACITY 10 data. 18739 * This data may be adjusted later due to device specific 18740 * issues. 18741 * 18742 * According to the SCSI spec, the READ CAPACITY 10 18743 * command returns the following: 18744 * 18745 * bytes 0-3: Maximum logical block address available. 18746 * (MSB in byte:0 & LSB in byte:3) 18747 * 18748 * bytes 4-7: Block length in bytes 18749 * (MSB in byte:4 & LSB in byte:7) 18750 * 18751 */ 18752 capacity = BE_32(capacity_buf[0]); 18753 lbasize = BE_32(capacity_buf[1]); 18754 18755 /* 18756 * Done with capacity_buf 18757 */ 18758 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18759 18760 /* 18761 * if the reported capacity is set to all 0xf's, then 18762 * this disk is too large and requires SBC-2 commands. 18763 * Reissue the request using READ CAPACITY 16. 18764 */ 18765 if (capacity == 0xffffffff) { 18766 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 18767 &lbasize, path_flag); 18768 if (status != 0) { 18769 return (status); 18770 } 18771 } 18772 break; /* Success! 
 */
18773     case EIO:
18774         switch (ucmd_buf.uscsi_status) {
18775         case STATUS_RESERVATION_CONFLICT:
18776             status = EACCES;
18777             break;
18778         case STATUS_CHECK:
18779             /*
18780              * Check condition; look for ASC/ASCQ of 0x04/0x01
18781              * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
18782              */
18783             if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
18784                 (sense_buf.es_add_code == 0x04) &&
18785                 (sense_buf.es_qual_code == 0x01)) {
18786                 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
18787                 return (EAGAIN);
18788             }
18789             break;
18790         default:
18791             break;
18792         }
18793         /* FALLTHRU */
18794     default:
18795         kmem_free(capacity_buf, SD_CAPACITY_SIZE);
18796         return (status);
18797     }
18798 
18799     /*
18800      * Some ATAPI CD-ROM drives report inaccurate LBA size values
18801      * (2352 and 0 are common), so for these devices always force the
18802      * value to 2048 as required by the ATAPI specs.
18803      */
18804     if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
18805         lbasize = 2048;
18806     }
18807 
18808     /*
18809      * Get the maximum LBA value from the READ CAPACITY data.
18810      * Here we assume that the Partial Medium Indicator (PMI) bit
18811      * was cleared when issuing the command. This means that the LBA
18812      * returned from the device is the LBA of the last logical block
18813      * on the logical unit. The actual logical block count will be
18814      * this value plus one.
18815      *
18816      * Currently the capacity is saved in terms of un->un_sys_blocksize,
18817      * so scale the capacity value to reflect this.
18818      */
18819     capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);
18820 
18821 #if defined(__i386) || defined(__amd64)
18822     /*
18823      * On x86, compensate for off-by-1 error (number of sectors on
18824      * media) (1175930)
18825      */
18826     if (!ISREMOVABLE(un) && (lbasize == un->un_sys_blocksize)) {
18827         capacity -= 1;
18828     }
18829 #endif
18830 
18831     /*
18832      * Copy the values from the READ CAPACITY command into the space
18833      * provided by the caller.
18834      */
18835     *capp = capacity;
18836     *lbap = lbasize;
18837 
18838     SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
18839         "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
18840 
18841     /*
18842      * Both the lbasize and capacity from the device must be nonzero,
18843      * otherwise we assume that the values are not valid and return
18844      * failure to the caller. (4203735)
18845      */
18846     if ((capacity == 0) || (lbasize == 0)) {
18847         return (EIO);
18848     }
18849 
18850     return (0);
18851 }
18852 
18853 /*
18854  * Function: sd_send_scsi_READ_CAPACITY_16
18855  *
18856  * Description: This routine uses the scsi READ CAPACITY 16 command to
18857  *		determine the device capacity in number of blocks and the
18858  *		device native block size. If this function returns a failure,
18859  *		then the values in *capp and *lbap are undefined.
18860  *		This routine should always be called by
18861  *		sd_send_scsi_READ_CAPACITY which will apply any device
18862  *		specific adjustments to capacity and lbasize.
18863  *
18864  * Arguments: un - ptr to soft state struct for the target
18865  *		capp - ptr to unsigned 64-bit variable to receive the
18866  *			capacity value from the command.
18867  *		lbap - ptr to unsigned 32-bit variable to receive the
18868  *			block size value from the command
18869  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
18870  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
18871  *			to use the USCSI "direct" chain and bypass the normal
18872  *			command waitq.
SD_PATH_DIRECT_PRIORITY is used when
18873  *			this command is issued as part of an error recovery
18874  *			action.
18875  *
18876  * Return Code: 0 - Success
18877  *		EIO - IO error
18878  *		EACCES - Reservation conflict detected
18879  *		EAGAIN - Device is becoming ready
18880  *		errno return code from sd_send_scsi_cmd()
18881  *
18882  * Context: Can sleep. Blocks until command completes.
18883  */
18884 
18885 #define	SD_CAPACITY_16_SIZE	sizeof (struct scsi_capacity_16)
18886 
18887 static int
18888 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
18889     uint32_t *lbap, int path_flag)
18890 {
18891     struct scsi_extended_sense	sense_buf;
18892     struct uscsi_cmd	ucmd_buf;
18893     uchar_t	cdb_buf[CDB_GROUP4];
18894     uint64_t	*capacity16_buf;
18895     uint64_t	capacity;
18896     uint32_t	lbasize;
18897     int		status;
18898 
18899     ASSERT(un != NULL);
18900     ASSERT(!mutex_owned(SD_MUTEX(un)));
18901     ASSERT(capp != NULL);
18902     ASSERT(lbap != NULL);
18903 
18904     SD_TRACE(SD_LOG_IO, un,
18905         "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);
18906 
18907     /*
18908      * First send a READ_CAPACITY_16 command to the target.
18909      *
18910      * Set up the CDB for the READ_CAPACITY_16 command. The Partial
18911      * Medium Indicator bit is cleared. The address field must be
18912      * zero if the PMI bit is zero.
18913      */
18914     bzero(cdb_buf, sizeof (cdb_buf));
18915     bzero(&ucmd_buf, sizeof (ucmd_buf));
18916 
18917     capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
18918 
18919     ucmd_buf.uscsi_cdb = (char *)cdb_buf;
18920     ucmd_buf.uscsi_cdblen = sizeof (cdb_buf);
18921     ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
18922     ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
18923     ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
18924     ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
18925     ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
18926     ucmd_buf.uscsi_timeout = 60;
18927 
18928     /*
18929      * Read Capacity (16) is a Service Action In command. One
18930      * command byte (0x9E) is overloaded for multiple operations,
18931      * with the second CDB byte specifying the desired operation.
18932      */
18933     cdb_buf[0] = SCMD_SVC_ACTION_IN_G4;
18934     cdb_buf[1] = SSVC_ACTION_READ_CAPACITY_G4;
18935 
18936     /*
18937      * Fill in allocation length field
18938      */
18939     cdb_buf[10] = (uchar_t)((ucmd_buf.uscsi_buflen & 0xff000000) >> 24);
18940     cdb_buf[11] = (uchar_t)((ucmd_buf.uscsi_buflen & 0x00ff0000) >> 16);
18941     cdb_buf[12] = (uchar_t)((ucmd_buf.uscsi_buflen & 0x0000ff00) >> 8);
18942     cdb_buf[13] = (uchar_t)(ucmd_buf.uscsi_buflen & 0x000000ff);
18943 
18944     status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
18945         UIO_SYSSPACE, UIO_SYSSPACE, path_flag);
18946 
18947     switch (status) {
18948     case 0:
18949         /* Return failure if we did not get valid capacity data. */
18950         if (ucmd_buf.uscsi_resid > 20) {
18951             kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
18952             return (EIO);
18953         }
18954 
18955         /*
18956          * Read capacity and block size from the READ CAPACITY 16 data.
18957          * This data may be adjusted later due to device specific
18958          * issues.
18959          *
18960          * According to the SCSI spec, the READ CAPACITY 16
18961          * command returns the following:
18962          *
18963          *	bytes 0-7: Maximum logical block address available.
18964 * (MSB in byte:0 & LSB in byte:7) 18965 * 18966 * bytes 8-11: Block length in bytes 18967 * (MSB in byte:8 & LSB in byte:11) 18968 * 18969 */ 18970 capacity = BE_64(capacity16_buf[0]); 18971 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 18972 18973 /* 18974 * Done with capacity16_buf 18975 */ 18976 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 18977 18978 /* 18979 * if the reported capacity is set to all 0xf's, then 18980 * this disk is too large. This could only happen with 18981 * a device that supports LBAs larger than 64 bits which 18982 * are not defined by any current T10 standards. 18983 */ 18984 if (capacity == 0xffffffffffffffff) { 18985 return (EIO); 18986 } 18987 break; /* Success! */ 18988 case EIO: 18989 switch (ucmd_buf.uscsi_status) { 18990 case STATUS_RESERVATION_CONFLICT: 18991 status = EACCES; 18992 break; 18993 case STATUS_CHECK: 18994 /* 18995 * Check condition; look for ASC/ASCQ of 0x04/0x01 18996 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 18997 */ 18998 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18999 (sense_buf.es_add_code == 0x04) && 19000 (sense_buf.es_qual_code == 0x01)) { 19001 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19002 return (EAGAIN); 19003 } 19004 break; 19005 default: 19006 break; 19007 } 19008 /* FALLTHRU */ 19009 default: 19010 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19011 return (status); 19012 } 19013 19014 *capp = capacity; 19015 *lbap = lbasize; 19016 19017 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 19018 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 19019 19020 return (0); 19021 } 19022 19023 19024 /* 19025 * Function: sd_send_scsi_START_STOP_UNIT 19026 * 19027 * Description: Issue a scsi START STOP UNIT command to the target. 19028 * 19029 * Arguments: un - pointer to driver soft state (unit) structure for 19030 * this target. 19031 * flag - SD_TARGET_START 19032 * SD_TARGET_STOP 19033 * SD_TARGET_EJECT 19034 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19035 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19036 * to use the USCSI "direct" chain and bypass the normal 19037 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19038 * command is issued as part of an error recovery action. 19039 * 19040 * Return Code: 0 - Success 19041 * EIO - IO error 19042 * EACCES - Reservation conflict detected 19043 * ENXIO - Not Ready, medium not present 19044 * errno return code from sd_send_scsi_cmd() 19045 * 19046 * Context: Can sleep. 
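 *
 * Typical usage during spin-up recovery (a sketch; the actual call
 * appears in sd_start_stop_unit_task() below):
 *
 *	(void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
 *	    SD_PATH_DIRECT_PRIORITY);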
19047 */ 19048 19049 static int 19050 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 19051 { 19052 struct scsi_extended_sense sense_buf; 19053 uchar_t cdb_buf[CDB_GROUP0]; 19054 struct uscsi_cmd ucmd_buf; 19055 int status; 19056 19057 ASSERT(un != NULL); 19058 ASSERT(!mutex_owned(SD_MUTEX(un))); 19059 19060 SD_TRACE(SD_LOG_IO, un, 19061 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 19062 19063 if (ISREMOVABLE(un) && 19064 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 19065 (un->un_f_start_stop_supported != TRUE)) { 19066 return (0); 19067 } 19068 19069 bzero(cdb_buf, sizeof (cdb_buf)); 19070 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19071 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19072 19073 cdb_buf[0] = SCMD_START_STOP; 19074 cdb_buf[4] |= (uchar_t)flag; 19075 19076 ucmd_buf.uscsi_cdb = (char *)cdb_buf; 19077 ucmd_buf.uscsi_cdblen = sizeof (cdb_buf); 19078 ucmd_buf.uscsi_bufaddr = NULL; 19079 ucmd_buf.uscsi_buflen = 0; 19080 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19081 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19082 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19083 ucmd_buf.uscsi_timeout = 200; 19084 19085 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19086 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 19087 19088 switch (status) { 19089 case 0: 19090 break; /* Success! */ 19091 case EIO: 19092 switch (ucmd_buf.uscsi_status) { 19093 case STATUS_RESERVATION_CONFLICT: 19094 status = EACCES; 19095 break; 19096 case STATUS_CHECK: 19097 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 19098 switch (sense_buf.es_key) { 19099 case KEY_ILLEGAL_REQUEST: 19100 status = ENOTSUP; 19101 break; 19102 case KEY_NOT_READY: 19103 if (sense_buf.es_add_code == 0x3A) { 19104 status = ENXIO; 19105 } 19106 break; 19107 default: 19108 break; 19109 } 19110 } 19111 break; 19112 default: 19113 break; 19114 } 19115 break; 19116 default: 19117 break; 19118 } 19119 19120 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 19121 19122 return (status); 19123 } 19124 19125 19126 /* 19127 * Function: sd_start_stop_unit_callback 19128 * 19129 * Description: timeout(9F) callback to begin recovery process for a 19130 * device that has spun down. 19131 * 19132 * Arguments: arg - pointer to associated softstate struct. 19133 * 19134 * Context: Executes in a timeout(9F) thread context 19135 */ 19136 19137 static void 19138 sd_start_stop_unit_callback(void *arg) 19139 { 19140 struct sd_lun *un = arg; 19141 ASSERT(un != NULL); 19142 ASSERT(!mutex_owned(SD_MUTEX(un))); 19143 19144 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 19145 19146 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 19147 } 19148 19149 19150 /* 19151 * Function: sd_start_stop_unit_task 19152 * 19153 * Description: Recovery procedure when a drive is spun down. 19154 * 19155 * Arguments: arg - pointer to associated softstate struct. 19156 * 19157 * Context: Executes in a taskq() thread context 19158 */ 19159 19160 static void 19161 sd_start_stop_unit_task(void *arg) 19162 { 19163 struct sd_lun *un = arg; 19164 19165 ASSERT(un != NULL); 19166 ASSERT(!mutex_owned(SD_MUTEX(un))); 19167 19168 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 19169 19170 /* 19171 * Some unformatted drives report not ready error, no need to 19172 * restart if format has been initiated. 
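     * (un_f_format_in_progress is sampled under SD_MUTEX just below,
     * and the task simply returns if a format is underway.)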
19173 */ 19174 mutex_enter(SD_MUTEX(un)); 19175 if (un->un_f_format_in_progress == TRUE) { 19176 mutex_exit(SD_MUTEX(un)); 19177 return; 19178 } 19179 mutex_exit(SD_MUTEX(un)); 19180 19181 /* 19182 * When a START STOP command is issued from here, it is part of a 19183 * failure recovery operation and must be issued before any other 19184 * commands, including any pending retries. Thus it must be sent 19185 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 19186 * succeeds or not, we will start I/O after the attempt. 19187 */ 19188 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 19189 SD_PATH_DIRECT_PRIORITY); 19190 19191 /* 19192 * The above call blocks until the START_STOP_UNIT command completes. 19193 * Now that it has completed, we must re-try the original IO that 19194 * received the NOT READY condition in the first place. There are 19195 * three possible conditions here: 19196 * 19197 * (1) The original IO is on un_retry_bp. 19198 * (2) The original IO is on the regular wait queue, and un_retry_bp 19199 * is NULL. 19200 * (3) The original IO is on the regular wait queue, and un_retry_bp 19201 * points to some other, unrelated bp. 19202 * 19203 * For each case, we must call sd_start_cmds() with un_retry_bp 19204 * as the argument. If un_retry_bp is NULL, this will initiate 19205 * processing of the regular wait queue. If un_retry_bp is not NULL, 19206 * then this will process the bp on un_retry_bp. That may or may not 19207 * be the original IO, but that does not matter: the important thing 19208 * is to keep the IO processing going at this point. 19209 * 19210 * Note: This is a very specific error recovery sequence associated 19211 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 19212 * serialize the I/O with completion of the spin-up. 19213 */ 19214 mutex_enter(SD_MUTEX(un)); 19215 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19216 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 19217 un, un->un_retry_bp); 19218 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 19219 sd_start_cmds(un, un->un_retry_bp); 19220 mutex_exit(SD_MUTEX(un)); 19221 19222 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 19223 } 19224 19225 19226 /* 19227 * Function: sd_send_scsi_INQUIRY 19228 * 19229 * Description: Issue the scsi INQUIRY command. 19230 * 19231 * Arguments: un 19232 * bufaddr 19233 * buflen 19234 * evpd 19235 * page_code 19236 * page_length 19237 * 19238 * Return Code: 0 - Success 19239 * errno return code from sd_send_scsi_cmd() 19240 * 19241 * Context: Can sleep. Does not return until command is completed. 
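 *
 * Example (a sketch with a hypothetical buffer size; evpd and
 * page_code are both 0 for standard INQUIRY data):
 *
 *	uchar_t	inqbuf[0xff];
 *	size_t	resid;
 *	int rv = sd_send_scsi_INQUIRY(un, inqbuf, sizeof (inqbuf),
 *	    0, 0, &resid);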
19242  */
19243 
19244 static int
19245 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen,
19246     uchar_t evpd, uchar_t page_code, size_t *residp)
19247 {
19248     uchar_t	cdb_buf[CDB_GROUP0];
19249     struct uscsi_cmd	ucmd_buf;
19250     int	status;
19251 
19252     ASSERT(un != NULL);
19253     ASSERT(!mutex_owned(SD_MUTEX(un)));
19254     ASSERT(bufaddr != NULL);
19255 
19256     SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
19257 
19258     bzero(cdb_buf, sizeof (cdb_buf));
19259     bzero(&ucmd_buf, sizeof (ucmd_buf));
19260     bzero(bufaddr, buflen);
19261 
19262     cdb_buf[0] = SCMD_INQUIRY;
19263     cdb_buf[1] = evpd;
19264     cdb_buf[2] = page_code;
19265     cdb_buf[4] = buflen;
19266 
19267     ucmd_buf.uscsi_cdb = (char *)cdb_buf;
19268     ucmd_buf.uscsi_cdblen = sizeof (cdb_buf);
19269     ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
19270     ucmd_buf.uscsi_buflen = buflen;
19271     ucmd_buf.uscsi_rqbuf = NULL;
19272     ucmd_buf.uscsi_rqlen = 0;
19273     ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
19274     ucmd_buf.uscsi_timeout = 200;	/* Excessive legacy value */
19275 
19276     status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
19277         UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_DIRECT);
19278 
19279     if ((status == 0) && (residp != NULL)) {
19280         *residp = ucmd_buf.uscsi_resid;
19281     }
19282 
19283     SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
19284 
19285     return (status);
19286 }
19287 
19288 
19289 /*
19290  * Function: sd_send_scsi_TEST_UNIT_READY
19291  *
19292  * Description: Issue the scsi TEST UNIT READY command.
19293  *		This routine can be told to set the flag USCSI_DIAGNOSE to
19294  *		prevent retrying failed commands. Use this when the intent
19295  *		is either to check for device readiness, to clear a Unit
19296  *		Attention, or to clear any outstanding sense data.
19297  *		However under specific conditions the expected behavior
19298  *		is for retries to bring a device ready, so use the flag
19299  *		with caution.
19300  *
19301  * Arguments: un
19302  *		flag:	SD_CHECK_FOR_MEDIA: return ENXIO if no media present
19303  *			SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
19304  *			0: don't check for media present, do retries on cmd.
19305  *
19306  * Return Code: 0 - Success
19307  *		EIO - IO error
19308  *		EACCES - Reservation conflict detected
19309  *		ENXIO - Not Ready, medium not present
19310  *		errno return code from sd_send_scsi_cmd()
19311  *
19312  * Context: Can sleep. Does not return until command is completed.
19313  */
19314 
19315 static int
19316 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag)
19317 {
19318     struct scsi_extended_sense	sense_buf;
19319     uchar_t	cdb_buf[CDB_GROUP0];
19320     struct uscsi_cmd	ucmd_buf;
19321     int	status;
19322 
19323     ASSERT(un != NULL);
19324     ASSERT(!mutex_owned(SD_MUTEX(un)));
19325 
19326     SD_TRACE(SD_LOG_IO, un,
19327         "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
19328 
19329     /*
19330      * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
19331      * timeouts when they receive a TUR and the queue is not empty. Check
19332      * the configuration flag set during attach (indicating the drive has
19333      * this firmware bug) and un_ncmds_in_transport before issuing the
19334      * TUR. If there are pending commands, return success; this is a
19335      * bit arbitrary, but it is OK for non-removables (i.e. the elite1
19336      * disks) and non-clustering
19337      * configurations.
19338 */ 19339 if (un->un_f_cfg_tur_check == TRUE) { 19340 mutex_enter(SD_MUTEX(un)); 19341 if (un->un_ncmds_in_transport != 0) { 19342 mutex_exit(SD_MUTEX(un)); 19343 return (0); 19344 } 19345 mutex_exit(SD_MUTEX(un)); 19346 } 19347 19348 bzero(cdb_buf, sizeof (cdb_buf)); 19349 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19350 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19351 19352 cdb_buf[0] = SCMD_TEST_UNIT_READY; 19353 19354 ucmd_buf.uscsi_cdb = (char *)cdb_buf; 19355 ucmd_buf.uscsi_cdblen = sizeof (cdb_buf); 19356 ucmd_buf.uscsi_bufaddr = NULL; 19357 ucmd_buf.uscsi_buflen = 0; 19358 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19359 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19360 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19361 19362 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 19363 if ((flag & SD_DONT_RETRY_TUR) != 0) { 19364 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 19365 } 19366 ucmd_buf.uscsi_timeout = 60; 19367 19368 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19369 UIO_SYSSPACE, UIO_SYSSPACE, 19370 ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : SD_PATH_STANDARD)); 19371 19372 switch (status) { 19373 case 0: 19374 break; /* Success! */ 19375 case EIO: 19376 switch (ucmd_buf.uscsi_status) { 19377 case STATUS_RESERVATION_CONFLICT: 19378 status = EACCES; 19379 break; 19380 case STATUS_CHECK: 19381 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 19382 break; 19383 } 19384 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19385 (sense_buf.es_key == KEY_NOT_READY) && 19386 (sense_buf.es_add_code == 0x3A)) { 19387 status = ENXIO; 19388 } 19389 break; 19390 default: 19391 break; 19392 } 19393 break; 19394 default: 19395 break; 19396 } 19397 19398 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 19399 19400 return (status); 19401 } 19402 19403 19404 /* 19405 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 19406 * 19407 * Description: Issue the scsi PERSISTENT RESERVE IN command. 19408 * 19409 * Arguments: un 19410 * 19411 * Return Code: 0 - Success 19412 * EACCES 19413 * ENOTSUP 19414 * errno return code from sd_send_scsi_cmd() 19415 * 19416 * Context: Can sleep. Does not return until command is completed. 
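 *
 * Example (a sketch): a NULL data_bufp with data_len 0 is legal; the
 * routine then allocates a default key-sized buffer internally, as the
 * code below shows:
 *
 *	int rv = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS,
 *	    0, NULL);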
19417 */ 19418 19419 static int 19420 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd, 19421 uint16_t data_len, uchar_t *data_bufp) 19422 { 19423 struct scsi_extended_sense sense_buf; 19424 struct uscsi_cmd ucmd_buf; 19425 uchar_t cdb_buf[CDB_GROUP1]; 19426 int status; 19427 int no_caller_buf = FALSE; 19428 19429 ASSERT(un != NULL); 19430 ASSERT(!mutex_owned(SD_MUTEX(un))); 19431 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 19432 19433 SD_TRACE(SD_LOG_IO, un, 19434 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 19435 19436 bzero(cdb_buf, sizeof (cdb_buf)); 19437 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19438 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19439 if (data_bufp == NULL) { 19440 /* Allocate a default buf if the caller did not give one */ 19441 ASSERT(data_len == 0); 19442 data_len = MHIOC_RESV_KEY_SIZE; 19443 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 19444 no_caller_buf = TRUE; 19445 } 19446 19447 cdb_buf[0] = SCMD_PERSISTENT_RESERVE_IN; 19448 cdb_buf[1] = usr_cmd; 19449 cdb_buf[7] = (uchar_t)(data_len >> 8); 19450 cdb_buf[8] = (uchar_t)data_len; 19451 19452 ucmd_buf.uscsi_cdb = (char *)cdb_buf; 19453 ucmd_buf.uscsi_cdblen = sizeof (cdb_buf); 19454 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 19455 ucmd_buf.uscsi_buflen = data_len; 19456 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19457 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19458 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19459 ucmd_buf.uscsi_timeout = 60; 19460 19461 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19462 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD); 19463 19464 switch (status) { 19465 case 0: 19466 break; /* Success! */ 19467 case EIO: 19468 switch (ucmd_buf.uscsi_status) { 19469 case STATUS_RESERVATION_CONFLICT: 19470 status = EACCES; 19471 break; 19472 case STATUS_CHECK: 19473 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19474 (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) { 19475 status = ENOTSUP; 19476 } 19477 break; 19478 default: 19479 break; 19480 } 19481 break; 19482 default: 19483 break; 19484 } 19485 19486 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 19487 19488 if (no_caller_buf == TRUE) { 19489 kmem_free(data_bufp, data_len); 19490 } 19491 19492 return (status); 19493 } 19494 19495 19496 /* 19497 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 19498 * 19499 * Description: This routine is the driver entry point for handling CD-ROM 19500 * multi-host persistent reservation requests (MHIOCGRP_INKEYS, 19501 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the 19502 * device. 19503 * 19504 * Arguments: un - Pointer to soft state struct for the target. 19505 * usr_cmd SCSI-3 reservation facility command (one of 19506 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 19507 * SD_SCSI3_PREEMPTANDABORT) 19508 * usr_bufp - user provided pointer register, reserve descriptor or 19509 * preempt and abort structure (mhioc_register_t, 19510 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 19511 * 19512 * Return Code: 0 - Success 19513 * EACCES 19514 * ENOTSUP 19515 * errno return code from sd_send_scsi_cmd() 19516 * 19517 * Context: Can sleep. Does not return until command is completed. 
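 *
 * Example registration (a sketch; "mykey" is a hypothetical
 * caller-supplied key, and the field names follow the mhioc_register_t
 * handling below):
 *
 *	mhioc_register_t reg;
 *	bzero(&reg, sizeof (reg));
 *	bcopy(mykey, reg.newkey.key, MHIOC_RESV_KEY_SIZE);
 *	rv = sd_send_scsi_PERSISTENT_RESERVE_OUT(un, SD_SCSI3_REGISTER,
 *	    (uchar_t *)&reg);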
19518 */ 19519 19520 static int 19521 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 19522 uchar_t *usr_bufp) 19523 { 19524 struct scsi_extended_sense sense_buf; 19525 struct uscsi_cmd ucmd_buf; 19526 uchar_t cdb_buf[CDB_GROUP1]; 19527 int status; 19528 uchar_t data_len = sizeof (sd_prout_t); 19529 sd_prout_t *prp; 19530 19531 ASSERT(un != NULL); 19532 ASSERT(!mutex_owned(SD_MUTEX(un))); 19533 ASSERT(data_len == 24); /* required by scsi spec */ 19534 19535 SD_TRACE(SD_LOG_IO, un, 19536 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 19537 19538 if (usr_bufp == NULL) { 19539 return (EINVAL); 19540 } 19541 19542 bzero(cdb_buf, sizeof (cdb_buf)); 19543 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19544 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19545 prp = kmem_zalloc(data_len, KM_SLEEP); 19546 19547 cdb_buf[0] = SCMD_PERSISTENT_RESERVE_OUT; 19548 cdb_buf[1] = usr_cmd; 19549 cdb_buf[8] = data_len; 19550 19551 ucmd_buf.uscsi_cdb = (char *)cdb_buf; 19552 ucmd_buf.uscsi_cdblen = sizeof (cdb_buf); 19553 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 19554 ucmd_buf.uscsi_buflen = data_len; 19555 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19556 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19557 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 19558 ucmd_buf.uscsi_timeout = 60; 19559 19560 switch (usr_cmd) { 19561 case SD_SCSI3_REGISTER: { 19562 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 19563 19564 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19565 bcopy(ptr->newkey.key, prp->service_key, 19566 MHIOC_RESV_KEY_SIZE); 19567 prp->aptpl = ptr->aptpl; 19568 break; 19569 } 19570 case SD_SCSI3_RESERVE: 19571 case SD_SCSI3_RELEASE: { 19572 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 19573 19574 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19575 prp->scope_address = BE_32(ptr->scope_specific_addr); 19576 cdb_buf[2] = ptr->type; 19577 break; 19578 } 19579 case SD_SCSI3_PREEMPTANDABORT: { 19580 mhioc_preemptandabort_t *ptr = 19581 (mhioc_preemptandabort_t *)usr_bufp; 19582 19583 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19584 bcopy(ptr->victim_key.key, prp->service_key, 19585 MHIOC_RESV_KEY_SIZE); 19586 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 19587 cdb_buf[2] = ptr->resvdesc.type; 19588 ucmd_buf.uscsi_flags |= USCSI_HEAD; 19589 break; 19590 } 19591 case SD_SCSI3_REGISTERANDIGNOREKEY: 19592 { 19593 mhioc_registerandignorekey_t *ptr; 19594 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 19595 bcopy(ptr->newkey.key, 19596 prp->service_key, MHIOC_RESV_KEY_SIZE); 19597 prp->aptpl = ptr->aptpl; 19598 break; 19599 } 19600 default: 19601 ASSERT(FALSE); 19602 break; 19603 } 19604 19605 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19606 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD); 19607 19608 switch (status) { 19609 case 0: 19610 break; /* Success! 
 */
19611     case EIO:
19612         switch (ucmd_buf.uscsi_status) {
19613         case STATUS_RESERVATION_CONFLICT:
19614             status = EACCES;
19615             break;
19616         case STATUS_CHECK:
19617             if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19618                 (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) {
19619                 status = ENOTSUP;
19620             }
19621             break;
19622         default:
19623             break;
19624         }
19625         break;
19626     default:
19627         break;
19628     }
19629 
19630     kmem_free(prp, data_len);
19631     SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n");
19632     return (status);
19633 }
19634 
19635 
19636 /*
19637  * Function: sd_send_scsi_SYNCHRONIZE_CACHE
19638  *
19639  * Description: Issues a scsi SYNCHRONIZE CACHE command to the target
19640  *
19641  * Arguments: un - pointer to the target's soft state struct
19642  *
19643  * Return Code: 0 - success
19644  *		errno-type error code
19645  *
19646  * Context: kernel thread context only.
19647  */
19648 
19649 static int
19650 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un)
19651 {
19652     struct scsi_extended_sense	sense_buf;
19653     uchar_t	cdb_buf[CDB_GROUP1];
19654     struct uscsi_cmd	ucmd_buf;
19655     int	status;
19656 
19657     ASSERT(un != NULL);
19658     ASSERT(!mutex_owned(SD_MUTEX(un)));
19659 
19660     SD_TRACE(SD_LOG_IO, un,
19661         "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un);
19662 
19663     bzero(cdb_buf, sizeof (cdb_buf));
19664     bzero(&ucmd_buf, sizeof (ucmd_buf));
19665     bzero(&sense_buf, sizeof (struct scsi_extended_sense));
19666 
19667     cdb_buf[0] = SCMD_SYNCHRONIZE_CACHE;
19668 
19669     ucmd_buf.uscsi_cdb = (char *)cdb_buf;
19670     ucmd_buf.uscsi_cdblen = sizeof (cdb_buf);
19671     ucmd_buf.uscsi_bufaddr = NULL;
19672     ucmd_buf.uscsi_buflen = 0;
19673     ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19674     ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
19675     ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
19676     ucmd_buf.uscsi_timeout = 240;
19677 
19678     status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
19679         UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_DIRECT);
19680 
19681     switch (status) {
19682     case 0:
19683         break;	/* Success! */
19684     case EIO:
19685         switch (ucmd_buf.uscsi_status) {
19686         case STATUS_RESERVATION_CONFLICT:
19687             /* Ignore reservation conflict */
19688             status = 0;
19689             goto done;
19690 
19691         case STATUS_CHECK:
19692             if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19693                 (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) {
19694                 /* Ignore Illegal Request error */
19695                 status = 0;
19696                 goto done;
19697             }
19698             break;
19699         default:
19700             break;
19701         }
19702         /* FALLTHRU */
19703     default:
19704         /* Ignore error if the media is not present. */
19705         if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) {
19706             status = 0;
19707             goto done;
19708         }
19709         /* If we reach this, we had an error */
19710         scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
19711             "SYNCHRONIZE CACHE command failed (%d)\n", status);
19712         break;
19713     }
19714 
19715 done:
19716     SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: exit\n");
19717 
19718     return (status);
19719 }
19720 
19721 
19722 /*
19723  * Function: sd_send_scsi_GET_CONFIGURATION
19724  *
19725  * Description: Issues the get configuration command to the device.
19726  *		Called from sd_check_for_writable_cd & sd_get_media_info;
19727  *		the caller must ensure that buflen = SD_PROFILE_HEADER_LEN.
19728  * Arguments: un
19729  *		ucmdbuf
19730  *		rqbuf
19731  *		rqbuflen
19732  *		bufaddr
19733  *		buflen
19734  *
19735  * Return Code: 0 - Success
19736  *		errno return code from sd_send_scsi_cmd()
19737  *
19738  * Context: Can sleep.
Does not return until command is completed.
19739  *
19740  */
19741 
19742 static int
19743 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf,
19744     uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen)
19745 {
19746     char	cdb[CDB_GROUP1];
19747     int	status;
19748 
19749     ASSERT(un != NULL);
19750     ASSERT(!mutex_owned(SD_MUTEX(un)));
19751     ASSERT(bufaddr != NULL);
19752     ASSERT(ucmdbuf != NULL);
19753     ASSERT(rqbuf != NULL);
19754 
19755     SD_TRACE(SD_LOG_IO, un,
19756         "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);
19757 
19758     bzero(cdb, sizeof (cdb));
19759     bzero(ucmdbuf, sizeof (struct uscsi_cmd));
19760     bzero(rqbuf, rqbuflen);
19761     bzero(bufaddr, buflen);
19762 
19763     /*
19764      * Set up cdb field for the get configuration command.
19765      */
19766     cdb[0] = SCMD_GET_CONFIGURATION;
19767     cdb[1] = 0x02;  /* Requested Type */
19768     cdb[8] = SD_PROFILE_HEADER_LEN;
19769     ucmdbuf->uscsi_cdb = cdb;
19770     ucmdbuf->uscsi_cdblen = CDB_GROUP1;
19771     ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
19772     ucmdbuf->uscsi_buflen = buflen;
19773     ucmdbuf->uscsi_timeout = sd_io_time;
19774     ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
19775     ucmdbuf->uscsi_rqlen = rqbuflen;
19776     ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;
19777 
19778     status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, UIO_SYSSPACE,
19779         UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD);
19780 
19781     switch (status) {
19782     case 0:
19783         break;  /* Success! */
19784     case EIO:
19785         switch (ucmdbuf->uscsi_status) {
19786         case STATUS_RESERVATION_CONFLICT:
19787             status = EACCES;
19788             break;
19789         default:
19790             break;
19791         }
19792         break;
19793     default:
19794         break;
19795     }
19796 
19797     if (status == 0) {
19798         SD_DUMP_MEMORY(un, SD_LOG_IO,
19799             "sd_send_scsi_GET_CONFIGURATION: data",
19800             (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
19801     }
19802 
19803     SD_TRACE(SD_LOG_IO, un,
19804         "sd_send_scsi_GET_CONFIGURATION: exit\n");
19805 
19806     return (status);
19807 }
19808 
19809 /*
19810  * Function: sd_send_scsi_feature_GET_CONFIGURATION
19811  *
19812  * Description: Issues the get configuration command to the device to
19813  *		retrieve a specific feature. Called from
19814  *		sd_check_for_writable_cd & sd_set_mmc_caps.
19815  * Arguments: un
19816  *		ucmdbuf
19817  *		rqbuf
19818  *		rqbuflen
19819  *		bufaddr
19820  *		buflen
19821  *		feature
19822  *
19823  * Return Code: 0 - Success
19824  *		errno return code from sd_send_scsi_cmd()
19825  *
19826  * Context: Can sleep. Does not return until command is completed.
19827  *
19828  */
19829 static int
19830 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
19831     struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
19832     uchar_t *bufaddr, uint_t buflen, char feature)
19833 {
19834     char	cdb[CDB_GROUP1];
19835     int	status;
19836 
19837     ASSERT(un != NULL);
19838     ASSERT(!mutex_owned(SD_MUTEX(un)));
19839     ASSERT(bufaddr != NULL);
19840     ASSERT(ucmdbuf != NULL);
19841     ASSERT(rqbuf != NULL);
19842 
19843     SD_TRACE(SD_LOG_IO, un,
19844         "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un);
19845 
19846     bzero(cdb, sizeof (cdb));
19847     bzero(ucmdbuf, sizeof (struct uscsi_cmd));
19848     bzero(rqbuf, rqbuflen);
19849     bzero(bufaddr, buflen);
19850 
19851     /*
19852      * Set up cdb field for the get configuration command.
19853 */
19854 cdb[0] = SCMD_GET_CONFIGURATION;
19855 cdb[1] = 0x02; /* Requested Type */
19856 cdb[3] = feature;
19857 cdb[8] = buflen;
19858 ucmdbuf->uscsi_cdb = cdb;
19859 ucmdbuf->uscsi_cdblen = CDB_GROUP1;
19860 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
19861 ucmdbuf->uscsi_buflen = buflen;
19862 ucmdbuf->uscsi_timeout = sd_io_time;
19863 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
19864 ucmdbuf->uscsi_rqlen = rqbuflen;
19865 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19866
19867 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, UIO_SYSSPACE,
19868 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD); 19869
19870 switch (status) { 19871 case 0: 19872 break; /* Success! */
19873 case EIO: 19874 switch (ucmdbuf->uscsi_status) {
19875 case STATUS_RESERVATION_CONFLICT: 19876 status = EACCES; 19877 break;
19878 default: 19879 break; 19880 } 19881 break;
19882 default: 19883 break; 19884 } 19885
19886 if (status == 0) { 19887 SD_DUMP_MEMORY(un, SD_LOG_IO,
19888 "sd_send_scsi_feature_GET_CONFIGURATION: data",
19889 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19890 } 19891
19892 SD_TRACE(SD_LOG_IO, un,
19893 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 19894
19895 return (status); 19896 } 19897 19898
19899 /* 19900 * Function: sd_send_scsi_MODE_SENSE 19901 *
19902 * Description: Utility function for issuing a scsi MODE SENSE command.
19903 * Note: This routine uses a consistent implementation for Group0,
19904 * Group1, and Group2 commands across all platforms. ATAPI devices
19905 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 19906 *
19907 * Arguments: un - pointer to the softstate struct for the target.
19908 * cdbsize - size of the CDB to be used (CDB_GROUP0 (6 byte) or
19909 * CDB_GROUP[1|2] (10 byte)).
19910 * bufaddr - buffer for page data retrieved from the target.
19911 * buflen - size of page to be retrieved.
19912 * page_code - page code of data to be retrieved from the target.
19913 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19914 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19915 * to use the USCSI "direct" chain and bypass the normal
19916 * command waitq. 19917 *
19918 * Return Code: 0 - Success
19919 * errno return code from sd_send_scsi_cmd() 19920 *
19921 * Context: Can sleep. Does not return until command is completed.
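 *
 * For illustration only: a hypothetical caller fetching the caching
 * mode page (page 0x08) might look like the sketch below. The helper
 * name, the buffer sizing, and the choice of SD_PATH_DIRECT are
 * assumptions for the example, not code taken from this driver.
 */
#if 0
static int
sd_example_get_caching_page(struct sd_lun *un)
{
	/* mode header + block descriptor + caching page payload */
	uchar_t	buf[MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
	    sizeof (struct mode_caching)];

	/* Group 0 (6-byte CDB) MODE SENSE for mode page 0x08 */
	return (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, buf,
	    sizeof (buf), MODEPAGE_CACHING, SD_PATH_DIRECT));
}
#endif
/*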
19922 */ 19923 19924 static int
19925 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
19926 size_t buflen, uchar_t page_code, int path_flag)
19927 { 19928 struct scsi_extended_sense sense_buf;
19929 uchar_t cdb_buf[CDB_GROUP1];
19930 struct uscsi_cmd ucmd_buf;
19931 int status; 19932
19933 ASSERT(un != NULL);
19934 ASSERT(!mutex_owned(SD_MUTEX(un)));
19935 ASSERT(bufaddr != NULL);
19936 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
19937 (cdbsize == CDB_GROUP2)); 19938
19939 SD_TRACE(SD_LOG_IO, un,
19940 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 19941
19942 bzero(cdb_buf, sizeof (cdb_buf));
19943 bzero(&ucmd_buf, sizeof (ucmd_buf));
19944 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
19945 bzero(bufaddr, buflen); 19946
19947 if (cdbsize == CDB_GROUP0) {
19948 cdb_buf[0] = SCMD_MODE_SENSE;
19949 cdb_buf[2] = page_code;
19950 cdb_buf[4] = buflen;
19951 } else {
19952 cdb_buf[0] = SCMD_MODE_SENSE_G1;
19953 cdb_buf[2] = page_code;
19954 cdb_buf[7] = (uchar_t)((buflen & 0xFF00) >> 8);
19955 cdb_buf[8] = (uchar_t)(buflen & 0xFF);
19956 } 19957
19958 if ((SD_LUN(un) > 0) && (un->un_sd->sd_inq->inq_ansi == 0x01)) {
19959 cdb_buf[1] |= (SD_LUN(un) << 5);
19960 } 19961
19962 ucmd_buf.uscsi_cdb = (char *)cdb_buf;
19963 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
19964 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
19965 ucmd_buf.uscsi_buflen = buflen;
19966 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19967 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
19968 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
19969 ucmd_buf.uscsi_timeout = 60; 19970
19971 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
19972 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 19973
19974 switch (status) { 19975 case 0: 19976 break; /* Success! */
19977 case EIO: 19978 switch (ucmd_buf.uscsi_status) {
19979 case STATUS_RESERVATION_CONFLICT: 19980 status = EACCES; 19981 break;
19982 default: 19983 break; 19984 } 19985 break;
19986 default: 19987 break; 19988 } 19989
19990 if (status == 0) {
19991 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
19992 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 19993 }
19994 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n"); 19995
19996 return (status); 19997 } 19998 19999
20000 /* 20001 * Function: sd_send_scsi_MODE_SELECT 20002 *
20003 * Description: Utility function for issuing a scsi MODE SELECT command.
20004 * Note: This routine uses a consistent implementation for Group0,
20005 * Group1, and Group2 commands across all platforms. ATAPI devices
20006 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select. 20007 *
20008 * Arguments: un - pointer to the softstate struct for the target.
20009 * cdbsize - size of the CDB to be used (CDB_GROUP0 (6 byte) or
20010 * CDB_GROUP[1|2] (10 byte)).
20011 * bufaddr - buffer for page data retrieved from the target.
20012 * buflen - size of page to be retrieved.
20013 * save_page - boolean to determine if SP bit should be set.
20014 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20015 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20016 * to use the USCSI "direct" chain and bypass the normal
20017 * command waitq. 20018 *
20019 * Return Code: 0 - Success
20020 * errno return code from sd_send_scsi_cmd() 20021 *
20022 * Context: Can sleep. Does not return until command is completed.
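 *
 * Again for illustration only, a hypothetical caller writing a
 * (modified) caching page back, continuing the MODE SENSE sketch
 * above; the helper name and path choice are assumptions:
 */
#if 0
static int
sd_example_save_caching_page(struct sd_lun *un, uchar_t *buf, size_t len)
{
	/* write the modified page back; SD_SAVE_PAGE sets the SP bit */
	return (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, buf, len,
	    SD_SAVE_PAGE, SD_PATH_DIRECT));
}
#endif
/*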
20023 */ 20024 20025 static int
20026 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
20027 size_t buflen, uchar_t save_page, int path_flag)
20028 { 20029 struct scsi_extended_sense sense_buf;
20030 uchar_t cdb_buf[CDB_GROUP1];
20031 struct uscsi_cmd ucmd_buf;
20032 int status; 20033
20034 ASSERT(un != NULL);
20035 ASSERT(!mutex_owned(SD_MUTEX(un)));
20036 ASSERT(bufaddr != NULL);
20037 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
20038 (cdbsize == CDB_GROUP2)); 20039
20040 SD_TRACE(SD_LOG_IO, un,
20041 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 20042
20043 bzero(cdb_buf, sizeof (cdb_buf));
20044 bzero(&ucmd_buf, sizeof (ucmd_buf));
20045 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20046
20047 cdb_buf[1] = 0x10; /* Set the PF bit for many third party drives */ 20048
20049 if (save_page == SD_SAVE_PAGE) {
20050 cdb_buf[1] |= 0x01; /* Set the savepage(SP) bit if given */
20051 } 20052
20053 if (cdbsize == CDB_GROUP0) {
20054 cdb_buf[0] = SCMD_MODE_SELECT;
20055 cdb_buf[4] = buflen;
20056 } else {
20057 cdb_buf[0] = SCMD_MODE_SELECT_G1;
20058 cdb_buf[7] = (uchar_t)((buflen & 0xFF00) >> 8);
20059 cdb_buf[8] = (uchar_t)(buflen & 0xFF);
20060 } 20061
20062 if ((SD_LUN(un) > 0) && (un->un_sd->sd_inq->inq_ansi == 0x01)) {
20063 cdb_buf[1] |= (SD_LUN(un) << 5);
20064 } 20065
20066 ucmd_buf.uscsi_cdb = (char *)cdb_buf;
20067 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
20068 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
20069 ucmd_buf.uscsi_buflen = buflen;
20070 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20071 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20072 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
20073 ucmd_buf.uscsi_timeout = 60; 20074
20075 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
20076 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 20077
20078 switch (status) { 20079 case 0: 20080 break; /* Success! */
20081 case EIO: 20082 switch (ucmd_buf.uscsi_status) {
20083 case STATUS_RESERVATION_CONFLICT: 20084 status = EACCES; 20085 break;
20086 default: 20087 break; 20088 } 20089 break;
20090 default: 20091 break; 20092 } 20093
20094 if (status == 0) {
20095 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
20096 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20097 }
20098 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n"); 20099
20100 return (status); 20101 } 20102 20103
20104 /* 20105 * Function: sd_send_scsi_RDWR 20106 *
20107 * Description: Issue a scsi READ or WRITE command with the given parameters. 20108 *
20109 * Arguments: un: Pointer to the sd_lun struct for the target.
20110 * cmd: SCMD_READ or SCMD_WRITE
20111 * bufaddr: Address of caller's buffer to receive the RDWR data
20112 * buflen: Length of caller's buffer to receive the RDWR data.
20113 * start_block: Block number for the start of the RDWR operation.
20114 * (Assumes target-native block size.)
20115 * residp: Pointer to variable to receive the residual of the
20116 * RDWR operation (may be NULL if no residual is requested).
20117 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20118 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20119 * to use the USCSI "direct" chain and bypass the normal
20120 * command waitq. 20121 *
20122 * Return Code: 0 - Success
20123 * errno return code from sd_send_scsi_cmd() 20124 *
20125 * Context: Can sleep. Does not return until command is completed.
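 *
 * Illustrative call only (the helper name, the caller-supplied
 * buffer, and the use of SD_PATH_DIRECT are assumptions):
 */
#if 0
static int
sd_example_read_block0(struct sd_lun *un, void *buf)
{
	/* read one target-sized block at LBA 0, e.g. to sniff a label */
	return (sd_send_scsi_RDWR(un, SCMD_READ, buf,
	    un->un_tgt_blocksize, (daddr_t)0, SD_PATH_DIRECT));
}
#endif
/*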
20126 */ 20127 20128 static int 20129 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 20130 size_t buflen, daddr_t start_block, int path_flag) 20131 { 20132 struct scsi_extended_sense sense_buf; 20133 uchar_t cdb_buf[CDB_GROUP4]; /* Use max size */ 20134 struct uscsi_cmd ucmd_buf; 20135 uint32_t block_count; 20136 int status; 20137 int cdbsize; 20138 uchar_t flag; 20139 int i; 20140 20141 ASSERT(un != NULL); 20142 ASSERT(!mutex_owned(SD_MUTEX(un))); 20143 ASSERT(bufaddr != NULL); 20144 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 20145 20146 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 20147 20148 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 20149 return (EINVAL); 20150 } 20151 20152 mutex_enter(SD_MUTEX(un)); 20153 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 20154 mutex_exit(SD_MUTEX(un)); 20155 20156 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 20157 20158 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 20159 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 20160 bufaddr, buflen, start_block, block_count); 20161 20162 bzero(cdb_buf, sizeof (cdb_buf)); 20163 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20164 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20165 20166 /* Compute CDB size to use */ 20167 if (start_block > 0xffffffff) 20168 cdbsize = CDB_GROUP4; 20169 else if ((start_block & 0xFFE00000) || 20170 (un->un_f_cfg_is_atapi == TRUE)) 20171 cdbsize = CDB_GROUP1; 20172 else 20173 cdbsize = CDB_GROUP0; 20174 20175 switch (cdbsize) { 20176 case CDB_GROUP0: /* 6-byte CDBs */ 20177 cdb_buf[0] = cmd; 20178 cdb_buf[1] = (uchar_t)((start_block & 0x001F0000) >> 16); 20179 cdb_buf[2] = (uchar_t)((start_block & 0x0000FF00) >> 8); 20180 cdb_buf[3] = (uchar_t)(start_block & 0x000000FF); 20181 cdb_buf[4] = (uchar_t)(block_count & 0xFF); 20182 break; 20183 case CDB_GROUP1: /* 10-byte CDBs */ 20184 cdb_buf[0] = cmd | SCMD_GROUP1; 20185 cdb_buf[2] = (uchar_t)((start_block & 0xFF000000) >> 24); 20186 cdb_buf[3] = (uchar_t)((start_block & 0x00FF0000) >> 16); 20187 cdb_buf[4] = (uchar_t)((start_block & 0x0000FF00) >> 8); 20188 cdb_buf[5] = (uchar_t)(start_block & 0x000000FF); 20189 cdb_buf[7] = (uchar_t)((block_count & 0xFF00) >> 8); 20190 cdb_buf[8] = (uchar_t)(block_count & 0xFF); 20191 break; 20192 case CDB_GROUP4: /* 16-byte CDBs */ 20193 cdb_buf[0] = cmd | SCMD_GROUP4; 20194 /* Block address is in bytes 2 - 9 */ 20195 for (i = 9; i > 1; i--) { 20196 cdb_buf[i] = (uchar_t)(start_block & 0xFF); 20197 start_block >>= 8; 20198 } 20199 /* Block count is in bytes 10 - 13 */ 20200 for (i = 13; i > 9; i--) { 20201 cdb_buf[i] = (uchar_t)(block_count & 0xFF); 20202 block_count >>= 8; 20203 } 20204 break; 20205 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 20206 default: 20207 /* All others reserved */ 20208 return (EINVAL); 20209 } 20210 20211 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 20212 if ((SD_LUN(un) > 0) && (un->un_sd->sd_inq->inq_ansi == 0x01)) { 20213 cdb_buf[1] |= (SD_LUN(un) << 5); 20214 } 20215 20216 ucmd_buf.uscsi_cdb = (char *)cdb_buf; 20217 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 20218 ucmd_buf.uscsi_bufaddr = bufaddr; 20219 ucmd_buf.uscsi_buflen = buflen; 20220 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20221 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20222 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 20223 ucmd_buf.uscsi_timeout = 60; 20224 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 20225 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 20226 
switch (status) { 20227 case 0: 20228 break; /* Success! */ 20229 case EIO: 20230 switch (ucmd_buf.uscsi_status) { 20231 case STATUS_RESERVATION_CONFLICT: 20232 status = EACCES; 20233 break; 20234 default: 20235 break; 20236 } 20237 break; 20238 default: 20239 break; 20240 } 20241 20242 if (status == 0) { 20243 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 20244 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20245 } 20246 20247 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 20248 20249 return (status); 20250 } 20251 20252 20253 /* 20254 * Function: sd_send_scsi_LOG_SENSE 20255 * 20256 * Description: Issue a scsi LOG_SENSE command with the given parameters. 20257 * 20258 * Arguments: un: Pointer to the sd_lun struct for the target. 20259 * 20260 * Return Code: 0 - Success 20261 * errno return code from sd_send_scsi_cmd() 20262 * 20263 * Context: Can sleep. Does not return until command is completed. 20264 */ 20265 20266 static int 20267 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 20268 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 20269 int path_flag) 20270 20271 { 20272 struct scsi_extended_sense sense_buf; 20273 uchar_t cdb_buf[CDB_GROUP1]; 20274 struct uscsi_cmd ucmd_buf; 20275 int status; 20276 20277 ASSERT(un != NULL); 20278 ASSERT(!mutex_owned(SD_MUTEX(un))); 20279 20280 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 20281 20282 bzero(cdb_buf, sizeof (cdb_buf)); 20283 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20284 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20285 20286 cdb_buf[0] = SCMD_LOG_SENSE_G1; 20287 cdb_buf[2] = (page_control << 6) | page_code; 20288 cdb_buf[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 20289 cdb_buf[6] = (uchar_t)(param_ptr & 0x00FF); 20290 cdb_buf[7] = (uchar_t)((buflen & 0xFF00) >> 8); 20291 cdb_buf[8] = (uchar_t)(buflen & 0x00FF); 20292 20293 ucmd_buf.uscsi_cdb = (char *)cdb_buf; 20294 ucmd_buf.uscsi_cdblen = sizeof (cdb_buf); 20295 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20296 ucmd_buf.uscsi_buflen = buflen; 20297 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20298 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20299 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20300 ucmd_buf.uscsi_timeout = 60; 20301 20302 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 20303 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 20304 20305 switch (status) { 20306 case 0: 20307 break; 20308 case EIO: 20309 switch (ucmd_buf.uscsi_status) { 20310 case STATUS_RESERVATION_CONFLICT: 20311 status = EACCES; 20312 break; 20313 case STATUS_CHECK: 20314 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20315 (sense_buf.es_key == KEY_ILLEGAL_REQUEST) && 20316 (sense_buf.es_add_code == 0x24)) { 20317 /* 20318 * ASC 0x24: INVALID FIELD IN CDB 20319 */ 20320 switch (page_code) { 20321 case START_STOP_CYCLE_PAGE: 20322 /* 20323 * The start stop cycle counter is 20324 * implemented as page 0x31 in earlier 20325 * generation disks. In new generation 20326 * disks the start stop cycle counter is 20327 * implemented as page 0xE. To properly 20328 * handle this case if an attempt for 20329 * log page 0xE is made and fails we 20330 * will try again using page 0x31. 20331 * 20332 * Network storage BU committed to 20333 * maintain the page 0x31 for this 20334 * purpose and will not have any other 20335 * page implemented with page code 0x31 20336 * until all disks transition to the 20337 * standard page. 
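 *
 * (Page 0xE here is the standard Start-Stop Cycle Counter log page
 * defined by SPC; on an INVALID FIELD IN CDB failure the code below
 * rewrites cdb_buf[2] with the vendor-unique page 0x31 and retries.)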
20338 */
20339 mutex_enter(SD_MUTEX(un));
20340 un->un_start_stop_cycle_page =
20341 START_STOP_CYCLE_VU_PAGE;
20342 cdb_buf[2] = (char)(page_control << 6) |
20343 un->un_start_stop_cycle_page;
20344 mutex_exit(SD_MUTEX(un));
20345 status = sd_send_scsi_cmd(
20346 SD_GET_DEV(un), &ucmd_buf,
20347 UIO_SYSSPACE, UIO_SYSSPACE,
20348 UIO_SYSSPACE, path_flag); 20349
20350 break;
20351 case TEMPERATURE_PAGE:
20352 status = ENOTTY;
20353 break;
20354 default: 20355 break; 20356 } 20357 } 20358 break;
20359 default: 20360 break; 20361 } 20362 break;
20363 default: 20364 break; 20365 } 20366
20367 if (status == 0) {
20368 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
20369 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20370 } 20371
20372 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 20373
20374 return (status); 20375 } 20376 20377
20378 /* 20379 * Function: sdioctl 20380 *
20381 * Description: Driver's ioctl(9e) entry point function. 20382 *
20383 * Arguments: dev - device number
20384 * cmd - ioctl operation to be performed
20385 * arg - user argument, contains data to be set or reference
20386 * parameter for get
20387 * flag - bit flag, indicating open settings, 32/64 bit type
20388 * cred_p - user credential pointer
20389 * rval_p - calling process return value (OPT) 20390 *
20391 * Return Code: EINVAL 20392 * ENOTTY 20393 * ENXIO 20394 * EIO
20395 * EFAULT 20396 * ENOTSUP 20397 * EPERM 20398 *
20399 * Context: Called from the device switch at normal priority. 20400 */ 20401
20402 static int
20403 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
20404 { 20405 struct sd_lun *un = NULL;
20406 int geom_validated = FALSE;
20407 int err = 0;
20408 int i = 0;
20409 cred_t *cr; 20410
20411 /* 20412 * All device accesses go thru sdstrategy where we check on suspend
20413 * status. 20414 */
20415 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
20416 return (ENXIO);
20417 } 20418
20419 ASSERT(!mutex_owned(SD_MUTEX(un))); 20420
20421 /* 20422 * Moved this wait from sd_uscsi_strategy to here for
20423 * reasons of deadlock prevention. Internal driver commands,
20424 * specifically those to change a device's power level, result
20425 * in a call to sd_uscsi_strategy. 20426 */
20427 mutex_enter(SD_MUTEX(un));
20428 while ((un->un_state == SD_STATE_SUSPENDED) ||
20429 (un->un_state == SD_STATE_PM_CHANGING)) {
20430 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
20431 }
20432 /* 20433 * Twiddling the counter here protects commands from now
20434 * through to the top of sd_uscsi_strategy. Without the
20435 * counter inc., a power down, for example, could get in
20436 * after the above check for state is made and before
20437 * execution gets to the top of sd_uscsi_strategy.
20438 * That would cause problems.
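 *
 * (For reference: the power management code consults
 * un_ncmds_in_driver and is expected not to lower the device's
 * power while the count is non-zero, which is what closes the
 * window described above.)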
20439 */ 20440 un->un_ncmds_in_driver++; 20441 20442 if ((un->un_f_geometry_is_valid == FALSE) && 20443 (flag & (FNDELAY | FNONBLOCK))) { 20444 switch (cmd) { 20445 case CDROMPAUSE: 20446 case CDROMRESUME: 20447 case CDROMPLAYMSF: 20448 case CDROMPLAYTRKIND: 20449 case CDROMREADTOCHDR: 20450 case CDROMREADTOCENTRY: 20451 case CDROMSTOP: 20452 case CDROMSTART: 20453 case CDROMVOLCTRL: 20454 case CDROMSUBCHNL: 20455 case CDROMREADMODE2: 20456 case CDROMREADMODE1: 20457 case CDROMREADOFFSET: 20458 case CDROMSBLKMODE: 20459 case CDROMGBLKMODE: 20460 case CDROMGDRVSPEED: 20461 case CDROMSDRVSPEED: 20462 case CDROMCDDA: 20463 case CDROMCDXA: 20464 case CDROMSUBCODE: 20465 if (!ISCD(un)) { 20466 un->un_ncmds_in_driver--; 20467 ASSERT(un->un_ncmds_in_driver >= 0); 20468 mutex_exit(SD_MUTEX(un)); 20469 return (ENOTTY); 20470 } 20471 break; 20472 case FDEJECT: 20473 case DKIOCEJECT: 20474 case CDROMEJECT: 20475 if (!ISREMOVABLE(un)) { 20476 un->un_ncmds_in_driver--; 20477 ASSERT(un->un_ncmds_in_driver >= 0); 20478 mutex_exit(SD_MUTEX(un)); 20479 return (ENOTTY); 20480 } 20481 break; 20482 case DKIOCSVTOC: 20483 case DKIOCSETEFI: 20484 case DKIOCSMBOOT: 20485 mutex_exit(SD_MUTEX(un)); 20486 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 20487 if (err != 0) { 20488 mutex_enter(SD_MUTEX(un)); 20489 un->un_ncmds_in_driver--; 20490 ASSERT(un->un_ncmds_in_driver >= 0); 20491 mutex_exit(SD_MUTEX(un)); 20492 return (EIO); 20493 } 20494 mutex_enter(SD_MUTEX(un)); 20495 /* FALLTHROUGH */ 20496 case DKIOCREMOVABLE: 20497 case DKIOCINFO: 20498 case DKIOCGMEDIAINFO: 20499 case MHIOCENFAILFAST: 20500 case MHIOCSTATUS: 20501 case MHIOCTKOWN: 20502 case MHIOCRELEASE: 20503 case MHIOCGRP_INKEYS: 20504 case MHIOCGRP_INRESV: 20505 case MHIOCGRP_REGISTER: 20506 case MHIOCGRP_RESERVE: 20507 case MHIOCGRP_PREEMPTANDABORT: 20508 case MHIOCGRP_REGISTERANDIGNOREKEY: 20509 case CDROMCLOSETRAY: 20510 case USCSICMD: 20511 goto skip_ready_valid; 20512 default: 20513 break; 20514 } 20515 20516 mutex_exit(SD_MUTEX(un)); 20517 err = sd_ready_and_valid(un); 20518 mutex_enter(SD_MUTEX(un)); 20519 if (err == SD_READY_NOT_VALID) { 20520 switch (cmd) { 20521 case DKIOCGAPART: 20522 case DKIOCGGEOM: 20523 case DKIOCSGEOM: 20524 case DKIOCGVTOC: 20525 case DKIOCSVTOC: 20526 case DKIOCSAPART: 20527 case DKIOCG_PHYGEOM: 20528 case DKIOCG_VIRTGEOM: 20529 err = ENOTSUP; 20530 un->un_ncmds_in_driver--; 20531 ASSERT(un->un_ncmds_in_driver >= 0); 20532 mutex_exit(SD_MUTEX(un)); 20533 return (err); 20534 } 20535 } 20536 if (err != SD_READY_VALID) { 20537 switch (cmd) { 20538 case DKIOCSTATE: 20539 case CDROMGDRVSPEED: 20540 case CDROMSDRVSPEED: 20541 case FDEJECT: /* for eject command */ 20542 case DKIOCEJECT: 20543 case CDROMEJECT: 20544 case DKIOCGETEFI: 20545 case DKIOCSGEOM: 20546 case DKIOCREMOVABLE: 20547 case DKIOCSAPART: 20548 case DKIOCSETEFI: 20549 break; 20550 default: 20551 if (ISREMOVABLE(un)) { 20552 err = ENXIO; 20553 } else { 20554 /* Do not map EACCES to EIO */ 20555 if (err != EACCES) 20556 err = EIO; 20557 } 20558 un->un_ncmds_in_driver--; 20559 ASSERT(un->un_ncmds_in_driver >= 0); 20560 mutex_exit(SD_MUTEX(un)); 20561 return (err); 20562 } 20563 } 20564 geom_validated = TRUE; 20565 } 20566 if ((un->un_f_geometry_is_valid == TRUE) && 20567 (un->un_solaris_size > 0)) { 20568 /* 20569 * the "geometry_is_valid" flag could be true if we 20570 * have an fdisk table but no Solaris partition 20571 */ 20572 if (un->un_vtoc.v_sanity != VTOC_SANE) { 20573 /* it is EFI, so return ENOTSUP for these */ 20574 switch (cmd) { 20575 case 
DKIOCGAPART: 20576 case DKIOCGGEOM: 20577 case DKIOCGVTOC: 20578 case DKIOCSVTOC: 20579 case DKIOCSAPART: 20580 err = ENOTSUP; 20581 un->un_ncmds_in_driver--; 20582 ASSERT(un->un_ncmds_in_driver >= 0); 20583 mutex_exit(SD_MUTEX(un)); 20584 return (err); 20585 } 20586 } 20587 } 20588 20589 skip_ready_valid: 20590 mutex_exit(SD_MUTEX(un)); 20591 20592 switch (cmd) { 20593 case DKIOCINFO: 20594 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 20595 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 20596 break; 20597 20598 case DKIOCGMEDIAINFO: 20599 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 20600 err = sd_get_media_info(dev, (caddr_t)arg, flag); 20601 break; 20602 20603 case DKIOCGGEOM: 20604 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGGEOM\n"); 20605 err = sd_dkio_get_geometry(dev, (caddr_t)arg, flag, 20606 geom_validated); 20607 break; 20608 20609 case DKIOCSGEOM: 20610 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSGEOM\n"); 20611 err = sd_dkio_set_geometry(dev, (caddr_t)arg, flag); 20612 break; 20613 20614 case DKIOCGAPART: 20615 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGAPART\n"); 20616 err = sd_dkio_get_partition(dev, (caddr_t)arg, flag, 20617 geom_validated); 20618 break; 20619 20620 case DKIOCSAPART: 20621 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSAPART\n"); 20622 err = sd_dkio_set_partition(dev, (caddr_t)arg, flag); 20623 break; 20624 20625 case DKIOCGVTOC: 20626 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGVTOC\n"); 20627 err = sd_dkio_get_vtoc(dev, (caddr_t)arg, flag, 20628 geom_validated); 20629 break; 20630 20631 case DKIOCGETEFI: 20632 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGETEFI\n"); 20633 err = sd_dkio_get_efi(dev, (caddr_t)arg, flag); 20634 break; 20635 20636 case DKIOCPARTITION: 20637 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCPARTITION\n"); 20638 err = sd_dkio_partition(dev, (caddr_t)arg, flag); 20639 break; 20640 20641 case DKIOCSVTOC: 20642 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSVTOC\n"); 20643 err = sd_dkio_set_vtoc(dev, (caddr_t)arg, flag); 20644 break; 20645 20646 case DKIOCSETEFI: 20647 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSETEFI\n"); 20648 err = sd_dkio_set_efi(dev, (caddr_t)arg, flag); 20649 break; 20650 20651 case DKIOCGMBOOT: 20652 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMBOOT\n"); 20653 err = sd_dkio_get_mboot(dev, (caddr_t)arg, flag); 20654 break; 20655 20656 case DKIOCSMBOOT: 20657 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSMBOOT\n"); 20658 err = sd_dkio_set_mboot(dev, (caddr_t)arg, flag); 20659 break; 20660 20661 case DKIOCLOCK: 20662 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 20663 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 20664 SD_PATH_STANDARD); 20665 break; 20666 20667 case DKIOCUNLOCK: 20668 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 20669 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 20670 SD_PATH_STANDARD); 20671 break; 20672 20673 case DKIOCSTATE: { 20674 enum dkio_state state; 20675 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 20676 20677 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 20678 err = EFAULT; 20679 } else { 20680 err = sd_check_media(dev, state); 20681 if (err == 0) { 20682 if (ddi_copyout(&un->un_mediastate, (void *)arg, 20683 sizeof (int), flag) != 0) 20684 err = EFAULT; 20685 } 20686 } 20687 break; 20688 } 20689 20690 case DKIOCREMOVABLE: 20691 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 20692 if (ISREMOVABLE(un)) { 20693 i = 1; 20694 } else { 20695 i = 0; 20696 } 20697 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 20698 err = EFAULT; 20699 } else { 20700 err = 0; 20701 } 20702 break; 20703 20704 case DKIOCGTEMPERATURE: 20705 SD_TRACE(SD_LOG_IOCTL, un, 
"DKIOCGTEMPERATURE\n"); 20706 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 20707 break; 20708 20709 case MHIOCENFAILFAST: 20710 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 20711 if ((err = drv_priv(cred_p)) == 0) { 20712 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 20713 } 20714 break; 20715 20716 case MHIOCTKOWN: 20717 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 20718 if ((err = drv_priv(cred_p)) == 0) { 20719 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 20720 } 20721 break; 20722 20723 case MHIOCRELEASE: 20724 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 20725 if ((err = drv_priv(cred_p)) == 0) { 20726 err = sd_mhdioc_release(dev); 20727 } 20728 break; 20729 20730 case MHIOCSTATUS: 20731 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 20732 if ((err = drv_priv(cred_p)) == 0) { 20733 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 20734 case 0: 20735 err = 0; 20736 break; 20737 case EACCES: 20738 *rval_p = 1; 20739 err = 0; 20740 break; 20741 default: 20742 err = EIO; 20743 break; 20744 } 20745 } 20746 break; 20747 20748 case MHIOCQRESERVE: 20749 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 20750 if ((err = drv_priv(cred_p)) == 0) { 20751 err = sd_reserve_release(dev, SD_RESERVE); 20752 } 20753 break; 20754 20755 case MHIOCREREGISTERDEVID: 20756 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 20757 if (drv_priv(cred_p) == EPERM) { 20758 err = EPERM; 20759 } else if (ISREMOVABLE(un) || ISCD(un)) { 20760 err = ENOTTY; 20761 } else { 20762 err = sd_mhdioc_register_devid(dev); 20763 } 20764 break; 20765 20766 case MHIOCGRP_INKEYS: 20767 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 20768 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20769 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20770 err = ENOTSUP; 20771 } else { 20772 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 20773 flag); 20774 } 20775 } 20776 break; 20777 20778 case MHIOCGRP_INRESV: 20779 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 20780 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20781 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20782 err = ENOTSUP; 20783 } else { 20784 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 20785 } 20786 } 20787 break; 20788 20789 case MHIOCGRP_REGISTER: 20790 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 20791 if ((err = drv_priv(cred_p)) != EPERM) { 20792 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20793 err = ENOTSUP; 20794 } else if (arg != NULL) { 20795 mhioc_register_t reg; 20796 if (ddi_copyin((void *)arg, ®, 20797 sizeof (mhioc_register_t), flag) != 0) { 20798 err = EFAULT; 20799 } else { 20800 err = 20801 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20802 un, SD_SCSI3_REGISTER, 20803 (uchar_t *)®); 20804 } 20805 } 20806 } 20807 break; 20808 20809 case MHIOCGRP_RESERVE: 20810 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 20811 if ((err = drv_priv(cred_p)) != EPERM) { 20812 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20813 err = ENOTSUP; 20814 } else if (arg != NULL) { 20815 mhioc_resv_desc_t resv_desc; 20816 if (ddi_copyin((void *)arg, &resv_desc, 20817 sizeof (mhioc_resv_desc_t), flag) != 0) { 20818 err = EFAULT; 20819 } else { 20820 err = 20821 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20822 un, SD_SCSI3_RESERVE, 20823 (uchar_t *)&resv_desc); 20824 } 20825 } 20826 } 20827 break; 20828 20829 case MHIOCGRP_PREEMPTANDABORT: 20830 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 20831 if ((err = drv_priv(cred_p)) != EPERM) { 20832 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20833 err = 
ENOTSUP;
20834 } else if (arg != NULL) {
20835 mhioc_preemptandabort_t preempt_abort;
20836 if (ddi_copyin((void *)arg, &preempt_abort,
20837 sizeof (mhioc_preemptandabort_t),
20838 flag) != 0) {
20839 err = EFAULT;
20840 } else {
20841 err =
20842 sd_send_scsi_PERSISTENT_RESERVE_OUT(
20843 un, SD_SCSI3_PREEMPTANDABORT,
20844 (uchar_t *)&preempt_abort);
20845 } 20846 } 20847 } 20848 break; 20849
20850 case MHIOCGRP_REGISTERANDIGNOREKEY:
20851 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n");
20852 if ((err = drv_priv(cred_p)) != EPERM) {
20853 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
20854 err = ENOTSUP;
20855 } else if (arg != NULL) {
20856 mhioc_registerandignorekey_t r_and_i;
20857 if (ddi_copyin((void *)arg, (void *)&r_and_i,
20858 sizeof (mhioc_registerandignorekey_t),
20859 flag) != 0) {
20860 err = EFAULT;
20861 } else {
20862 err =
20863 sd_send_scsi_PERSISTENT_RESERVE_OUT(
20864 un, SD_SCSI3_REGISTERANDIGNOREKEY,
20865 (uchar_t *)&r_and_i);
20866 } 20867 } 20868 } 20869 break; 20870
20871 case USCSICMD:
20872 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n");
20873 cr = ddi_get_cred();
20874 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) {
20875 err = EPERM;
20876 } else {
20877 err = sd_uscsi_ioctl(dev, (caddr_t)arg, flag);
20878 } 20879 break; 20880
20881 case CDROMPAUSE: 20882 case CDROMRESUME:
20883 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n");
20884 if (!ISCD(un)) {
20885 err = ENOTTY;
20886 } else {
20887 err = sr_pause_resume(dev, cmd);
20888 } 20889 break; 20890
20891 case CDROMPLAYMSF:
20892 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n");
20893 if (!ISCD(un)) {
20894 err = ENOTTY;
20895 } else {
20896 err = sr_play_msf(dev, (caddr_t)arg, flag);
20897 } 20898 break; 20899
20900 case CDROMPLAYTRKIND:
20901 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n");
20902 #if defined(__i386) || defined(__amd64)
20903 /* 20904 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 20905 */
20906 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
20907 #else
20908 if (!ISCD(un)) {
20909 #endif
20910 err = ENOTTY;
20911 } else {
20912 err = sr_play_trkind(dev, (caddr_t)arg, flag);
20913 } 20914 break; 20915
20916 case CDROMREADTOCHDR:
20917 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n");
20918 if (!ISCD(un)) {
20919 err = ENOTTY;
20920 } else {
20921 err = sr_read_tochdr(dev, (caddr_t)arg, flag);
20922 } 20923 break; 20924
20925 case CDROMREADTOCENTRY:
20926 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n");
20927 if (!ISCD(un)) {
20928 err = ENOTTY;
20929 } else {
20930 err = sr_read_tocentry(dev, (caddr_t)arg, flag);
20931 } 20932 break; 20933
20934 case CDROMSTOP:
20935 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n");
20936 if (!ISCD(un)) {
20937 err = ENOTTY;
20938 } else {
20939 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP,
20940 SD_PATH_STANDARD);
20941 } 20942 break; 20943
20944 case CDROMSTART:
20945 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n");
20946 if (!ISCD(un)) {
20947 err = ENOTTY;
20948 } else {
20949 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
20950 SD_PATH_STANDARD);
20951 } 20952 break; 20953
20954 case CDROMCLOSETRAY:
20955 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
20956 if (!ISCD(un)) {
20957 err = ENOTTY;
20958 } else {
20959 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE,
20960 SD_PATH_STANDARD);
20961 } 20962 break; 20963
20964 case FDEJECT: /* for eject command */
20965 case DKIOCEJECT:
20966 case CDROMEJECT:
20967 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
20968 if (!ISREMOVABLE(un)) {
20969 err = ENOTTY;
20970 } else {
20971 err = sr_eject(dev);
20972 } 20973 break; 20974
20975 case CDROMVOLCTRL:
20976 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
20977 if (!ISCD(un)) {
20978 err = ENOTTY;
20979 } else {
20980 err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
20981 } 20982 break; 20983
20984 case CDROMSUBCHNL:
20985 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
20986 if (!ISCD(un)) {
20987 err = ENOTTY;
20988 } else {
20989 err = sr_read_subchannel(dev, (caddr_t)arg, flag);
20990 } 20991 break; 20992
20993 case CDROMREADMODE2:
20994 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
20995 if (!ISCD(un)) {
20996 err = ENOTTY;
20997 } else if (un->un_f_cfg_is_atapi == TRUE) {
20998 /* 20999 * If the drive supports READ CD, use that instead of
21000 * switching the LBA size via a MODE SELECT
21001 * Block Descriptor 21002 */
21003 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
21004 } else {
21005 err = sr_read_mode2(dev, (caddr_t)arg, flag);
21006 } 21007 break; 21008
21009 case CDROMREADMODE1:
21010 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
21011 if (!ISCD(un)) {
21012 err = ENOTTY;
21013 } else {
21014 err = sr_read_mode1(dev, (caddr_t)arg, flag);
21015 } 21016 break; 21017
21018 case CDROMREADOFFSET:
21019 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
21020 if (!ISCD(un)) {
21021 err = ENOTTY;
21022 } else {
21023 err = sr_read_sony_session_offset(dev, (caddr_t)arg,
21024 flag);
21025 } 21026 break; 21027
21028 case CDROMSBLKMODE:
21029 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
21030 /* 21031 * There is no means of changing block size in case of atapi
21032 * drives, thus return ENOTTY if drive type is atapi 21033 */
21034 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
21035 err = ENOTTY;
21036 } else if (un->un_f_mmc_cap == TRUE) { 21037
21038 /* 21039 * MMC Devices do not support changing the
21040 * logical block size 21041 *
21042 * Note: EINVAL is being returned instead of ENOTTY to
21043 * maintain consistency with the original mmc
21044 * driver update. 21045 */
21046 err = EINVAL;
21047 } else {
21048 mutex_enter(SD_MUTEX(un));
21049 if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
21050 (un->un_ncmds_in_transport > 0)) {
21051 mutex_exit(SD_MUTEX(un));
21052 err = EINVAL;
21053 } else {
21054 mutex_exit(SD_MUTEX(un));
21055 err = sr_change_blkmode(dev, cmd, arg, flag);
21056 } 21057 } 21058 break; 21059
21060 case CDROMGBLKMODE:
21061 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
21062 if (!ISCD(un)) {
21063 err = ENOTTY;
21064 } else if ((un->un_f_cfg_is_atapi != FALSE) &&
21065 (un->un_f_blockcount_is_valid != FALSE)) {
21066 /* 21067 * Drive is an ATAPI drive so return target block
21068 * size for ATAPI drives since we cannot change the
21069 * blocksize on ATAPI drives. Used primarily to detect
21070 * if an ATAPI cdrom is present. 21071 */
21072 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
21073 sizeof (int), flag) != 0) {
21074 err = EFAULT;
21075 } else {
21076 err = 0;
21077 } 21078
21079 } else {
21080 /* 21081 * Drive supports changing block sizes via a Mode
21082 * Select. 21083 */
21084 err = sr_change_blkmode(dev, cmd, arg, flag);
21085 } 21086 break; 21087
21088 case CDROMGDRVSPEED:
21089 case CDROMSDRVSPEED:
21090 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
21091 if (!ISCD(un)) {
21092 err = ENOTTY;
21093 } else if (un->un_f_mmc_cap == TRUE) {
21094 /* 21095 * Note: In the future the driver implementation
21096 * for getting and 21097 * setting cd speed should entail:
21098 * 1) If non-mmc try the Toshiba mode page
21099 * (sr_change_speed)
21100 * 2) If mmc but no support for Real Time Streaming try
21101 * the SET CD SPEED (0xBB) command
21102 * (sr_atapi_change_speed)
21103 * 3) If mmc and support for Real Time Streaming
21104 * try the GET PERFORMANCE and SET STREAMING
21105 * commands (not yet implemented, 4380808) 21106 */
21107 /* 21108 * As per recent MMC spec, CD-ROM speed is variable
21109 * and changes with LBA. Since there is no such
21110 * thing as drive speed now, fail this ioctl. 21111 *
21112 * Note: EINVAL is returned for consistency with the original
21113 * implementation, which included support for getting
21114 * the drive speed of mmc devices but not setting
21115 * the drive speed. Thus EINVAL would be returned
21116 * if a set request was made for an mmc device.
21117 * We no longer support get or set speed for
21118 * mmc but need to remain consistent with regard
21119 * to the error code returned. 21120 */
21121 err = EINVAL;
21122 } else if (un->un_f_cfg_is_atapi == TRUE) {
21123 err = sr_atapi_change_speed(dev, cmd, arg, flag);
21124 } else {
21125 err = sr_change_speed(dev, cmd, arg, flag);
21126 } 21127 break; 21128
21129 case CDROMCDDA:
21130 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
21131 if (!ISCD(un)) {
21132 err = ENOTTY;
21133 } else {
21134 err = sr_read_cdda(dev, (void *)arg, flag);
21135 } 21136 break; 21137
21138 case CDROMCDXA:
21139 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
21140 if (!ISCD(un)) {
21141 err = ENOTTY;
21142 } else {
21143 err = sr_read_cdxa(dev, (caddr_t)arg, flag);
21144 } 21145 break; 21146
21147 case CDROMSUBCODE:
21148 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
21149 if (!ISCD(un)) {
21150 err = ENOTTY;
21151 } else {
21152 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
21153 } 21154 break; 21155
21156 case DKIOCPARTINFO: {
21157 /* 21158 * Return parameters describing the selected disk slice.
21159 * Note: this ioctl is for the intel platform only 21160 */
21161 #if defined(__i386) || defined(__amd64)
21162 int part; 21163
21164 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCPARTINFO\n");
21165 part = SDPART(dev); 21166
21167 /* don't check un_solaris_size for pN */
21168 if (part < P0_RAW_DISK && un->un_solaris_size == 0) {
21169 err = EIO;
21170 } else {
21171 struct part_info p; 21172
21173 p.p_start = (daddr_t)un->un_offset[part];
21174 p.p_length = (int)un->un_map[part].dkl_nblk;
21175 #ifdef _MULTI_DATAMODEL
21176 switch (ddi_model_convert_from(flag & FMODELS)) {
21177 case DDI_MODEL_ILP32: 21178 {
21179 struct part_info32 p32; 21180
21181 p32.p_start = (daddr32_t)p.p_start;
21182 p32.p_length = p.p_length;
21183 if (ddi_copyout(&p32, (void *)arg,
21184 sizeof (p32), flag))
21185 err = EFAULT;
21186 break;
21187 } 21188
21189 case DDI_MODEL_NONE: 21190 {
21191 if (ddi_copyout(&p, (void *)arg, sizeof (p),
21192 flag))
21193 err = EFAULT;
21194 break;
21195 } 21196 }
21197 #else /* !
_MULTI_DATAMODEL */ 21198 if (ddi_copyout(&p, (void *)arg, sizeof (p), flag)) 21199 err = EFAULT; 21200 #endif /* _MULTI_DATAMODEL */ 21201 } 21202 #else 21203 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCPARTINFO\n"); 21204 err = ENOTTY; 21205 #endif 21206 break; 21207 } 21208 21209 case DKIOCG_PHYGEOM: { 21210 /* Return the driver's notion of the media physical geometry */ 21211 #if defined(__i386) || defined(__amd64) 21212 struct dk_geom disk_geom; 21213 struct dk_geom *dkgp = &disk_geom; 21214 21215 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_PHYGEOM\n"); 21216 mutex_enter(SD_MUTEX(un)); 21217 21218 if (un->un_g.dkg_nhead != 0 && 21219 un->un_g.dkg_nsect != 0) { 21220 /* 21221 * We succeeded in getting a geometry, but 21222 * right now it is being reported as just the 21223 * Solaris fdisk partition, just like for 21224 * DKIOCGGEOM. We need to change that to be 21225 * correct for the entire disk now. 21226 */ 21227 bcopy(&un->un_g, dkgp, sizeof (*dkgp)); 21228 dkgp->dkg_acyl = 0; 21229 dkgp->dkg_ncyl = un->un_blockcount / 21230 (dkgp->dkg_nhead * dkgp->dkg_nsect); 21231 } else { 21232 bzero(dkgp, sizeof (struct dk_geom)); 21233 /* 21234 * This disk does not have a Solaris VTOC 21235 * so we must present a physical geometry 21236 * that will remain consistent regardless 21237 * of how the disk is used. This will ensure 21238 * that the geometry does not change regardless 21239 * of the fdisk partition type (ie. EFI, FAT32, 21240 * Solaris, etc). 21241 */ 21242 if (ISCD(un)) { 21243 dkgp->dkg_nhead = un->un_pgeom.g_nhead; 21244 dkgp->dkg_nsect = un->un_pgeom.g_nsect; 21245 dkgp->dkg_ncyl = un->un_pgeom.g_ncyl; 21246 dkgp->dkg_acyl = un->un_pgeom.g_acyl; 21247 } else { 21248 sd_convert_geometry(un->un_blockcount, dkgp); 21249 dkgp->dkg_acyl = 0; 21250 dkgp->dkg_ncyl = un->un_blockcount / 21251 (dkgp->dkg_nhead * dkgp->dkg_nsect); 21252 } 21253 } 21254 dkgp->dkg_pcyl = dkgp->dkg_ncyl + dkgp->dkg_acyl; 21255 21256 if (ddi_copyout(dkgp, (void *)arg, 21257 sizeof (struct dk_geom), flag)) { 21258 mutex_exit(SD_MUTEX(un)); 21259 err = EFAULT; 21260 } else { 21261 mutex_exit(SD_MUTEX(un)); 21262 err = 0; 21263 } 21264 #else 21265 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_PHYGEOM\n"); 21266 err = ENOTTY; 21267 #endif 21268 break; 21269 } 21270 21271 case DKIOCG_VIRTGEOM: { 21272 /* Return the driver's notion of the media's logical geometry */ 21273 #if defined(__i386) || defined(__amd64) 21274 struct dk_geom disk_geom; 21275 struct dk_geom *dkgp = &disk_geom; 21276 21277 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_VIRTGEOM\n"); 21278 mutex_enter(SD_MUTEX(un)); 21279 /* 21280 * If there is no HBA geometry available, or 21281 * if the HBA returned us something that doesn't 21282 * really fit into an Int 13/function 8 geometry 21283 * result, just fail the ioctl. See PSARC 1998/313. 
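 *
 * (INT 13h function 8 can express at most 1024 cylinders, 255 heads
 * and 63 sectors per track, which is why the check below rejects a
 * zero head or sector count and more than 1024 cylinders.)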
21284 */ 21285 if (un->un_lgeom.g_nhead == 0 || 21286 un->un_lgeom.g_nsect == 0 || 21287 un->un_lgeom.g_ncyl > 1024) { 21288 mutex_exit(SD_MUTEX(un)); 21289 err = EINVAL; 21290 } else { 21291 dkgp->dkg_ncyl = un->un_lgeom.g_ncyl; 21292 dkgp->dkg_acyl = un->un_lgeom.g_acyl; 21293 dkgp->dkg_pcyl = dkgp->dkg_ncyl + dkgp->dkg_acyl; 21294 dkgp->dkg_nhead = un->un_lgeom.g_nhead; 21295 dkgp->dkg_nsect = un->un_lgeom.g_nsect; 21296 21297 if (ddi_copyout(dkgp, (void *)arg, 21298 sizeof (struct dk_geom), flag)) { 21299 mutex_exit(SD_MUTEX(un)); 21300 err = EFAULT; 21301 } else { 21302 mutex_exit(SD_MUTEX(un)); 21303 err = 0; 21304 } 21305 } 21306 #else 21307 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_VIRTGEOM\n"); 21308 err = ENOTTY; 21309 #endif 21310 break; 21311 } 21312 #ifdef SDDEBUG 21313 /* RESET/ABORTS testing ioctls */ 21314 case DKIOCRESET: { 21315 int reset_level; 21316 21317 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 21318 err = EFAULT; 21319 } else { 21320 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 21321 "reset_level = 0x%lx\n", reset_level); 21322 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 21323 err = 0; 21324 } else { 21325 err = EIO; 21326 } 21327 } 21328 break; 21329 } 21330 21331 case DKIOCABORT: 21332 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 21333 if (scsi_abort(SD_ADDRESS(un), NULL)) { 21334 err = 0; 21335 } else { 21336 err = EIO; 21337 } 21338 break; 21339 #endif 21340 21341 #ifdef SD_FAULT_INJECTION 21342 /* SDIOC FaultInjection testing ioctls */ 21343 case SDIOCSTART: 21344 case SDIOCSTOP: 21345 case SDIOCINSERTPKT: 21346 case SDIOCINSERTXB: 21347 case SDIOCINSERTUN: 21348 case SDIOCINSERTARQ: 21349 case SDIOCPUSH: 21350 case SDIOCRETRIEVE: 21351 case SDIOCRUN: 21352 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 21353 "SDIOC detected cmd:0x%X:\n", cmd); 21354 /* call error generator */ 21355 sd_faultinjection_ioctl(cmd, arg, un); 21356 err = 0; 21357 break; 21358 21359 #endif /* SD_FAULT_INJECTION */ 21360 21361 default: 21362 err = ENOTTY; 21363 break; 21364 } 21365 mutex_enter(SD_MUTEX(un)); 21366 un->un_ncmds_in_driver--; 21367 ASSERT(un->un_ncmds_in_driver >= 0); 21368 mutex_exit(SD_MUTEX(un)); 21369 21370 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 21371 return (err); 21372 } 21373 21374 21375 /* 21376 * Function: sd_uscsi_ioctl 21377 * 21378 * Description: This routine is the driver entry point for handling USCSI ioctl 21379 * requests (USCSICMD). 21380 * 21381 * Arguments: dev - the device number 21382 * arg - user provided scsi command 21383 * flag - this argument is a pass through to ddi_copyxxx() 21384 * directly from the mode argument of ioctl(). 
21385 * 21386 * Return Code: code returned by sd_send_scsi_cmd 21387 * ENXIO 21388 * EFAULT 21389 * EAGAIN 21390 */ 21391 21392 static int 21393 sd_uscsi_ioctl(dev_t dev, caddr_t arg, int flag) 21394 { 21395 #ifdef _MULTI_DATAMODEL 21396 /* 21397 * For use when a 32 bit app makes a call into a 21398 * 64 bit ioctl 21399 */ 21400 struct uscsi_cmd32 uscsi_cmd_32_for_64; 21401 struct uscsi_cmd32 *ucmd32 = &uscsi_cmd_32_for_64; 21402 model_t model; 21403 #endif /* _MULTI_DATAMODEL */ 21404 struct uscsi_cmd *scmd = NULL; 21405 struct sd_lun *un = NULL; 21406 enum uio_seg uioseg; 21407 char cdb[CDB_GROUP0]; 21408 int rval = 0; 21409 21410 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21411 return (ENXIO); 21412 } 21413 21414 SD_TRACE(SD_LOG_IOCTL, un, "sd_uscsi_ioctl: entry: un:0x%p\n", un); 21415 21416 scmd = (struct uscsi_cmd *) 21417 kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 21418 21419 #ifdef _MULTI_DATAMODEL 21420 switch (model = ddi_model_convert_from(flag & FMODELS)) { 21421 case DDI_MODEL_ILP32: 21422 { 21423 if (ddi_copyin((void *)arg, ucmd32, sizeof (*ucmd32), flag)) { 21424 rval = EFAULT; 21425 goto done; 21426 } 21427 /* 21428 * Convert the ILP32 uscsi data from the 21429 * application to LP64 for internal use. 21430 */ 21431 uscsi_cmd32touscsi_cmd(ucmd32, scmd); 21432 break; 21433 } 21434 case DDI_MODEL_NONE: 21435 if (ddi_copyin((void *)arg, scmd, sizeof (*scmd), flag)) { 21436 rval = EFAULT; 21437 goto done; 21438 } 21439 break; 21440 } 21441 #else /* ! _MULTI_DATAMODEL */ 21442 if (ddi_copyin((void *)arg, scmd, sizeof (*scmd), flag)) { 21443 rval = EFAULT; 21444 goto done; 21445 } 21446 #endif /* _MULTI_DATAMODEL */ 21447 21448 scmd->uscsi_flags &= ~USCSI_NOINTR; 21449 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : UIO_USERSPACE; 21450 if (un->un_f_format_in_progress == TRUE) { 21451 rval = EAGAIN; 21452 goto done; 21453 } 21454 21455 /* 21456 * Gotta do the ddi_copyin() here on the uscsi_cdb so that 21457 * we will have a valid cdb[0] to test. 21458 */ 21459 if ((ddi_copyin(scmd->uscsi_cdb, cdb, CDB_GROUP0, flag) == 0) && 21460 (cdb[0] == SCMD_FORMAT)) { 21461 SD_TRACE(SD_LOG_IOCTL, un, 21462 "sd_uscsi_ioctl: scmd->uscsi_cdb 0x%x\n", cdb[0]); 21463 mutex_enter(SD_MUTEX(un)); 21464 un->un_f_format_in_progress = TRUE; 21465 mutex_exit(SD_MUTEX(un)); 21466 rval = sd_send_scsi_cmd(dev, scmd, uioseg, uioseg, uioseg, 21467 SD_PATH_STANDARD); 21468 mutex_enter(SD_MUTEX(un)); 21469 un->un_f_format_in_progress = FALSE; 21470 mutex_exit(SD_MUTEX(un)); 21471 } else { 21472 SD_TRACE(SD_LOG_IOCTL, un, 21473 "sd_uscsi_ioctl: scmd->uscsi_cdb 0x%x\n", cdb[0]); 21474 /* 21475 * It's OK to fall into here even if the ddi_copyin() 21476 * on the uscsi_cdb above fails, because sd_send_scsi_cmd() 21477 * does this same copyin and will return the EFAULT 21478 * if it fails. 21479 */ 21480 rval = sd_send_scsi_cmd(dev, scmd, uioseg, uioseg, uioseg, 21481 SD_PATH_STANDARD); 21482 } 21483 #ifdef _MULTI_DATAMODEL 21484 switch (model) { 21485 case DDI_MODEL_ILP32: 21486 /* 21487 * Convert back to ILP32 before copyout to the 21488 * application 21489 */ 21490 uscsi_cmdtouscsi_cmd32(scmd, ucmd32); 21491 if (ddi_copyout(ucmd32, (void *)arg, sizeof (*ucmd32), flag)) { 21492 if (rval != 0) { 21493 rval = EFAULT; 21494 } 21495 } 21496 break; 21497 case DDI_MODEL_NONE: 21498 if (ddi_copyout(scmd, (void *)arg, sizeof (*scmd), flag)) { 21499 if (rval != 0) { 21500 rval = EFAULT; 21501 } 21502 } 21503 break; 21504 } 21505 #else /* ! 
_MULTI_DATAMODE */ 21506 if (ddi_copyout(scmd, (void *)arg, sizeof (*scmd), flag)) { 21507 if (rval != 0) { 21508 rval = EFAULT; 21509 } 21510 } 21511 #endif /* _MULTI_DATAMODE */ 21512 done: 21513 kmem_free(scmd, sizeof (struct uscsi_cmd)); 21514 21515 SD_TRACE(SD_LOG_IOCTL, un, "sd_uscsi_ioctl: exit: un:0x%p\n", un); 21516 21517 return (rval); 21518 } 21519 21520 21521 /* 21522 * Function: sd_dkio_ctrl_info 21523 * 21524 * Description: This routine is the driver entry point for handling controller 21525 * information ioctl requests (DKIOCINFO). 21526 * 21527 * Arguments: dev - the device number 21528 * arg - pointer to user provided dk_cinfo structure 21529 * specifying the controller type and attributes. 21530 * flag - this argument is a pass through to ddi_copyxxx() 21531 * directly from the mode argument of ioctl(). 21532 * 21533 * Return Code: 0 21534 * EFAULT 21535 * ENXIO 21536 */ 21537 21538 static int 21539 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 21540 { 21541 struct sd_lun *un = NULL; 21542 struct dk_cinfo *info; 21543 dev_info_t *pdip; 21544 21545 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21546 return (ENXIO); 21547 } 21548 21549 info = (struct dk_cinfo *) 21550 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 21551 21552 switch (un->un_ctype) { 21553 case CTYPE_CDROM: 21554 info->dki_ctype = DKC_CDROM; 21555 break; 21556 default: 21557 info->dki_ctype = DKC_SCSI_CCS; 21558 break; 21559 } 21560 pdip = ddi_get_parent(SD_DEVINFO(un)); 21561 info->dki_cnum = ddi_get_instance(pdip); 21562 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 21563 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 21564 } else { 21565 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 21566 DK_DEVLEN - 1); 21567 } 21568 21569 /* Unit Information */ 21570 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 21571 info->dki_slave = ((SD_TARGET(un) << 3) | SD_LUN(un)); 21572 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 21573 DK_DEVLEN - 1); 21574 info->dki_flags = DKI_FMTVOL; 21575 info->dki_partition = SDPART(dev); 21576 21577 /* Max Transfer size of this device in blocks */ 21578 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 21579 info->dki_addr = 0; 21580 info->dki_space = 0; 21581 info->dki_prio = 0; 21582 info->dki_vec = 0; 21583 21584 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 21585 kmem_free(info, sizeof (struct dk_cinfo)); 21586 return (EFAULT); 21587 } else { 21588 kmem_free(info, sizeof (struct dk_cinfo)); 21589 return (0); 21590 } 21591 } 21592 21593 21594 /* 21595 * Function: sd_get_media_info 21596 * 21597 * Description: This routine is the driver entry point for handling ioctl 21598 * requests for the media type or command set profile used by the 21599 * drive to operate on the media (DKIOCGMEDIAINFO). 21600 * 21601 * Arguments: dev - the device number 21602 * arg - pointer to user provided dk_minfo structure 21603 * specifying the media type, logical block size and 21604 * drive capacity. 21605 * flag - this argument is a pass through to ddi_copyxxx() 21606 * directly from the mode argument of ioctl(). 
21607 * 21608 * Return Code: 0 21609 * EACCESS 21610 * EFAULT 21611 * ENXIO 21612 * EIO 21613 */ 21614 21615 static int 21616 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 21617 { 21618 struct sd_lun *un = NULL; 21619 struct uscsi_cmd com; 21620 struct scsi_inquiry *sinq; 21621 struct dk_minfo media_info; 21622 u_longlong_t media_capacity; 21623 uint64_t capacity; 21624 uint_t lbasize; 21625 uchar_t *out_data; 21626 uchar_t *rqbuf; 21627 int rval = 0; 21628 int rtn; 21629 21630 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 21631 (un->un_state == SD_STATE_OFFLINE)) { 21632 return (ENXIO); 21633 } 21634 21635 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 21636 21637 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 21638 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 21639 21640 /* Issue a TUR to determine if the drive is ready with media present */ 21641 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 21642 if (rval == ENXIO) { 21643 goto done; 21644 } 21645 21646 /* Now get configuration data */ 21647 if (ISCD(un)) { 21648 media_info.dki_media_type = DK_CDROM; 21649 21650 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 21651 if (un->un_f_mmc_cap == TRUE) { 21652 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 21653 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN); 21654 21655 if (rtn) { 21656 /* 21657 * Failed for other than an illegal request 21658 * or command not supported 21659 */ 21660 if ((com.uscsi_status == STATUS_CHECK) && 21661 (com.uscsi_rqstatus == STATUS_GOOD)) { 21662 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 21663 (rqbuf[12] != 0x20)) { 21664 rval = EIO; 21665 goto done; 21666 } 21667 } 21668 } else { 21669 /* 21670 * The GET CONFIGURATION command succeeded 21671 * so set the media type according to the 21672 * returned data 21673 */ 21674 media_info.dki_media_type = out_data[6]; 21675 media_info.dki_media_type <<= 8; 21676 media_info.dki_media_type |= out_data[7]; 21677 } 21678 } 21679 } else { 21680 /* 21681 * The profile list is not available, so we attempt to identify 21682 * the media type based on the inquiry data 21683 */ 21684 sinq = un->un_sd->sd_inq; 21685 if (sinq->inq_qual == 0) { 21686 /* This is a direct access device */ 21687 media_info.dki_media_type = DK_FIXED_DISK; 21688 21689 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 21690 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 21691 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 21692 media_info.dki_media_type = DK_ZIP; 21693 } else if ( 21694 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 21695 media_info.dki_media_type = DK_JAZ; 21696 } 21697 } 21698 } else { 21699 /* Not a CD or direct access so return unknown media */ 21700 media_info.dki_media_type = DK_UNKNOWN; 21701 } 21702 } 21703 21704 /* Now read the capacity so we can provide the lbasize and capacity */ 21705 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 21706 SD_PATH_DIRECT)) { 21707 case 0: 21708 break; 21709 case EACCES: 21710 rval = EACCES; 21711 goto done; 21712 default: 21713 rval = EIO; 21714 goto done; 21715 } 21716 21717 media_info.dki_lbsize = lbasize; 21718 media_capacity = capacity; 21719 21720 /* 21721 * sd_send_scsi_READ_CAPACITY() reports capacity in 21722 * un->un_sys_blocksize chunks. So we need to convert it into 21723 * cap.lbasize chunks. 
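 *
 * For example, with a 512-byte un_sys_blocksize and 2048-byte media
 * sectors, a reported capacity of 1,000,000 system blocks is
 * presented as 250,000 media blocks.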
21724 */ 21725 media_capacity *= un->un_sys_blocksize; 21726 media_capacity /= lbasize; 21727 media_info.dki_capacity = media_capacity; 21728 21729 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 21730 rval = EFAULT; 21731 /* Put goto. Anybody might add some code below in future */ 21732 goto done; 21733 } 21734 done: 21735 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 21736 kmem_free(rqbuf, SENSE_LENGTH); 21737 return (rval); 21738 } 21739 21740 21741 /* 21742 * Function: sd_dkio_get_geometry 21743 * 21744 * Description: This routine is the driver entry point for handling user 21745 * requests to get the device geometry (DKIOCGGEOM). 21746 * 21747 * Arguments: dev - the device number 21748 * arg - pointer to user provided dk_geom structure specifying 21749 * the controller's notion of the current geometry. 21750 * flag - this argument is a pass through to ddi_copyxxx() 21751 * directly from the mode argument of ioctl(). 21752 * geom_validated - flag indicating if the device geometry has been 21753 * previously validated in the sdioctl routine. 21754 * 21755 * Return Code: 0 21756 * EFAULT 21757 * ENXIO 21758 * EIO 21759 */ 21760 21761 static int 21762 sd_dkio_get_geometry(dev_t dev, caddr_t arg, int flag, int geom_validated) 21763 { 21764 struct sd_lun *un = NULL; 21765 struct dk_geom *tmp_geom = NULL; 21766 int rval = 0; 21767 21768 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21769 return (ENXIO); 21770 } 21771 21772 #if defined(__i386) || defined(__amd64) 21773 if (un->un_solaris_size == 0) { 21774 return (EIO); 21775 } 21776 #endif 21777 if (geom_validated == FALSE) { 21778 /* 21779 * sd_validate_geometry does not spin a disk up 21780 * if it was spun down. We need to make sure it 21781 * is ready. 21782 */ 21783 if ((rval = sd_send_scsi_TEST_UNIT_READY(un, 0)) != 0) { 21784 return (rval); 21785 } 21786 mutex_enter(SD_MUTEX(un)); 21787 rval = sd_validate_geometry(un, SD_PATH_DIRECT); 21788 mutex_exit(SD_MUTEX(un)); 21789 } 21790 if (rval) 21791 return (rval); 21792 21793 /* 21794 * Make a local copy of the soft state geometry to avoid some potential 21795 * race conditions associated with holding the mutex and updating the 21796 * write_reinstruct value 21797 */ 21798 tmp_geom = kmem_zalloc(sizeof (struct dk_geom), KM_SLEEP); 21799 mutex_enter(SD_MUTEX(un)); 21800 bcopy(&un->un_g, tmp_geom, sizeof (struct dk_geom)); 21801 mutex_exit(SD_MUTEX(un)); 21802 21803 if (tmp_geom->dkg_write_reinstruct == 0) { 21804 tmp_geom->dkg_write_reinstruct = 21805 (int)((int)(tmp_geom->dkg_nsect * tmp_geom->dkg_rpm * 21806 sd_rot_delay) / (int)60000); 21807 } 21808 21809 rval = ddi_copyout(tmp_geom, (void *)arg, sizeof (struct dk_geom), 21810 flag); 21811 if (rval != 0) { 21812 rval = EFAULT; 21813 } 21814 21815 kmem_free(tmp_geom, sizeof (struct dk_geom)); 21816 return (rval); 21817 21818 } 21819 21820 21821 /* 21822 * Function: sd_dkio_set_geometry 21823 * 21824 * Description: This routine is the driver entry point for handling user 21825 * requests to set the device geometry (DKIOCSGEOM). The actual 21826 * device geometry is not updated, just the driver "notion" of it. 21827 * 21828 * Arguments: dev - the device number 21829 * arg - pointer to user provided dk_geom structure used to set 21830 * the controller's notion of the current geometry. 21831 * flag - this argument is a pass through to ddi_copyxxx() 21832 * directly from the mode argument of ioctl(). 
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 *		EIO
 */

static int
sd_dkio_set_geometry(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un = NULL;
	struct dk_geom	*tmp_geom;
	struct dk_map	*lp;
	int		rval = 0;
	int		i;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#if defined(__i386) || defined(__amd64)
	if (un->un_solaris_size == 0) {
		return (EIO);
	}
#endif
	/*
	 * We need to copy the user specified geometry into local
	 * storage and then update the softstate. We don't want to hold
	 * the mutex and copyin directly from the user to the soft state
	 */
	tmp_geom = (struct dk_geom *)
	    kmem_zalloc(sizeof (struct dk_geom), KM_SLEEP);
	rval = ddi_copyin(arg, tmp_geom, sizeof (struct dk_geom), flag);
	if (rval != 0) {
		kmem_free(tmp_geom, sizeof (struct dk_geom));
		return (EFAULT);
	}

	mutex_enter(SD_MUTEX(un));
	bcopy(tmp_geom, &un->un_g, sizeof (struct dk_geom));
	for (i = 0; i < NDKMAP; i++) {
		lp = &un->un_map[i];
		un->un_offset[i] =
		    un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno;
#if defined(__i386) || defined(__amd64)
		un->un_offset[i] += un->un_solaris_offset;
#endif
	}
	un->un_f_geometry_is_valid = FALSE;
	mutex_exit(SD_MUTEX(un));
	kmem_free(tmp_geom, sizeof (struct dk_geom));

	return (rval);
}


/*
 * Function: sd_dkio_get_partition
 *
 * Description: This routine is the driver entry point for handling user
 *		requests to get the partition table (DKIOCGAPART).
 *
 * Arguments: dev  - the device number
 *	arg  - pointer to user provided dk_allmap structure specifying
 *		the controller's notion of the current partition table.
 *	flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
 *	geom_validated - flag indicating if the device geometry has been
 *		previously validated in the sdioctl routine.
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 *		EIO
 */

static int
sd_dkio_get_partition(dev_t dev, caddr_t arg, int flag, int geom_validated)
{
	struct sd_lun	*un = NULL;
	int		rval = 0;
	int		size;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#if defined(__i386) || defined(__amd64)
	if (un->un_solaris_size == 0) {
		return (EIO);
	}
#endif
	/*
	 * Make sure the geometry is valid before getting the partition
	 * information.
	 */
	mutex_enter(SD_MUTEX(un));
	if (geom_validated == FALSE) {
		/*
		 * sd_validate_geometry does not spin a disk up
		 * if it was spun down. We need to make sure it
		 * is ready before validating the geometry.
		 */
		mutex_exit(SD_MUTEX(un));
		if ((rval = sd_send_scsi_TEST_UNIT_READY(un, 0)) != 0) {
			return (rval);
		}
		mutex_enter(SD_MUTEX(un));

		if ((rval = sd_validate_geometry(un, SD_PATH_DIRECT)) != 0) {
			mutex_exit(SD_MUTEX(un));
			return (rval);
		}
	}
	mutex_exit(SD_MUTEX(un));

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct dk_map32 dk_map32[NDKMAP];
		int i;

		for (i = 0; i < NDKMAP; i++) {
			dk_map32[i].dkl_cylno = un->un_map[i].dkl_cylno;
			dk_map32[i].dkl_nblk = un->un_map[i].dkl_nblk;
		}
		size = NDKMAP * sizeof (struct dk_map32);
		rval = ddi_copyout(dk_map32, (void *)arg, size, flag);
		if (rval != 0) {
			rval = EFAULT;
		}
		break;
	}
	case DDI_MODEL_NONE:
		size = NDKMAP * sizeof (struct dk_map);
		rval = ddi_copyout(un->un_map, (void *)arg, size, flag);
		if (rval != 0) {
			rval = EFAULT;
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	size = NDKMAP * sizeof (struct dk_map);
	rval = ddi_copyout(un->un_map, (void *)arg, size, flag);
	if (rval != 0) {
		rval = EFAULT;
	}
#endif /* _MULTI_DATAMODEL */
	return (rval);
}


/*
 * Function: sd_dkio_set_partition
 *
 * Description: This routine is the driver entry point for handling user
 *		requests to set the partition table (DKIOCSAPART). The actual
 *		device partition is not updated.
 *
 * Arguments: dev  - the device number
 *	arg  - pointer to user provided dk_allmap structure used to set
 *		the controller's notion of the partition table.
 *	flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EINVAL
 *		EFAULT
 *		ENXIO
 *		EIO
 */

static int
sd_dkio_set_partition(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un = NULL;
	struct dk_map	dk_map[NDKMAP];
	struct dk_map	*lp;
	int		rval = 0;
	int		size;
	int		i;
#if defined(_SUNOS_VTOC_16)
	struct dkl_partition	*vp;
#endif

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * Set the map for all logical partitions.  We lock
	 * the priority just to make sure an interrupt doesn't
	 * come in while the map is half updated.
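	 *
	 * Updating the map also recomputes each partition's absolute
	 * starting block from its starting cylinder; for example, with
	 * dkg_nhead = 16 and dkg_nsect = 63, a partition at cylinder 100
	 * begins at block 16 * 63 * 100 = 100800 (plus un_solaris_offset
	 * on x86).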
	 */
	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_solaris_size))
	mutex_enter(SD_MUTEX(un));
	if (un->un_blockcount > DK_MAX_BLOCKS) {
		mutex_exit(SD_MUTEX(un));
		return (ENOTSUP);
	}
	mutex_exit(SD_MUTEX(un));
	if (un->un_solaris_size == 0) {
		return (EIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct dk_map32 dk_map32[NDKMAP];

		size = NDKMAP * sizeof (struct dk_map32);
		rval = ddi_copyin((void *)arg, dk_map32, size, flag);
		if (rval != 0) {
			return (EFAULT);
		}
		for (i = 0; i < NDKMAP; i++) {
			dk_map[i].dkl_cylno = dk_map32[i].dkl_cylno;
			dk_map[i].dkl_nblk = dk_map32[i].dkl_nblk;
		}
		break;
	}
	case DDI_MODEL_NONE:
		size = NDKMAP * sizeof (struct dk_map);
		rval = ddi_copyin((void *)arg, dk_map, size, flag);
		if (rval != 0) {
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	size = NDKMAP * sizeof (struct dk_map);
	rval = ddi_copyin((void *)arg, dk_map, size, flag);
	if (rval != 0) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	mutex_enter(SD_MUTEX(un));
	/* Note: The size used in this bcopy is set based upon the data model */
	bcopy(dk_map, un->un_map, size);
#if defined(_SUNOS_VTOC_16)
	vp = (struct dkl_partition *)&(un->un_vtoc);
#endif	/* defined(_SUNOS_VTOC_16) */
	for (i = 0; i < NDKMAP; i++) {
		lp = &un->un_map[i];
		un->un_offset[i] =
		    un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno;
#if defined(_SUNOS_VTOC_16)
		vp->p_start = un->un_offset[i];
		vp->p_size = lp->dkl_nblk;
		vp++;
#endif	/* defined(_SUNOS_VTOC_16) */
#if defined(__i386) || defined(__amd64)
		un->un_offset[i] += un->un_solaris_offset;
#endif
	}
	mutex_exit(SD_MUTEX(un));
	return (rval);
}


/*
 * Function: sd_dkio_get_vtoc
 *
 * Description: This routine is the driver entry point for handling user
 *		requests to get the current volume table of contents
 *		(DKIOCGVTOC).
 *
 * Arguments: dev  - the device number
 *	arg  - pointer to user provided vtoc structure specifying
 *		the current vtoc.
 *	flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
 *	geom_validated - flag indicating if the device geometry has been
 *		previously validated in the sdioctl routine.
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 *		EIO
 */

static int
sd_dkio_get_vtoc(dev_t dev, caddr_t arg, int flag, int geom_validated)
{
	struct sd_lun	*un = NULL;
#if defined(_SUNOS_VTOC_8)
	struct vtoc	user_vtoc;
#endif	/* defined(_SUNOS_VTOC_8) */
	int		rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	mutex_enter(SD_MUTEX(un));
	if (geom_validated == FALSE) {
		/*
		 * sd_validate_geometry does not spin a disk up
		 * if it was spun down. We need to make sure it
		 * is ready.
		 */
		mutex_exit(SD_MUTEX(un));
		if ((rval = sd_send_scsi_TEST_UNIT_READY(un, 0)) != 0) {
			return (rval);
		}
		mutex_enter(SD_MUTEX(un));
		if ((rval = sd_validate_geometry(un, SD_PATH_DIRECT)) != 0) {
			mutex_exit(SD_MUTEX(un));
			return (rval);
		}
	}

#if defined(_SUNOS_VTOC_8)
	sd_build_user_vtoc(un, &user_vtoc);
	mutex_exit(SD_MUTEX(un));

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct vtoc32 user_vtoc32;

		vtoctovtoc32(user_vtoc, user_vtoc32);
		if (ddi_copyout(&user_vtoc32, (void *)arg,
		    sizeof (struct vtoc32), flag)) {
			return (EFAULT);
		}
		break;
	}

	case DDI_MODEL_NONE:
		if (ddi_copyout(&user_vtoc, (void *)arg,
		    sizeof (struct vtoc), flag)) {
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyout(&user_vtoc, (void *)arg, sizeof (struct vtoc), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

#elif defined(_SUNOS_VTOC_16)
	mutex_exit(SD_MUTEX(un));

#ifdef _MULTI_DATAMODEL
	/*
	 * The un_vtoc structure is a "struct dk_vtoc" which is always
	 * 32-bit to maintain compatibility with existing on-disk
	 * structures.  Thus, we need to convert the structure when copying
	 * it out to a datamodel-dependent "struct vtoc" in a 64-bit
	 * program.  If the target is a 32-bit program, then no conversion
	 * is necessary.
	 */
	/* LINTED: logical expression always true: op "||" */
	ASSERT(sizeof (un->un_vtoc) == sizeof (struct vtoc32));
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyout(&(un->un_vtoc), (void *)arg,
		    sizeof (un->un_vtoc), flag)) {
			return (EFAULT);
		}
		break;

	case DDI_MODEL_NONE: {
		struct vtoc user_vtoc;

		vtoc32tovtoc(un->un_vtoc, user_vtoc);
		if (ddi_copyout(&user_vtoc, (void *)arg,
		    sizeof (struct vtoc), flag)) {
			return (EFAULT);
		}
		break;
	}
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyout(&(un->un_vtoc), (void *)arg, sizeof (un->un_vtoc),
	    flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */
#else
#error "No VTOC format defined."
#endif

	return (rval);
}

static int
sd_dkio_get_efi(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un = NULL;
	dk_efi_t	user_efi;
	int		rval = 0;
	void		*buffer;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL)
		return (ENXIO);

	if (ddi_copyin(arg, &user_efi, sizeof (dk_efi_t), flag))
		return (EFAULT);

	user_efi.dki_data = (void *)(uintptr_t)user_efi.dki_data_64;

	if ((user_efi.dki_length % un->un_tgt_blocksize) ||
	    (user_efi.dki_length > un->un_max_xfer_size))
		return (EINVAL);

	buffer = kmem_alloc(user_efi.dki_length, KM_SLEEP);
	rval = sd_send_scsi_READ(un, buffer, user_efi.dki_length,
	    user_efi.dki_lba, SD_PATH_DIRECT);
	if (rval == 0 && ddi_copyout(buffer, user_efi.dki_data,
	    user_efi.dki_length, flag) != 0)
		rval = EFAULT;

	kmem_free(buffer, user_efi.dki_length);
	return (rval);
}

/*
 * Function: sd_build_user_vtoc
 *
 * Description: This routine populates a pass by reference variable with the
 *		current volume table of contents.
 *
 * Arguments: un - driver soft state (unit) structure
 *	user_vtoc - pointer to vtoc structure to be populated
 */

static void
sd_build_user_vtoc(struct sd_lun *un, struct vtoc *user_vtoc)
{
	struct dk_map2		*lpart;
	struct dk_map		*lmap;
	struct partition	*vpart;
	int			nblks;
	int			i;

	ASSERT(mutex_owned(SD_MUTEX(un)));

	/*
	 * Return vtoc structure fields in the provided VTOC area, addressed
	 * by *vtoc.
	 */
	bzero(user_vtoc, sizeof (struct vtoc));
	user_vtoc->v_bootinfo[0] = un->un_vtoc.v_bootinfo[0];
	user_vtoc->v_bootinfo[1] = un->un_vtoc.v_bootinfo[1];
	user_vtoc->v_bootinfo[2] = un->un_vtoc.v_bootinfo[2];
	user_vtoc->v_sanity = VTOC_SANE;
	user_vtoc->v_version = un->un_vtoc.v_version;
	bcopy(un->un_vtoc.v_volume, user_vtoc->v_volume, LEN_DKL_VVOL);
	user_vtoc->v_sectorsz = un->un_sys_blocksize;
	user_vtoc->v_nparts = un->un_vtoc.v_nparts;
	bcopy(un->un_vtoc.v_reserved, user_vtoc->v_reserved,
	    sizeof (un->un_vtoc.v_reserved));
	/*
	 * Convert partitioning information.
	 *
	 * Note the conversion from starting cylinder number
	 * to starting sector number.
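	 *
	 * For example, with nblks = dkg_nsect * dkg_nhead = 63 * 16 = 1008,
	 * a partition whose dkl_cylno is 10 becomes
	 * p_start = 10 * 1008 = 10080.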
	 */
	lmap = un->un_map;
	lpart = (struct dk_map2 *)un->un_vtoc.v_part;
	vpart = user_vtoc->v_part;

	nblks = un->un_g.dkg_nsect * un->un_g.dkg_nhead;

	for (i = 0; i < V_NUMPAR; i++) {
		vpart->p_tag = lpart->p_tag;
		vpart->p_flag = lpart->p_flag;
		vpart->p_start = lmap->dkl_cylno * nblks;
		vpart->p_size = lmap->dkl_nblk;
		lmap++;
		lpart++;
		vpart++;

		/* (4364927) */
		user_vtoc->timestamp[i] = (time_t)un->un_vtoc.v_timestamp[i];
	}

	bcopy(un->un_asciilabel, user_vtoc->v_asciilabel, LEN_DKL_ASCII);
}

static int
sd_dkio_partition(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun		*un = NULL;
	struct partition64	p64;
	int			rval = 0;
	uint_t			nparts;
	efi_gpe_t		*partitions;
	efi_gpt_t		*buffer;
	diskaddr_t		gpe_lba;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	if (ddi_copyin((const void *)arg, &p64,
	    sizeof (struct partition64), flag)) {
		return (EFAULT);
	}

	buffer = kmem_alloc(EFI_MIN_ARRAY_SIZE, KM_SLEEP);
	rval = sd_send_scsi_READ(un, buffer, DEV_BSIZE,
	    1, SD_PATH_DIRECT);
	if (rval != 0)
		goto done_error;

	sd_swap_efi_gpt(buffer);

	if ((rval = sd_validate_efi(buffer)) != 0)
		goto done_error;

	nparts = buffer->efi_gpt_NumberOfPartitionEntries;
	gpe_lba = buffer->efi_gpt_PartitionEntryLBA;
	if (p64.p_partno > nparts) {
		/* couldn't find it */
		rval = ESRCH;
		goto done_error;
	}
	/*
	 * if we're dealing with a partition that's out of the normal
	 * 16K block, adjust accordingly
	 */
	gpe_lba += p64.p_partno / sizeof (efi_gpe_t);
	rval = sd_send_scsi_READ(un, buffer, EFI_MIN_ARRAY_SIZE,
	    gpe_lba, SD_PATH_DIRECT);
	if (rval) {
		goto done_error;
	}
	partitions = (efi_gpe_t *)buffer;

	sd_swap_efi_gpe(nparts, partitions);

	partitions += p64.p_partno;
	bcopy(&partitions->efi_gpe_PartitionTypeGUID, &p64.p_type,
	    sizeof (struct uuid));
	p64.p_start = partitions->efi_gpe_StartingLBA;
	p64.p_size = partitions->efi_gpe_EndingLBA -
	    p64.p_start + 1;

	if (ddi_copyout(&p64, (void *)arg, sizeof (struct partition64), flag))
		rval = EFAULT;

done_error:
	kmem_free(buffer, EFI_MIN_ARRAY_SIZE);
	return (rval);
}


/*
 * Function: sd_dkio_set_vtoc
 *
 * Description: This routine is the driver entry point for handling user
 *		requests to set the current volume table of contents
 *		(DKIOCSVTOC).
 *
 * Arguments: dev  - the device number
 *	arg  - pointer to user provided vtoc structure used to set the
 *		current vtoc.
 *	flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
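 *
 * A minimal userland sketch of this interface (illustrative only; "fd"
 * is assumed to be an open file descriptor on the raw device and is not
 * part of this driver):
 *
 *	struct vtoc vt;
 *
 *	if (ioctl(fd, DKIOCGVTOC, &vt) == 0) {
 *		vt.v_part[0].p_tag = V_USR;
 *		(void) ioctl(fd, DKIOCSVTOC, &vt);
 *	}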
 *
 * Return Code: 0
 *		EFAULT
 *		ENXIO
 *		EINVAL
 *		ENOTSUP
 */

static int
sd_dkio_set_vtoc(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un = NULL;
	struct vtoc	user_vtoc;
	int		rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#if defined(__i386) || defined(__amd64)
	if (un->un_tgt_blocksize != un->un_sys_blocksize) {
		return (EINVAL);
	}
#endif

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct vtoc32 user_vtoc32;

		if (ddi_copyin((const void *)arg, &user_vtoc32,
		    sizeof (struct vtoc32), flag)) {
			return (EFAULT);
		}
		vtoc32tovtoc(user_vtoc32, user_vtoc);
		break;
	}

	case DDI_MODEL_NONE:
		if (ddi_copyin((const void *)arg, &user_vtoc,
		    sizeof (struct vtoc), flag)) {
			return (EFAULT);
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyin((const void *)arg, &user_vtoc,
	    sizeof (struct vtoc), flag)) {
		return (EFAULT);
	}
#endif /* _MULTI_DATAMODEL */

	mutex_enter(SD_MUTEX(un));
	if (un->un_blockcount > DK_MAX_BLOCKS) {
		mutex_exit(SD_MUTEX(un));
		return (ENOTSUP);
	}
	if (un->un_g.dkg_ncyl == 0) {
		mutex_exit(SD_MUTEX(un));
		return (EINVAL);
	}

	mutex_exit(SD_MUTEX(un));
	sd_clear_efi(un);
	ddi_remove_minor_node(SD_DEVINFO(un), "wd");
	ddi_remove_minor_node(SD_DEVINFO(un), "wd,raw");
	(void) ddi_create_minor_node(SD_DEVINFO(un), "h",
	    S_IFBLK, (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE,
	    un->un_node_type, NULL);
	(void) ddi_create_minor_node(SD_DEVINFO(un), "h,raw",
	    S_IFCHR, (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE,
	    un->un_node_type, NULL);
	mutex_enter(SD_MUTEX(un));

	if ((rval = sd_build_label_vtoc(un, &user_vtoc)) == 0) {
		if ((rval = sd_write_label(dev)) == 0) {
			if ((rval = sd_validate_geometry(un, SD_PATH_DIRECT))
			    != 0) {
				SD_ERROR(SD_LOG_IOCTL_DKIO, un,
				    "sd_dkio_set_vtoc: "
				    "Failed validate geometry\n");
			}
		}
	}

	/*
	 * If sd_build_label_vtoc() or sd_write_label() failed above, write
	 * the devid anyway; what can it hurt? Also preserve the device id by
	 * writing to the disk acyl for the case where a devid has been
	 * fabricated.
	 */
	if (!ISREMOVABLE(un) && !ISCD(un) &&
	    (un->un_f_opt_fab_devid == TRUE)) {
		if (un->un_devid == NULL) {
			sd_register_devid(un, SD_DEVINFO(un),
			    SD_TARGET_IS_UNRESERVED);
		} else {
			/*
			 * The device id for this disk has been
			 * fabricated. Fabricated device id's are
			 * managed by storing them in the last 2
			 * available sectors on the drive. The device
			 * id must be preserved by writing it back out
			 * to this location.
			 */
			if (sd_write_deviceid(un) != 0) {
				ddi_devid_free(un->un_devid);
				un->un_devid = NULL;
			}
		}
	}
	mutex_exit(SD_MUTEX(un));
	return (rval);
}


/*
 * Function: sd_build_label_vtoc
 *
 * Description: This routine updates the driver soft state current volume table
 *		of contents based on a user specified vtoc.
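 *
 *		The user vtoc must have v_sanity == VTOC_SANE, a v_sectorsz
 *		equal to the system block size, and exactly V_NUMPAR
 *		partitions; anything else is rejected with EINVAL.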
 *
 * Arguments: un - driver soft state (unit) structure
 *	user_vtoc - pointer to vtoc structure specifying vtoc to be used
 *		to update the driver soft state.
 *
 * Return Code: 0
 *		EINVAL
 */

static int
sd_build_label_vtoc(struct sd_lun *un, struct vtoc *user_vtoc)
{
	struct dk_map		*lmap;
	struct partition	*vpart;
	int			nblks;
#if defined(_SUNOS_VTOC_8)
	int			ncyl;
	struct dk_map2		*lpart;
#endif	/* defined(_SUNOS_VTOC_8) */
	int			i;

	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* Sanity-check the vtoc */
	if (user_vtoc->v_sanity != VTOC_SANE ||
	    user_vtoc->v_sectorsz != un->un_sys_blocksize ||
	    user_vtoc->v_nparts != V_NUMPAR) {
		return (EINVAL);
	}

	nblks = un->un_g.dkg_nsect * un->un_g.dkg_nhead;
	if (nblks == 0) {
		return (EINVAL);
	}

#if defined(_SUNOS_VTOC_8)
	vpart = user_vtoc->v_part;
	for (i = 0; i < V_NUMPAR; i++) {
		if ((vpart->p_start % nblks) != 0) {
			return (EINVAL);
		}
		ncyl = vpart->p_start / nblks;
		ncyl += vpart->p_size / nblks;
		if ((vpart->p_size % nblks) != 0) {
			ncyl++;
		}
		if (ncyl > (int)un->un_g.dkg_ncyl) {
			return (EINVAL);
		}
		vpart++;
	}
#endif	/* defined(_SUNOS_VTOC_8) */

	/* Put appropriate vtoc structure fields into the disk label */
#if defined(_SUNOS_VTOC_16)
	/*
	 * The vtoc is always a 32bit data structure to maintain the
	 * on-disk format. Convert "in place" instead of bcopying it.
	 */
	vtoctovtoc32((*user_vtoc), (*((struct vtoc32 *)&(un->un_vtoc))));

	/*
	 * in the 16-slice vtoc, starting sectors are expressed in
	 * numbers *relative* to the start of the Solaris fdisk partition.
	 */
	lmap = un->un_map;
	vpart = user_vtoc->v_part;

	for (i = 0; i < (int)user_vtoc->v_nparts; i++, lmap++, vpart++) {
		lmap->dkl_cylno = vpart->p_start / nblks;
		lmap->dkl_nblk = vpart->p_size;
	}

#elif defined(_SUNOS_VTOC_8)

	un->un_vtoc.v_bootinfo[0] = (uint32_t)user_vtoc->v_bootinfo[0];
	un->un_vtoc.v_bootinfo[1] = (uint32_t)user_vtoc->v_bootinfo[1];
	un->un_vtoc.v_bootinfo[2] = (uint32_t)user_vtoc->v_bootinfo[2];

	un->un_vtoc.v_sanity = (uint32_t)user_vtoc->v_sanity;
	un->un_vtoc.v_version = (uint32_t)user_vtoc->v_version;

	bcopy(user_vtoc->v_volume, un->un_vtoc.v_volume, LEN_DKL_VVOL);

	un->un_vtoc.v_nparts = user_vtoc->v_nparts;

	bcopy(user_vtoc->v_reserved, un->un_vtoc.v_reserved,
	    sizeof (un->un_vtoc.v_reserved));

	/*
	 * Note the conversion from starting sector number
	 * to starting cylinder number.
	 * Return error if division results in a remainder.
	 */
	lmap = un->un_map;
	lpart = un->un_vtoc.v_part;
	vpart = user_vtoc->v_part;

	for (i = 0; i < (int)user_vtoc->v_nparts; i++) {
		lpart->p_tag = vpart->p_tag;
		lpart->p_flag = vpart->p_flag;
		lmap->dkl_cylno = vpart->p_start / nblks;
		lmap->dkl_nblk = vpart->p_size;

		lmap++;
		lpart++;
		vpart++;

		/* (4387723) */
#ifdef _LP64
		if (user_vtoc->timestamp[i] > TIME32_MAX) {
			un->un_vtoc.v_timestamp[i] = TIME32_MAX;
		} else {
			un->un_vtoc.v_timestamp[i] = user_vtoc->timestamp[i];
		}
#else
		un->un_vtoc.v_timestamp[i] = user_vtoc->timestamp[i];
#endif
	}

	bcopy(user_vtoc->v_asciilabel, un->un_asciilabel, LEN_DKL_ASCII);
#else
#error "No VTOC format defined."
#endif
	return (0);
}

/*
 * Function: sd_clear_efi
 *
 * Description: This routine clears all EFI labels.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: void
 */

static void
sd_clear_efi(struct sd_lun *un)
{
	efi_gpt_t	*gpt;
	uint_t		lbasize;
	uint64_t	cap;
	int		rval;

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	gpt = kmem_alloc(sizeof (efi_gpt_t), KM_SLEEP);

	if (sd_send_scsi_READ(un, gpt, DEV_BSIZE, 1, SD_PATH_DIRECT) != 0) {
		goto done;
	}

	sd_swap_efi_gpt(gpt);
	rval = sd_validate_efi(gpt);
	if (rval == 0) {
		/* clear primary */
		bzero(gpt, sizeof (efi_gpt_t));
		if ((rval = sd_send_scsi_WRITE(un, gpt, EFI_LABEL_SIZE, 1,
		    SD_PATH_DIRECT))) {
			SD_INFO(SD_LOG_IO_PARTITION, un,
			    "sd_clear_efi: clear primary label failed\n");
		}
	}
	/* the backup */
	rval = sd_send_scsi_READ_CAPACITY(un, &cap, &lbasize,
	    SD_PATH_DIRECT);
	if (rval) {
		goto done;
	}
	if ((rval = sd_send_scsi_READ(un, gpt, lbasize,
	    cap - 1, SD_PATH_DIRECT)) != 0) {
		goto done;
	}
	sd_swap_efi_gpt(gpt);
	rval = sd_validate_efi(gpt);
	if (rval == 0) {
		/* clear backup */
		SD_TRACE(SD_LOG_IOCTL, un, "sd_clear_efi clear backup@%lu\n",
		    cap-1);
		bzero(gpt, sizeof (efi_gpt_t));
		if ((rval = sd_send_scsi_WRITE(un, gpt, EFI_LABEL_SIZE,
		    cap-1, SD_PATH_DIRECT))) {
			SD_INFO(SD_LOG_IO_PARTITION, un,
			    "sd_clear_efi: clear backup label failed\n");
		}
	}

done:
	kmem_free(gpt, sizeof (efi_gpt_t));
}

/*
 * Function: sd_set_vtoc
 *
 * Description: This routine writes the disk label (the primary copy and the
 *		backup copies) to the appropriate locations on the device.
 *
 * Arguments: un - driver soft state (unit) structure
 *	dkl - the label data to be written
 *
 * Return: 0 on success, or an error code from the failed read or write.
 */

static int
sd_set_vtoc(struct sd_lun *un, struct dk_label *dkl)
{
	void	*shadow_buf;
	uint_t	label_addr;
	int	sec;
	int	blk;
	int	head;
	int	cyl;
	int	rval;

#if defined(__i386) || defined(__amd64)
	label_addr = un->un_solaris_offset + DK_LABEL_LOC;
#else
	/* Write the primary label at block 0 of the solaris partition. */
	label_addr = 0;
#endif

	if (NOT_DEVBSIZE(un)) {
		shadow_buf = kmem_zalloc(un->un_tgt_blocksize, KM_SLEEP);
		/*
		 * Read the target's first block.
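		 *
		 * For example, with a 2048-byte target block the 512-byte
		 * label cannot be written directly: the whole target block
		 * is read, its first 512 bytes are replaced with the label,
		 * and the block is then written back (read-modify-write).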
		 */
		if ((rval = sd_send_scsi_READ(un, shadow_buf,
		    un->un_tgt_blocksize, label_addr,
		    SD_PATH_STANDARD)) != 0) {
			goto exit;
		}
		/*
		 * Copy the contents of the label into the shadow buffer
		 * which is of the size of target block size.
		 */
		bcopy(dkl, shadow_buf, sizeof (struct dk_label));
	}

	/* Write the primary label */
	if (NOT_DEVBSIZE(un)) {
		rval = sd_send_scsi_WRITE(un, shadow_buf, un->un_tgt_blocksize,
		    label_addr, SD_PATH_STANDARD);
	} else {
		rval = sd_send_scsi_WRITE(un, dkl, un->un_sys_blocksize,
		    label_addr, SD_PATH_STANDARD);
	}
	if (rval != 0) {
		/* Use goto exit so that the shadow buffer is freed. */
		goto exit;
	}

	/*
	 * Calculate where the backup labels go.  They are always on
	 * the last alternate cylinder, but some older drives put them
	 * on head 2 instead of the last head.  They are always on the
	 * first 5 odd sectors of the appropriate track.
	 *
	 * We have no choice at this point, but to believe that the
	 * disk label is valid.  Use the geometry of the disk
	 * as described in the label.
	 */
	cyl = dkl->dkl_ncyl + dkl->dkl_acyl - 1;
	head = dkl->dkl_nhead - 1;

	/*
	 * Write and verify the backup labels. Make sure we don't try to
	 * write past the last cylinder.
	 */
	for (sec = 1; ((sec < 5 * 2 + 1) && (sec < dkl->dkl_nsect)); sec += 2) {
		blk = (daddr_t)(
		    (cyl * ((dkl->dkl_nhead * dkl->dkl_nsect) - dkl->dkl_apc)) +
		    (head * dkl->dkl_nsect) + sec);
#if defined(__i386) || defined(__amd64)
		blk += un->un_solaris_offset;
#endif
		if (NOT_DEVBSIZE(un)) {
			uint64_t tblk;
			/*
			 * Need to read the block first for read modify write.
			 */
			tblk = (uint64_t)blk;
			blk = (int)((tblk * un->un_sys_blocksize) /
			    un->un_tgt_blocksize);
			if ((rval = sd_send_scsi_READ(un, shadow_buf,
			    un->un_tgt_blocksize, blk,
			    SD_PATH_STANDARD)) != 0) {
				goto exit;
			}
			/*
			 * Modify the shadow buffer with the label.
			 */
			bcopy(dkl, shadow_buf, sizeof (struct dk_label));
			rval = sd_send_scsi_WRITE(un, shadow_buf,
			    un->un_tgt_blocksize, blk, SD_PATH_STANDARD);
		} else {
			rval = sd_send_scsi_WRITE(un, dkl,
			    un->un_sys_blocksize, blk, SD_PATH_STANDARD);
			SD_INFO(SD_LOG_IO_PARTITION, un,
			    "sd_set_vtoc: wrote backup label %d\n", blk);
		}
		if (rval != 0) {
			goto exit;
		}
	}
exit:
	if (NOT_DEVBSIZE(un)) {
		kmem_free(shadow_buf, un->un_tgt_blocksize);
	}
	return (rval);
}

/*
 * Function: sd_clear_vtoc
 *
 * Description: This routine clears out the VTOC labels.
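 *
 *		Clearing is done by writing a zeroed dk_label over the primary
 *		and backup label locations via sd_set_vtoc(), using the current
 *		soft state geometry to locate them.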
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return: void
 */

static void
sd_clear_vtoc(struct sd_lun *un)
{
	struct dk_label *dkl;

	mutex_exit(SD_MUTEX(un));
	dkl = kmem_zalloc(sizeof (struct dk_label), KM_SLEEP);
	mutex_enter(SD_MUTEX(un));
	/*
	 * sd_set_vtoc uses these fields in order to figure out
	 * where to overwrite the backup labels
	 */
	dkl->dkl_apc = un->un_g.dkg_apc;
	dkl->dkl_ncyl = un->un_g.dkg_ncyl;
	dkl->dkl_acyl = un->un_g.dkg_acyl;
	dkl->dkl_nhead = un->un_g.dkg_nhead;
	dkl->dkl_nsect = un->un_g.dkg_nsect;
	mutex_exit(SD_MUTEX(un));
	(void) sd_set_vtoc(un, dkl);
	kmem_free(dkl, sizeof (struct dk_label));

	mutex_enter(SD_MUTEX(un));
}

/*
 * Function: sd_write_label
 *
 * Description: This routine will validate and write the driver soft state vtoc
 *		contents to the device.
 *
 * Arguments: dev - the device number
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *		0
 *		EINVAL
 *		ENXIO
 *		ENOMEM
 */

static int
sd_write_label(dev_t dev)
{
	struct sd_lun		*un;
	struct dk_label		*dkl;
	short			sum;
	short			*sp;
	int			i;
	int			rval;

	if (((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}
	ASSERT(mutex_owned(SD_MUTEX(un)));
	mutex_exit(SD_MUTEX(un));
	dkl = kmem_zalloc(sizeof (struct dk_label), KM_SLEEP);
	mutex_enter(SD_MUTEX(un));

	bcopy(&un->un_vtoc, &dkl->dkl_vtoc, sizeof (struct dk_vtoc));
	dkl->dkl_rpm = un->un_g.dkg_rpm;
	dkl->dkl_pcyl = un->un_g.dkg_pcyl;
	dkl->dkl_apc = un->un_g.dkg_apc;
	dkl->dkl_intrlv = un->un_g.dkg_intrlv;
	dkl->dkl_ncyl = un->un_g.dkg_ncyl;
	dkl->dkl_acyl = un->un_g.dkg_acyl;
	dkl->dkl_nhead = un->un_g.dkg_nhead;
	dkl->dkl_nsect = un->un_g.dkg_nsect;

#if defined(_SUNOS_VTOC_8)
	dkl->dkl_obs1 = un->un_g.dkg_obs1;
	dkl->dkl_obs2 = un->un_g.dkg_obs2;
	dkl->dkl_obs3 = un->un_g.dkg_obs3;
	for (i = 0; i < NDKMAP; i++) {
		dkl->dkl_map[i].dkl_cylno = un->un_map[i].dkl_cylno;
		dkl->dkl_map[i].dkl_nblk = un->un_map[i].dkl_nblk;
	}
	bcopy(un->un_asciilabel, dkl->dkl_asciilabel, LEN_DKL_ASCII);
#elif defined(_SUNOS_VTOC_16)
	dkl->dkl_skew = un->un_dkg_skew;
#else
#error "No VTOC format defined."
#endif

	dkl->dkl_magic = DKL_MAGIC;
	dkl->dkl_write_reinstruct = un->un_g.dkg_write_reinstruct;
	dkl->dkl_read_reinstruct = un->un_g.dkg_read_reinstruct;

	/* Construct checksum for the new disk label */
	sum = 0;
	sp = (short *)dkl;
	i = sizeof (struct dk_label) / sizeof (short);
	while (i--) {
		sum ^= *sp++;
	}
	dkl->dkl_cksum = sum;

	mutex_exit(SD_MUTEX(un));

	rval = sd_set_vtoc(un, dkl);
	kmem_free(dkl, sizeof (struct dk_label));
	mutex_enter(SD_MUTEX(un));
	return (rval);
}

static int
sd_dkio_set_efi(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un = NULL;
	dk_efi_t	user_efi;
	int		rval = 0;
	void		*buffer;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL)
		return (ENXIO);

	if (ddi_copyin(arg, &user_efi, sizeof (dk_efi_t), flag))
		return (EFAULT);

	user_efi.dki_data = (void *)(uintptr_t)user_efi.dki_data_64;

	if ((user_efi.dki_length % un->un_tgt_blocksize) ||
	    (user_efi.dki_length > un->un_max_xfer_size))
		return (EINVAL);

	buffer = kmem_alloc(user_efi.dki_length, KM_SLEEP);
	if (ddi_copyin(user_efi.dki_data, buffer, user_efi.dki_length, flag)) {
		rval = EFAULT;
	} else {
		/*
		 * let's clear the vtoc labels and clear the softstate
		 * vtoc.
		 */
		mutex_enter(SD_MUTEX(un));
		if (un->un_vtoc.v_sanity == VTOC_SANE) {
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_dkio_set_efi: CLEAR VTOC\n");
			sd_clear_vtoc(un);
			bzero(&un->un_vtoc, sizeof (struct dk_vtoc));
			mutex_exit(SD_MUTEX(un));
			ddi_remove_minor_node(SD_DEVINFO(un), "h");
			ddi_remove_minor_node(SD_DEVINFO(un), "h,raw");
			(void) ddi_create_minor_node(SD_DEVINFO(un), "wd",
			    S_IFBLK,
			    (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE,
			    un->un_node_type, NULL);
			(void) ddi_create_minor_node(SD_DEVINFO(un), "wd,raw",
			    S_IFCHR,
			    (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE,
			    un->un_node_type, NULL);
		} else
			mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_WRITE(un, buffer, user_efi.dki_length,
		    user_efi.dki_lba, SD_PATH_DIRECT);
		if (rval == 0) {
			mutex_enter(SD_MUTEX(un));
			un->un_f_geometry_is_valid = FALSE;
			mutex_exit(SD_MUTEX(un));
		}
	}
	kmem_free(buffer, user_efi.dki_length);
	return (rval);
}

/*
 * Function: sd_dkio_get_mboot
 *
 * Description: This routine is the driver entry point for handling user
 *		requests to get the current device mboot (DKIOCGMBOOT)
 *
 * Arguments: dev  - the device number
 *	arg  - pointer to user provided mboot structure specifying
 *		the current mboot.
 *	flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
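 *
 * A minimal userland sketch of this interface (illustrative only; "fd"
 * is assumed to be an open file descriptor on the raw device and is not
 * part of this driver):
 *
 *	struct mboot mb;
 *
 *	if (ioctl(fd, DKIOCGMBOOT, &mb) == 0) {
 *		(inspect the fdisk entries in mb.parts[])
 *	}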
 *
 * Return Code: 0
 *		EINVAL
 *		EFAULT
 *		ENXIO
 */

static int
sd_dkio_get_mboot(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un;
	struct mboot	*mboot;
	int		rval;
	size_t		buffer_size;

	if (((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

#if defined(_SUNOS_VTOC_8)
	if ((!ISREMOVABLE(un)) || (arg == NULL)) {
#elif defined(_SUNOS_VTOC_16)
	if (arg == NULL) {
#endif
		return (EINVAL);
	}

	/*
	 * Read the mboot block, located at absolute block 0 on the target.
	 */
	buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct mboot));

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_dkio_get_mboot: allocation size: 0x%x\n", buffer_size);

	mboot = kmem_zalloc(buffer_size, KM_SLEEP);
	if ((rval = sd_send_scsi_READ(un, mboot, buffer_size, 0,
	    SD_PATH_STANDARD)) == 0) {
		if (ddi_copyout(mboot, (void *)arg,
		    sizeof (struct mboot), flag) != 0) {
			rval = EFAULT;
		}
	}
	kmem_free(mboot, buffer_size);
	return (rval);
}


/*
 * Function: sd_dkio_set_mboot
 *
 * Description: This routine is the driver entry point for handling user
 *		requests to validate and set the device master boot
 *		(DKIOCSMBOOT).
 *
 * Arguments: dev  - the device number
 *	arg  - pointer to user provided mboot structure used to set the
 *		master boot.
 *	flag - this argument is a pass through to ddi_copyxxx()
 *		directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *		EINVAL
 *		EFAULT
 *		ENXIO
 */

static int
sd_dkio_set_mboot(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un = NULL;
	struct mboot	*mboot = NULL;
	int		rval;
	ushort_t	magic;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

#if defined(_SUNOS_VTOC_8)
	if (!ISREMOVABLE(un)) {
		return (EINVAL);
	}
#endif

	if (arg == NULL) {
		return (EINVAL);
	}

	mboot = kmem_zalloc(sizeof (struct mboot), KM_SLEEP);

	if (ddi_copyin((const void *)arg, mboot,
	    sizeof (struct mboot), flag) != 0) {
		kmem_free(mboot, (size_t)(sizeof (struct mboot)));
		return (EFAULT);
	}

	/* Is this really a master boot record? */
	magic = LE_16(mboot->signature);
	if (magic != MBB_MAGIC) {
		kmem_free(mboot, (size_t)(sizeof (struct mboot)));
		return (EINVAL);
	}

	rval = sd_send_scsi_WRITE(un, mboot, un->un_sys_blocksize, 0,
	    SD_PATH_STANDARD);

	mutex_enter(SD_MUTEX(un));
#if defined(__i386) || defined(__amd64)
	if (rval == 0) {
		/*
		 * mboot has been written successfully.
		 * update the fdisk and vtoc tables in memory
		 */
		rval = sd_update_fdisk_and_vtoc(un);
		if ((un->un_f_geometry_is_valid == FALSE) || (rval != 0)) {
			mutex_exit(SD_MUTEX(un));
			kmem_free(mboot, (size_t)(sizeof (struct mboot)));
			return (rval);
		}
	}

	/*
	 * If the mboot write fails, write the devid anyway, what can it hurt?
	 * Also preserve the device id by writing to the disk acyl for the case
	 * where a devid has been fabricated.
	 */
	if (!ISREMOVABLE(un) && !ISCD(un) &&
	    (un->un_f_opt_fab_devid == TRUE)) {
		if (un->un_devid == NULL) {
			sd_register_devid(un, SD_DEVINFO(un),
			    SD_TARGET_IS_UNRESERVED);
		} else {
			/*
			 * The device id for this disk has been
			 * fabricated. Fabricated device id's are
			 * managed by storing them in the last 2
			 * available sectors on the drive. The device
			 * id must be preserved by writing it back out
			 * to this location.
			 */
			if (sd_write_deviceid(un) != 0) {
				ddi_devid_free(un->un_devid);
				un->un_devid = NULL;
			}
		}
	}
#else
	if (rval == 0) {
		/*
		 * mboot has been written successfully.
		 * set up the default geometry and VTOC
		 */
		if (un->un_blockcount <= DK_MAX_BLOCKS)
			sd_setup_default_geometry(un);
	}
#endif
	mutex_exit(SD_MUTEX(un));
	kmem_free(mboot, (size_t)(sizeof (struct mboot)));
	return (rval);
}


/*
 * Function: sd_setup_default_geometry
 *
 * Description: This local utility routine sets the default geometry as part of
 *		setting the device mboot.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Note: This may be redundant with sd_build_default_label.
 */

static void
sd_setup_default_geometry(struct sd_lun *un)
{
	/* zero out the soft state geometry and partition table. */
	bzero(&un->un_g, sizeof (struct dk_geom));
	bzero(&un->un_vtoc, sizeof (struct dk_vtoc));
	bzero(un->un_map, NDKMAP * (sizeof (struct dk_map)));
	un->un_asciilabel[0] = '\0';

	/*
	 * For the rpm, we use the minimum for the disk.
	 * For the head, cyl, and number of sectors per track: if the
	 * capacity is <= 1GB, head = 64 and sect = 32; else head = 255
	 * and sect = 63. Note: the capacity should be equal to the
	 * C*H*S values. This will cause some truncation of size due
	 * to round off errors. For CD-ROMs, this truncation can have
	 * adverse side effects, so we return ncyl and nhead as 1. The
	 * nsect will overflow for most CD-ROMs, as nsect is of type
	 * ushort.
	 */
	if (ISCD(un)) {
		un->un_g.dkg_ncyl = 1;
		un->un_g.dkg_nhead = 1;
		un->un_g.dkg_nsect = un->un_blockcount;
	} else {
		if (un->un_blockcount <= 0x1000) {
			/*
			 * Needed for unlabeled SCSI floppies.
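			 * For example, 1.44MB media reports 2880 512-byte
			 * blocks, giving 2 heads x 80 cylinders x 18 sectors
			 * per track (2880 / (2 * 80) = 18).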
			 */
			un->un_g.dkg_nhead = 2;
			un->un_g.dkg_ncyl = 80;
			un->un_g.dkg_pcyl = 80;
			un->un_g.dkg_nsect = un->un_blockcount / (2 * 80);
		} else if (un->un_blockcount <= 0x200000) {
			un->un_g.dkg_nhead = 64;
			un->un_g.dkg_nsect = 32;
			un->un_g.dkg_ncyl = un->un_blockcount / (64 * 32);
		} else {
			un->un_g.dkg_nhead = 255;
			un->un_g.dkg_nsect = 63;
			un->un_g.dkg_ncyl = un->un_blockcount / (255 * 63);
		}
		un->un_blockcount = un->un_g.dkg_ncyl *
		    un->un_g.dkg_nhead * un->un_g.dkg_nsect;
	}
	un->un_g.dkg_acyl = 0;
	un->un_g.dkg_bcyl = 0;
	un->un_g.dkg_intrlv = 1;
	un->un_g.dkg_rpm = 200;
	un->un_g.dkg_read_reinstruct = 0;
	un->un_g.dkg_write_reinstruct = 0;
	if (un->un_g.dkg_pcyl == 0) {
		un->un_g.dkg_pcyl = un->un_g.dkg_ncyl + un->un_g.dkg_acyl;
	}

	un->un_map['a'-'a'].dkl_cylno = 0;
	un->un_map['a'-'a'].dkl_nblk = un->un_blockcount;
	un->un_map['c'-'a'].dkl_cylno = 0;
	un->un_map['c'-'a'].dkl_nblk = un->un_blockcount;
	un->un_f_geometry_is_valid = FALSE;
}


#if defined(__i386) || defined(__amd64)
/*
 * Function: sd_update_fdisk_and_vtoc
 *
 * Description: This local utility routine updates the device fdisk and vtoc
 *		as part of setting the device mboot.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: 0 for success or errno-type return code.
 *
 * Note: x86: This looks like a duplicate of sd_validate_geometry(), but
 *	these did exist separately in x86 sd.c.
 */

static int
sd_update_fdisk_and_vtoc(struct sd_lun *un)
{
	static char	labelstring[128];
	static char	buf[256];
	char		*label = 0;
	int		count;
	int		label_rc = 0;
	int		gvalid = un->un_f_geometry_is_valid;
	int		fdisk_rval;
	int		lbasize;
	int		capacity;

	ASSERT(mutex_owned(SD_MUTEX(un)));

	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		return (EINVAL);
	}

	if (un->un_f_blockcount_is_valid == FALSE) {
		return (EINVAL);
	}

#if defined(_SUNOS_VTOC_16)
	/*
	 * Set up the "whole disk" fdisk partition; this should always
	 * exist, regardless of whether the disk contains an fdisk table
	 * or vtoc.
	 */
	un->un_map[P0_RAW_DISK].dkl_cylno = 0;
	un->un_map[P0_RAW_DISK].dkl_nblk = un->un_blockcount;
#endif	/* defined(_SUNOS_VTOC_16) */

	/*
	 * copy the lbasize and capacity so that if they're
	 * reset while we're not holding the SD_MUTEX(un), we will
	 * continue to use valid values after the SD_MUTEX(un) is
	 * reacquired.
	 */
	lbasize = un->un_tgt_blocksize;
	capacity = un->un_blockcount;

	/*
	 * refresh the logical and physical geometry caches.
	 * (data from mode sense format/rigid disk geometry pages,
	 * and scsi_ifgetcap("geometry").
	 */
	sd_resync_geom_caches(un, capacity, lbasize, SD_PATH_DIRECT);

	/*
	 * Only DIRECT ACCESS devices will have Sun labels.
	 * CD's supposedly have a Sun label, too.
	 */
	if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT || ISREMOVABLE(un)) {
		fdisk_rval = sd_read_fdisk(un, capacity, lbasize,
		    SD_PATH_DIRECT);
		if (fdisk_rval == SD_CMD_FAILURE) {
			ASSERT(mutex_owned(SD_MUTEX(un)));
			return (EIO);
		}

		if (fdisk_rval == SD_CMD_RESERVATION_CONFLICT) {
			ASSERT(mutex_owned(SD_MUTEX(un)));
			return (EACCES);
		}

		if (un->un_solaris_size <= DK_LABEL_LOC) {
			/*
			 * Found fdisk table but no Solaris partition entry,
			 * so don't call sd_uselabel() and don't create
			 * a default label.
			 */
			label_rc = 0;
			un->un_f_geometry_is_valid = TRUE;
			goto no_solaris_partition;
		}

#if defined(_SUNOS_VTOC_8)
		label = (char *)un->un_asciilabel;
#elif defined(_SUNOS_VTOC_16)
		label = (char *)un->un_vtoc.v_asciilabel;
#else
#error "No VTOC format defined."
#endif
	} else if (capacity < 0) {
		ASSERT(mutex_owned(SD_MUTEX(un)));
		return (EINVAL);
	}

	/*
	 * For removable media we reach here if we have found a
	 * SOLARIS PARTITION.
	 * If un_f_geometry_is_valid is FALSE it indicates that the SOLARIS
	 * PARTITION has changed from the previous one, hence we will setup a
	 * default VTOC in this case.
	 */
	if (un->un_f_geometry_is_valid == FALSE) {
		sd_build_default_label(un);
		label_rc = 0;
	}

no_solaris_partition:
	if ((!ISREMOVABLE(un) ||
	    (ISREMOVABLE(un) && un->un_mediastate == DKIO_EJECTED)) &&
	    (un->un_state == SD_STATE_NORMAL && gvalid == FALSE)) {
		/*
		 * Print out a message indicating who and what we are.
		 * We do this only when we happen to really validate the
		 * geometry. We may call sd_validate_geometry() at other
		 * times, e.g. for ioctl()'s like Get VTOC, in which case
		 * we don't want to print the label.
		 * If the geometry is valid, print the label string,
		 * else print vendor and product info, if available.
		 */
		if ((un->un_f_geometry_is_valid == TRUE) && (label != NULL)) {
			SD_INFO(SD_LOG_IOCTL_DKIO, un, "?<%s>\n", label);
		} else {
			mutex_enter(&sd_label_mutex);
			sd_inq_fill(SD_INQUIRY(un)->inq_vid, VIDMAX,
			    labelstring);
			sd_inq_fill(SD_INQUIRY(un)->inq_pid, PIDMAX,
			    &labelstring[64]);
			(void) sprintf(buf, "?Vendor '%s', product '%s'",
			    labelstring, &labelstring[64]);
			if (un->un_f_blockcount_is_valid == TRUE) {
				(void) sprintf(&buf[strlen(buf)],
				    ", %" PRIu64 " %u byte blocks\n",
				    un->un_blockcount,
				    un->un_tgt_blocksize);
			} else {
				(void) sprintf(&buf[strlen(buf)],
				    ", (unknown capacity)\n");
			}
			SD_INFO(SD_LOG_IOCTL_DKIO, un, buf);
			mutex_exit(&sd_label_mutex);
		}
	}

#if defined(_SUNOS_VTOC_16)
	/*
	 * If we have valid geometry, set up the remaining fdisk partitions.
	 * Note that dkl_cylno is not used for the fdisk map entries, so
	 * we set it to an entirely bogus value.
	 */
	for (count = 0; count < FD_NUMPART; count++) {
		un->un_map[FDISK_P1 + count].dkl_cylno = -1;
		un->un_map[FDISK_P1 + count].dkl_nblk =
		    un->un_fmap[count].fmap_nblk;
		un->un_offset[FDISK_P1 + count] =
		    un->un_fmap[count].fmap_start;
	}
#endif

	for (count = 0; count < NDKMAP; count++) {
#if defined(_SUNOS_VTOC_8)
		struct dk_map *lp = &un->un_map[count];
		un->un_offset[count] =
		    un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno;
#elif defined(_SUNOS_VTOC_16)
		struct dkl_partition *vp = &un->un_vtoc.v_part[count];
		un->un_offset[count] = vp->p_start + un->un_solaris_offset;
#else
#error "No VTOC format defined."
#endif
	}

	ASSERT(mutex_owned(SD_MUTEX(un)));
	return (label_rc);
}
#endif


/*
 * Function: sd_check_media
 *
 * Description: This utility routine implements the functionality for the
 *		DKIOCSTATE ioctl. This ioctl blocks the user thread until the
 *		driver state changes from that specified by the user
 *		(inserted or ejected). For example, if the user specifies
 *		DKIO_EJECTED and the current media state is inserted this
 *		routine will immediately return DKIO_INSERTED. However, if the
 *		current media state is not inserted the user thread will be
 *		blocked until the drive state changes. If DKIO_NONE is specified
 *		the user thread will block until a drive state change occurs.
 *
 * Arguments: dev  - the device number
 *	state - user pointer to a dkio_state, updated with the current
 *		drive state at return.
 *
 * Return Code: ENXIO
 *		EIO
 *		EAGAIN
 *		EINTR
 */

static int
sd_check_media(dev_t dev, enum dkio_state state)
{
	struct sd_lun		*un = NULL;
	enum dkio_state		prev_state;
	opaque_t		token = NULL;
	int			rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n");

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: "
	    "state=%x, mediastate=%x\n", state, un->un_mediastate);

	prev_state = un->un_mediastate;

	/* is there anything to do? */
	if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) {
		/*
		 * submit the request to the scsi_watch service;
		 * scsi_media_watch_cb() does the real work
		 */
		mutex_exit(SD_MUTEX(un));

		/*
		 * This change handles the case where a scsi watch request is
		 * added to a device that is powered down. To accomplish this
		 * we power up the device before adding the scsi watch request,
		 * since the scsi watch sends a TUR directly to the device
		 * which the device cannot handle if it is powered down.
		 */
		if (sd_pm_entry(un) != DDI_SUCCESS) {
			mutex_enter(SD_MUTEX(un));
			goto done;
		}

		token = scsi_watch_request_submit(SD_SCSI_DEVP(un),
		    sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
		    (caddr_t)dev);

		sd_pm_exit(un);

		mutex_enter(SD_MUTEX(un));
		if (token == NULL) {
			rval = EAGAIN;
			goto done;
		}

		/*
		 * This is a special case IOCTL that doesn't return
		 * until the media state changes.
		 * Routine sdpower knows about and handles this so don't
		 * count it as an active cmd in the driver, which would
		 * keep the device busy to the pm framework.
		 * If the count isn't decremented the device can't
		 * be powered down.
		 */
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);

		/*
		 * if a prior request had been made, this will be the same
		 * token, as scsi_watch was designed that way.
		 */
		un->un_swr_token = token;
		un->un_specified_mediastate = state;

		/*
		 * now wait for media change
		 * we will not be signalled unless mediastate == state but it
		 * is still better to test for this condition, since there is
		 * a 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED
		 */
		SD_TRACE(SD_LOG_COMMON, un,
		    "sd_check_media: waiting for media state change\n");
		while (un->un_mediastate == state) {
			if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
				SD_TRACE(SD_LOG_COMMON, un,
				    "sd_check_media: waiting for media state "
				    "was interrupted\n");
				un->un_ncmds_in_driver++;
				rval = EINTR;
				goto done;
			}
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_check_media: received signal, state=%x\n",
			    un->un_mediastate);
		}
		/*
		 * Inc the counter to indicate the device once again
		 * has an active outstanding cmd.
		 */
		un->un_ncmds_in_driver++;
	}

	/* invalidate geometry */
	if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
		sr_ejected(un);
	}

	if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
		uint64_t	capacity;
		uint_t		lbasize;

		SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
		mutex_exit(SD_MUTEX(un));
		/*
		 * Since the following routines use SD_PATH_DIRECT, we must
		 * call PM directly before the upcoming disk accesses. This
		 * may cause the disk to be powered/spun up.
		 */

		if (sd_pm_entry(un) == DDI_SUCCESS) {
			rval = sd_send_scsi_READ_CAPACITY(un,
			    &capacity,
			    &lbasize, SD_PATH_DIRECT);
			if (rval != 0) {
				sd_pm_exit(un);
				mutex_enter(SD_MUTEX(un));
				goto done;
			}
		} else {
			rval = EIO;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}
		mutex_enter(SD_MUTEX(un));

		sd_update_block_info(un, lbasize, capacity);

		un->un_f_geometry_is_valid = FALSE;
		(void) sd_validate_geometry(un, SD_PATH_DIRECT);

		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
		    SD_PATH_DIRECT);
		sd_pm_exit(un);

		mutex_enter(SD_MUTEX(un));
	}
done:
	un->un_f_watcht_stopped = FALSE;
	if (un->un_swr_token) {
		/*
		 * Use of this local token and the mutex ensures that we avoid
		 * some race conditions associated with terminating the
		 * scsi watch.
		 */
		token = un->un_swr_token;
		un->un_swr_token = (opaque_t)NULL;
		mutex_exit(SD_MUTEX(un));
		(void) scsi_watch_request_terminate(token,
		    SCSI_WATCH_TERMINATE_WAIT);
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * Update the capacity kstat value, if no media previously
	 * (capacity kstat is 0) and a media has been inserted
	 * (un_f_blockcount_is_valid == TRUE).
	 * This is a more generic way than checking for ISREMOVABLE.
	 */
	if (un->un_errstats) {
		struct sd_errstats *stp = NULL;

		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		if ((stp->sd_capacity.value.ui64 == 0) &&
		    (un->un_f_blockcount_is_valid == TRUE)) {
			stp->sd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_blockcount *
			    un->un_sys_blocksize);
		}
	}
	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n");
	return (rval);
}


/*
 * Function: sd_delayed_cv_broadcast
 *
 * Description: Delayed cv_broadcast to allow for target to recover from media
 *		insertion.
 *
 * Arguments: arg - driver soft state (unit) structure
 */

static void
sd_delayed_cv_broadcast(void *arg)
{
	struct sd_lun *un = arg;

	SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n");

	mutex_enter(SD_MUTEX(un));
	un->un_dcvb_timeid = NULL;
	cv_broadcast(&un->un_state_cv);
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_media_watch_cb
 *
 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This
 *		routine processes the TUR sense data and updates the driver
 *		state if a transition has occurred. The user thread
 *		(sd_check_media) is then signalled.
 *
 * Arguments: arg - the device 'dev_t' is used for context to discriminate
 *		among multiple watches that share this callback function
 *	resultp - scsi watch facility result packet containing scsi
 *		packet, status byte and sense data
 *
 * Return Code: 0 for success, -1 for failure
 */

static int
sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
	struct sd_lun			*un;
	struct scsi_status		*statusp = resultp->statusp;
	struct scsi_extended_sense	*sensep = resultp->sensep;
	enum dkio_state			state = DKIO_NONE;
	dev_t				dev = (dev_t)arg;
	uchar_t				actual_sense_length;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (-1);
	}
	actual_sense_length = resultp->actual_sense_length;

	mutex_enter(SD_MUTEX(un));
	SD_TRACE(SD_LOG_COMMON, un,
	    "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
	    *((char *)statusp), (void *)sensep, actual_sense_length);

	if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
		un->un_mediastate = DKIO_DEV_GONE;
		printf("sd_media_watch_cb: dev gone\n");
		cv_broadcast(&un->un_state_cv);
		mutex_exit(SD_MUTEX(un));

		return (0);
	}

	/*
	 * If there was a check condition then sensep points to valid sense
	 * data. If status was not a check condition but a reservation or busy
	 * status then the new state is DKIO_NONE.
	 */
	if (sensep != NULL) {
		SD_INFO(SD_LOG_COMMON, un,
		    "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
		    sensep->es_key, sensep->es_add_code, sensep->es_qual_code);
		/* This routine only uses up to 13 bytes of sense data. */
		if (actual_sense_length >= 13) {
			if (sensep->es_key == KEY_UNIT_ATTENTION) {
				if (sensep->es_add_code == 0x28) {
					state = DKIO_INSERTED;
				}
			} else {
				/*
				 * A sense of 02/04/02 means that the host
				 * should send a start command.
Explicitly
23733 				 * leave the media state as is
23734 				 * (inserted), since the media is present
23735 				 * and the host has stopped the device for PM
23736 				 * reasons. The next true read/write
23737 				 * to this media will bring the
23738 				 * device to the right state for
23739 				 * media access.
23740 				 */
23741 				if ((sensep->es_key == KEY_NOT_READY) &&
23742 				    (sensep->es_add_code == 0x3a)) {
23743 					state = DKIO_EJECTED;
23744 				}
23745
23746 				/*
23747 				 * If the drive is busy with an operation
23748 				 * or long write, keep the media in an
23749 				 * inserted state.
23750 				 */
23751
23752 				if ((sensep->es_key == KEY_NOT_READY) &&
23753 				    (sensep->es_add_code == 0x04) &&
23754 				    ((sensep->es_qual_code == 0x02) ||
23755 				    (sensep->es_qual_code == 0x07) ||
23756 				    (sensep->es_qual_code == 0x08))) {
23757 					state = DKIO_INSERTED;
23758 				}
23759 			}
23760 		}
23761 	} else if ((*((char *)statusp) == STATUS_GOOD) &&
23762 	    (resultp->pkt->pkt_reason == CMD_CMPLT)) {
23763 		state = DKIO_INSERTED;
23764 	}
23765
23766 	SD_TRACE(SD_LOG_COMMON, un,
23767 	    "sd_media_watch_cb: state=%x, specified=%x\n",
23768 	    state, un->un_specified_mediastate);
23769
23770 	/*
23771 	 * Now signal the waiting thread if this is *not* the specified state;
23772 	 * delay the signal if the state is DKIO_INSERTED to allow the target
23773 	 * to recover.
23774 	 */
23775 	if (state != un->un_specified_mediastate) {
23776 		un->un_mediastate = state;
23777 		if (state == DKIO_INSERTED) {
23778 			/*
23779 			 * Delay the signal to give the drive a chance
23780 			 * to do what it apparently needs to do.
23781 			 */
23782 			SD_TRACE(SD_LOG_COMMON, un,
23783 			    "sd_media_watch_cb: delayed cv_broadcast\n");
23784 			if (un->un_dcvb_timeid == NULL) {
23785 				un->un_dcvb_timeid =
23786 				    timeout(sd_delayed_cv_broadcast, un,
23787 				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
23788 			}
23789 		} else {
23790 			SD_TRACE(SD_LOG_COMMON, un,
23791 			    "sd_media_watch_cb: immediate cv_broadcast\n");
23792 			cv_broadcast(&un->un_state_cv);
23793 		}
23794 	}
23795 	mutex_exit(SD_MUTEX(un));
23796 	return (0);
23797 }
23798
23799
23800 /*
23801  * Function: sd_dkio_get_temp
23802  *
23803  * Description: This routine is the driver entry point for handling ioctl
23804  *		requests to get the disk temperature.
23805  *
23806  * Arguments: dev - the device number
23807  *		arg - pointer to user provided dk_temperature structure.
23808  *		flag - this argument is a pass through to ddi_copyxxx()
23809  *			directly from the mode argument of ioctl().
23810  *
23811  * Return Code: 0
23812  *		EFAULT
23813  *		ENXIO
23814  *		EAGAIN
23815  */
23816
23817 static int
23818 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag)
23819 {
23820 	struct sd_lun		*un = NULL;
23821 	struct dk_temperature	*dktemp = NULL;
23822 	uchar_t			*temperature_page;
23823 	int			rval = 0;
23824 	int			path_flag = SD_PATH_STANDARD;
23825
23826 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23827 		return (ENXIO);
23828 	}
23829
23830 	dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP);
23831
23832 	/* copyin the disk temp argument to get the user flags */
23833 	if (ddi_copyin((void *)arg, dktemp,
23834 	    sizeof (struct dk_temperature), flag) != 0) {
23835 		rval = EFAULT;
23836 		goto done;
23837 	}
23838
23839 	/* Initialize the temperature to invalid. */
23840 	dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
23841 	dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
23842
23843 	/*
23844 	 * Note: Investigate removing the "bypass pm" semantic.
23845 	 * Can we just bypass PM always?
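	 *
	 * Illustrative userland sketch only (not part of the driver; the
	 * device path and error handling are assumptions) of how this
	 * entry point is reached and how DKT_BYPASS_PM changes the outcome:
	 *
	 *	struct dk_temperature dkt;
	 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY);
	 *
	 *	(void) memset(&dkt, 0, sizeof (dkt));
	 *	dkt.dkt_flags = DKT_BYPASS_PM;	<- EAGAIN if drive is spun down
	 *	if (fd >= 0 && ioctl(fd, DKIOCGTEMPERATURE, &dkt) == 0 &&
	 *	    dkt.dkt_cur_temp != DKT_INVALID_TEMP)
	 *		(void) printf("current temp: %d\n", dkt.dkt_cur_temp);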
23846 	 */
23847 	if (dktemp->dkt_flags & DKT_BYPASS_PM) {
23848 		path_flag = SD_PATH_DIRECT;
23849 		ASSERT(!mutex_owned(&un->un_pm_mutex));
23850 		mutex_enter(&un->un_pm_mutex);
23851 		if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
23852 			/*
23853 			 * If DKT_BYPASS_PM is set, and the drive happens to be
23854 			 * in low power mode, we cannot wake it up; we need to
23855 			 * return EAGAIN.
23856 			 */
23857 			mutex_exit(&un->un_pm_mutex);
23858 			rval = EAGAIN;
23859 			goto done;
23860 		} else {
23861 			/*
23862 			 * Indicate to PM the device is busy. This is required
23863 			 * to avoid a race - i.e. the ioctl is issuing a
23864 			 * command and the pm framework brings down the device
23865 			 * to low power mode (possible power cut-off on some
23866 			 * platforms).
23867 			 */
23868 			mutex_exit(&un->un_pm_mutex);
23869 			if (sd_pm_entry(un) != DDI_SUCCESS) {
23870 				rval = EAGAIN;
23871 				goto done;
23872 			}
23873 		}
23874 	}
23875
23876 	temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);
23877
23878 	if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page,
23879 	    TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) {
23880 		goto done2;
23881 	}
23882
23883 	/*
23884 	 * For the current temperature verify that the parameter length is 0x02
23885 	 * and the parameter code is 0x00.
23886 	 */
23887 	if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
23888 	    (temperature_page[5] == 0x00)) {
23889 		if (temperature_page[9] == 0xFF) {
23890 			dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
23891 		} else {
23892 			dktemp->dkt_cur_temp = (short)(temperature_page[9]);
23893 		}
23894 	}
23895
23896 	/*
23897 	 * For the reference temperature verify that the parameter
23898 	 * length is 0x02 and the parameter code is 0x01.
23899 	 */
23900 	if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
23901 	    (temperature_page[11] == 0x01)) {
23902 		if (temperature_page[15] == 0xFF) {
23903 			dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
23904 		} else {
23905 			dktemp->dkt_ref_temp = (short)(temperature_page[15]);
23906 		}
23907 	}
23908
23909 	/* Do the copyout regardless of the temperature command's status. */
23910 	if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
23911 	    flag) != 0) {
23912 		rval = EFAULT;
23913 	}
23914
23915 done2:
23916 	if (path_flag == SD_PATH_DIRECT) {
23917 		sd_pm_exit(un);
23918 	}
23919
23920 	kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
23921 done:
23922 	if (dktemp != NULL) {
23923 		kmem_free(dktemp, sizeof (struct dk_temperature));
23924 	}
23925
23926 	return (rval);
23927 }
23928
23929
23930 /*
23931  * Function: sd_log_page_supported
23932  *
23933  * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of
23934  *		supported log pages.
23935  *
23936  * Arguments: un - driver soft state (unit) structure
23937  *		log_page - the log page code to search for
23938  *
23939  * Return Code: -1 - on error (log sense is optional and may not be supported).
23940  *		0 - log page not found.
23941  *		1 - log page found.
23942  */
23943
23944 static int
23945 sd_log_page_supported(struct sd_lun *un, int log_page)
23946 {
23947 	uchar_t *log_page_data;
23948 	int	i;
23949 	int	match = 0;
23950 	int	log_size;
23951
23952 	log_page_data = kmem_zalloc(0xFF, KM_SLEEP);
23953
23954 	if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0,
23955 	    SD_PATH_DIRECT) != 0) {
23956 		SD_ERROR(SD_LOG_COMMON, un,
23957 		    "sd_log_page_supported: failed log page retrieval\n");
23958 		kmem_free(log_page_data, 0xFF);
23959 		return (-1);
23960 	}
23961 	log_size = log_page_data[3];
23962
23963 	/*
23964 	 * The list of supported log pages starts at offset 4 of the returned data.
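	 * For example (illustrative bytes): a device supporting pages 0x00,
	 * 0x0d and 0x2f returns 0x03 in the length byte at offset 3,
	 * followed by 00 0d 2f, so the loop below scans offsets 4 through 6.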
Check 23965 * until we run out of log pages or a match is found. 23966 */ 23967 for (i = 4; (i < (log_size + 4)) && !match; i++) { 23968 if (log_page_data[i] == log_page) { 23969 match++; 23970 } 23971 } 23972 kmem_free(log_page_data, 0xFF); 23973 return (match); 23974 } 23975 23976 23977 /* 23978 * Function: sd_mhdioc_failfast 23979 * 23980 * Description: This routine is the driver entry point for handling ioctl 23981 * requests to enable/disable the multihost failfast option. 23982 * (MHIOCENFAILFAST) 23983 * 23984 * Arguments: dev - the device number 23985 * arg - user specified probing interval. 23986 * flag - this argument is a pass through to ddi_copyxxx() 23987 * directly from the mode argument of ioctl(). 23988 * 23989 * Return Code: 0 23990 * EFAULT 23991 * ENXIO 23992 */ 23993 23994 static int 23995 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 23996 { 23997 struct sd_lun *un = NULL; 23998 int mh_time; 23999 int rval = 0; 24000 24001 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24002 return (ENXIO); 24003 } 24004 24005 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 24006 return (EFAULT); 24007 24008 if (mh_time) { 24009 mutex_enter(SD_MUTEX(un)); 24010 un->un_resvd_status |= SD_FAILFAST; 24011 mutex_exit(SD_MUTEX(un)); 24012 /* 24013 * If mh_time is INT_MAX, then this ioctl is being used for 24014 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 24015 */ 24016 if (mh_time != INT_MAX) { 24017 rval = sd_check_mhd(dev, mh_time); 24018 } 24019 } else { 24020 (void) sd_check_mhd(dev, 0); 24021 mutex_enter(SD_MUTEX(un)); 24022 un->un_resvd_status &= ~SD_FAILFAST; 24023 mutex_exit(SD_MUTEX(un)); 24024 } 24025 return (rval); 24026 } 24027 24028 24029 /* 24030 * Function: sd_mhdioc_takeown 24031 * 24032 * Description: This routine is the driver entry point for handling ioctl 24033 * requests to forcefully acquire exclusive access rights to the 24034 * multihost disk (MHIOCTKOWN). 24035 * 24036 * Arguments: dev - the device number 24037 * arg - user provided structure specifying the delay 24038 * parameters in milliseconds 24039 * flag - this argument is a pass through to ddi_copyxxx() 24040 * directly from the mode argument of ioctl(). 24041 * 24042 * Return Code: 0 24043 * EFAULT 24044 * ENXIO 24045 */ 24046 24047 static int 24048 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 24049 { 24050 struct sd_lun *un = NULL; 24051 struct mhioctkown *tkown = NULL; 24052 int rval = 0; 24053 24054 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24055 return (ENXIO); 24056 } 24057 24058 if (arg != NULL) { 24059 tkown = (struct mhioctkown *) 24060 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 24061 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 24062 if (rval != 0) { 24063 rval = EFAULT; 24064 goto error; 24065 } 24066 } 24067 24068 rval = sd_take_ownership(dev, tkown); 24069 mutex_enter(SD_MUTEX(un)); 24070 if (rval == 0) { 24071 un->un_resvd_status |= SD_RESERVE; 24072 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 24073 sd_reinstate_resv_delay = 24074 tkown->reinstate_resv_delay * 1000; 24075 } else { 24076 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 24077 } 24078 /* 24079 * Give the scsi_watch routine interval set by 24080 * the MHIOCENFAILFAST ioctl precedence here. 
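	 * (That is, sd_check_mhd() is only called below when SD_FAILFAST
	 * is not set; a probing interval already established by
	 * MHIOCENFAILFAST stays in effect.)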
24081 */ 24082 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 24083 mutex_exit(SD_MUTEX(un)); 24084 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 24085 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24086 "sd_mhdioc_takeown : %d\n", 24087 sd_reinstate_resv_delay); 24088 } else { 24089 mutex_exit(SD_MUTEX(un)); 24090 } 24091 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 24092 sd_mhd_reset_notify_cb, (caddr_t)un); 24093 } else { 24094 un->un_resvd_status &= ~SD_RESERVE; 24095 mutex_exit(SD_MUTEX(un)); 24096 } 24097 24098 error: 24099 if (tkown != NULL) { 24100 kmem_free(tkown, sizeof (struct mhioctkown)); 24101 } 24102 return (rval); 24103 } 24104 24105 24106 /* 24107 * Function: sd_mhdioc_release 24108 * 24109 * Description: This routine is the driver entry point for handling ioctl 24110 * requests to release exclusive access rights to the multihost 24111 * disk (MHIOCRELEASE). 24112 * 24113 * Arguments: dev - the device number 24114 * 24115 * Return Code: 0 24116 * ENXIO 24117 */ 24118 24119 static int 24120 sd_mhdioc_release(dev_t dev) 24121 { 24122 struct sd_lun *un = NULL; 24123 timeout_id_t resvd_timeid_save; 24124 int resvd_status_save; 24125 int rval = 0; 24126 24127 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24128 return (ENXIO); 24129 } 24130 24131 mutex_enter(SD_MUTEX(un)); 24132 resvd_status_save = un->un_resvd_status; 24133 un->un_resvd_status &= 24134 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 24135 if (un->un_resvd_timeid) { 24136 resvd_timeid_save = un->un_resvd_timeid; 24137 un->un_resvd_timeid = NULL; 24138 mutex_exit(SD_MUTEX(un)); 24139 (void) untimeout(resvd_timeid_save); 24140 } else { 24141 mutex_exit(SD_MUTEX(un)); 24142 } 24143 24144 /* 24145 * destroy any pending timeout thread that may be attempting to 24146 * reinstate reservation on this device. 24147 */ 24148 sd_rmv_resv_reclaim_req(dev); 24149 24150 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 24151 mutex_enter(SD_MUTEX(un)); 24152 if ((un->un_mhd_token) && 24153 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 24154 mutex_exit(SD_MUTEX(un)); 24155 (void) sd_check_mhd(dev, 0); 24156 } else { 24157 mutex_exit(SD_MUTEX(un)); 24158 } 24159 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 24160 sd_mhd_reset_notify_cb, (caddr_t)un); 24161 } else { 24162 /* 24163 * sd_mhd_watch_cb will restart the resvd recover timeout thread 24164 */ 24165 mutex_enter(SD_MUTEX(un)); 24166 un->un_resvd_status = resvd_status_save; 24167 mutex_exit(SD_MUTEX(un)); 24168 } 24169 return (rval); 24170 } 24171 24172 24173 /* 24174 * Function: sd_mhdioc_register_devid 24175 * 24176 * Description: This routine is the driver entry point for handling ioctl 24177 * requests to register the device id (MHIOCREREGISTERDEVID). 
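 *
 * Illustrative userland sketch only (the device path and error handling
 * are assumptions; per the entry point's signature, the ioctl takes no
 * argument):
 *
 *	int fd = open("/dev/rdsk/c1t4d0s2", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, MHIOCREREGISTERDEVID, 0) != 0)
 *		perror("MHIOCREREGISTERDEVID");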
24178  *
24179  *		Note: The implementation for this ioctl has been updated to
24180  *		be consistent with the original PSARC case (1999/357)
24181  *		(4375899, 4241671, 4220005)
24182  *
24183  * Arguments: dev - the device number
24184  *
24185  * Return Code: 0
24186  *		ENXIO
24187  */
24188
24189 static int
24190 sd_mhdioc_register_devid(dev_t dev)
24191 {
24192 	struct sd_lun	*un = NULL;
24193 	int		rval = 0;
24194
24195 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24196 		return (ENXIO);
24197 	}
24198
24199 	ASSERT(!mutex_owned(SD_MUTEX(un)));
24200
24201 	mutex_enter(SD_MUTEX(un));
24202
24203 	/* If a devid already exists, de-register it */
24204 	if (un->un_devid != NULL) {
24205 		ddi_devid_unregister(SD_DEVINFO(un));
24206 		/*
24207 		 * After unregistering the devid, we need to free the
24208 		 * devid memory.
24209 		 */
24210 		ddi_devid_free(un->un_devid);
24211 		un->un_devid = NULL;
24212 	}
24213
24214 	/* Check for reservation conflict */
24215 	mutex_exit(SD_MUTEX(un));
24216 	rval = sd_send_scsi_TEST_UNIT_READY(un, 0);
24217 	mutex_enter(SD_MUTEX(un));
24218
24219 	switch (rval) {
24220 	case 0:
24221 		sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED);
24222 		break;
24223 	case EACCES:
24224 		break;
24225 	default:
24226 		rval = EIO;
24227 	}
24228
24229 	mutex_exit(SD_MUTEX(un));
24230 	return (rval);
24231 }
24232
24233
24234 /*
24235  * Function: sd_mhdioc_inkeys
24236  *
24237  * Description: This routine is the driver entry point for handling ioctl
24238  *		requests to issue the SCSI-3 Persistent In Read Keys command
24239  *		to the device (MHIOCGRP_INKEYS).
24240  *
24241  * Arguments: dev - the device number
24242  *		arg - user provided in_keys structure
24243  *		flag - this argument is a pass through to ddi_copyxxx()
24244  *			directly from the mode argument of ioctl().
24245  *
24246  * Return Code: code returned by sd_persistent_reservation_in_read_keys()
24247  *		ENXIO
24248  *		EFAULT
24249  */
24250
24251 static int
24252 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
24253 {
24254 	struct sd_lun		*un;
24255 	mhioc_inkeys_t		inkeys;
24256 	int			rval = 0;
24257
24258 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24259 		return (ENXIO);
24260 	}
24261
24262 #ifdef _MULTI_DATAMODEL
24263 	switch (ddi_model_convert_from(flag & FMODELS)) {
24264 	case DDI_MODEL_ILP32: {
24265 		struct mhioc_inkeys32	inkeys32;
24266
24267 		if (ddi_copyin(arg, &inkeys32,
24268 		    sizeof (struct mhioc_inkeys32), flag) != 0) {
24269 			return (EFAULT);
24270 		}
24271 		inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
24272 		if ((rval = sd_persistent_reservation_in_read_keys(un,
24273 		    &inkeys, flag)) != 0) {
24274 			return (rval);
24275 		}
24276 		inkeys32.generation = inkeys.generation;
24277 		if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
24278 		    flag) != 0) {
24279 			return (EFAULT);
24280 		}
24281 		break;
24282 	}
24283 	case DDI_MODEL_NONE:
24284 		if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
24285 		    flag) != 0) {
24286 			return (EFAULT);
24287 		}
24288 		if ((rval = sd_persistent_reservation_in_read_keys(un,
24289 		    &inkeys, flag)) != 0) {
24290 			return (rval);
24291 		}
24292 		if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
24293 		    flag) != 0) {
24294 			return (EFAULT);
24295 		}
24296 		break;
24297 	}
24298
#else /* !
_MULTI_DATAMODEL */
24299
24300 	if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
24301 		return (EFAULT);
24302 	}
24303 	rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
24304 	if (rval != 0) {
24305 		return (rval);
24306 	}
24307 	if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
24308 		return (EFAULT);
24309 	}
24310
24311 #endif /* _MULTI_DATAMODEL */
24312
24313 	return (rval);
24314 }
24315
24316
24317 /*
24318  * Function: sd_mhdioc_inresv
24319  *
24320  * Description: This routine is the driver entry point for handling ioctl
24321  *		requests to issue the SCSI-3 Persistent In Read Reservations
24322  *		command to the device (MHIOCGRP_INRESV).
24323  *
24324  * Arguments: dev - the device number
24325  *		arg - user provided in_resv structure
24326  *		flag - this argument is a pass through to ddi_copyxxx()
24327  *			directly from the mode argument of ioctl().
24328  *
24329  * Return Code: code returned by sd_persistent_reservation_in_read_resv()
24330  *		ENXIO
24331  *		EFAULT
24332  */
24333
24334 static int
24335 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag)
24336 {
24337 	struct sd_lun		*un;
24338 	mhioc_inresvs_t		inresvs;
24339 	int			rval = 0;
24340
24341 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24342 		return (ENXIO);
24343 	}
24344
24345 #ifdef _MULTI_DATAMODEL
24346
24347 	switch (ddi_model_convert_from(flag & FMODELS)) {
24348 	case DDI_MODEL_ILP32: {
24349 		struct mhioc_inresvs32	inresvs32;
24350
24351 		if (ddi_copyin(arg, &inresvs32,
24352 		    sizeof (struct mhioc_inresvs32), flag) != 0) {
24353 			return (EFAULT);
24354 		}
24355 		inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li;
24356 		if ((rval = sd_persistent_reservation_in_read_resv(un,
24357 		    &inresvs, flag)) != 0) {
24358 			return (rval);
24359 		}
24360 		inresvs32.generation = inresvs.generation;
24361 		if (ddi_copyout(&inresvs32, arg,
24362 		    sizeof (struct mhioc_inresvs32), flag) != 0) {
24363 			return (EFAULT);
24364 		}
24365 		break;
24366 	}
24367 	case DDI_MODEL_NONE:
24368 		if (ddi_copyin(arg, &inresvs,
24369 		    sizeof (mhioc_inresvs_t), flag) != 0) {
24370 			return (EFAULT);
24371 		}
24372 		if ((rval = sd_persistent_reservation_in_read_resv(un,
24373 		    &inresvs, flag)) != 0) {
24374 			return (rval);
24375 		}
24376 		if (ddi_copyout(&inresvs, arg,
24377 		    sizeof (mhioc_inresvs_t), flag) != 0) {
24378 			return (EFAULT);
24379 		}
24380 		break;
24381 	}
24382
24383 #else /* ! _MULTI_DATAMODEL */
24384
24385 	if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) {
24386 		return (EFAULT);
24387 	}
24388 	rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag);
24389 	if (rval != 0) {
24390 		return (rval);
24391 	}
24392 	if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag) != 0) {
24393 		return (EFAULT);
24394 	}
24395
24396 #endif /* ! _MULTI_DATAMODEL */
24397
24398 	return (rval);
24399 }
24400
24401
24402 /*
24403  * The following routines support the clustering functionality described below
24404  * and implement lost reservation reclaim functionality.
24405  *
24406  * Clustering
24407  * ----------
24408  * The clustering code uses two different, independent forms of SCSI
24409  * reservation: traditional SCSI-2 Reserve/Release, and the newer SCSI-3
24410  * Persistent Group Reservations. For any particular disk, it will use either
24411  * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk.
24412  *
24413  * SCSI-2
24414  * The cluster software takes ownership of a multi-hosted disk by issuing the
24415  * MHIOCTKOWN ioctl to the disk driver.
It releases ownership by issuing the
24416  * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl: a cluster,
24417  * just after taking ownership of the disk with the MHIOCTKOWN ioctl, then issues
24418  * the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the driver. The
24419  * meaning of failfast is that if the driver (on this host) ever encounters the
24420  * scsi error return code RESERVATION_CONFLICT from the device, it should
24421  * immediately panic the host. The motivation for this ioctl is that if this
24422  * host does encounter reservation conflict, the underlying cause is that some
24423  * other host of the cluster has decided that this host is no longer in the
24424  * cluster and has seized control of the disks for itself. Since this host is no
24425  * longer in the cluster, it ought to panic itself. The MHIOCENFAILFAST ioctl
24426  * does two things:
24427  *	(a) it sets a flag that will cause any returned RESERVATION_CONFLICT
24428  *	    error to panic the host
24429  *	(b) it sets up a periodic timer to test whether this host still has
24430  *	    "access" (in that no other host has reserved the device): if the
24431  *	    periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
24432  *	    purpose of that periodic timer is to handle scenarios where the host is
24433  *	    otherwise temporarily quiescent, temporarily doing no real i/o.
24434  * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host,
24435  * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for
24436  * the device itself.
24437  *
24438  * SCSI-3 PGR
24439  * A direct semantic implementation of the SCSI-3 Persistent Reservation
24440  * facility is supported through the shared multihost disk ioctls
24441  * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
24442  * MHIOCGRP_PREEMPTANDABORT).
24443  *
24444  * Reservation Reclaim:
24445  * --------------------
24446  * To support the lost reservation reclaim operations this driver creates a
24447  * single thread to handle reinstating reservations on all devices that have
24448  * lost reservations. sd_resv_reclaim_requests are logged for all devices that
24449  * have LOST RESERVATIONS when the scsi watch facility calls back sd_mhd_watch_cb,
24450  * and the reservation reclaim thread loops through the requests to regain the
24451  * lost reservations.
24452  */
24453
24454 /*
24455  * Function: sd_check_mhd()
24456  *
24457  * Description: This function sets up and submits a scsi watch request or
24458  *		terminates an existing watch request. This routine is used in
24459  *		support of reservation reclaim.
24460  *
24461  * Arguments: dev - the device 'dev_t' is used for context to discriminate
24462  *			among multiple watches that share the callback function
24463  *		interval - the number of milliseconds specifying the watch
24464  *			interval for issuing TEST UNIT READY commands. If
24465  *			set to 0 the watch should be terminated. If the
24466  *			interval is set to 0 and if the device is required
24467  *			to hold reservation while disabling failfast, the
24468  *			watch is restarted with an interval of
24469  *			reinstate_resv_delay.
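 *
 *		For example (grounded in the conversion in the body below):
 *		a caller passing interval = 2000 (msec) results in
 *		scsi_watch_request_submit() being handed 2000000 usec, while
 *		interval = 500 is first rounded up to the 1000 msec minimum.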
24470 * 24471 * Return Code: 0 - Successful submit/terminate of scsi watch request 24472 * ENXIO - Indicates an invalid device was specified 24473 * EAGAIN - Unable to submit the scsi watch request 24474 */ 24475 24476 static int 24477 sd_check_mhd(dev_t dev, int interval) 24478 { 24479 struct sd_lun *un; 24480 opaque_t token; 24481 24482 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24483 return (ENXIO); 24484 } 24485 24486 /* is this a watch termination request? */ 24487 if (interval == 0) { 24488 mutex_enter(SD_MUTEX(un)); 24489 /* if there is an existing watch task then terminate it */ 24490 if (un->un_mhd_token) { 24491 token = un->un_mhd_token; 24492 un->un_mhd_token = NULL; 24493 mutex_exit(SD_MUTEX(un)); 24494 (void) scsi_watch_request_terminate(token, 24495 SCSI_WATCH_TERMINATE_WAIT); 24496 mutex_enter(SD_MUTEX(un)); 24497 } else { 24498 mutex_exit(SD_MUTEX(un)); 24499 /* 24500 * Note: If we return here we don't check for the 24501 * failfast case. This is the original legacy 24502 * implementation but perhaps we should be checking 24503 * the failfast case. 24504 */ 24505 return (0); 24506 } 24507 /* 24508 * If the device is required to hold reservation while 24509 * disabling failfast, we need to restart the scsi_watch 24510 * routine with an interval of reinstate_resv_delay. 24511 */ 24512 if (un->un_resvd_status & SD_RESERVE) { 24513 interval = sd_reinstate_resv_delay/1000; 24514 } else { 24515 /* no failfast so bail */ 24516 mutex_exit(SD_MUTEX(un)); 24517 return (0); 24518 } 24519 mutex_exit(SD_MUTEX(un)); 24520 } 24521 24522 /* 24523 * adjust minimum time interval to 1 second, 24524 * and convert from msecs to usecs 24525 */ 24526 if (interval > 0 && interval < 1000) { 24527 interval = 1000; 24528 } 24529 interval *= 1000; 24530 24531 /* 24532 * submit the request to the scsi_watch service 24533 */ 24534 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 24535 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 24536 if (token == NULL) { 24537 return (EAGAIN); 24538 } 24539 24540 /* 24541 * save token for termination later on 24542 */ 24543 mutex_enter(SD_MUTEX(un)); 24544 un->un_mhd_token = token; 24545 mutex_exit(SD_MUTEX(un)); 24546 return (0); 24547 } 24548 24549 24550 /* 24551 * Function: sd_mhd_watch_cb() 24552 * 24553 * Description: This function is the call back function used by the scsi watch 24554 * facility. The scsi watch facility sends the "Test Unit Ready" 24555 * and processes the status. If applicable (i.e. a "Unit Attention" 24556 * status and automatic "Request Sense" not used) the scsi watch 24557 * facility will send a "Request Sense" and retrieve the sense data 24558 * to be passed to this callback function. In either case the 24559 * automatic "Request Sense" or the facility submitting one, this 24560 * callback is passed the status and sense data. 
24561 * 24562 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24563 * among multiple watches that share this callback function 24564 * resultp - scsi watch facility result packet containing scsi 24565 * packet, status byte and sense data 24566 * 24567 * Return Code: 0 - continue the watch task 24568 * non-zero - terminate the watch task 24569 */ 24570 24571 static int 24572 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 24573 { 24574 struct sd_lun *un; 24575 struct scsi_status *statusp; 24576 struct scsi_extended_sense *sensep; 24577 struct scsi_pkt *pkt; 24578 uchar_t actual_sense_length; 24579 dev_t dev = (dev_t)arg; 24580 24581 ASSERT(resultp != NULL); 24582 statusp = resultp->statusp; 24583 sensep = resultp->sensep; 24584 pkt = resultp->pkt; 24585 actual_sense_length = resultp->actual_sense_length; 24586 24587 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24588 return (ENXIO); 24589 } 24590 24591 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24592 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 24593 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 24594 24595 /* Begin processing of the status and/or sense data */ 24596 if (pkt->pkt_reason != CMD_CMPLT) { 24597 /* Handle the incomplete packet */ 24598 sd_mhd_watch_incomplete(un, pkt); 24599 return (0); 24600 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 24601 if (*((unsigned char *)statusp) 24602 == STATUS_RESERVATION_CONFLICT) { 24603 /* 24604 * Handle a reservation conflict by panicking if 24605 * configured for failfast or by logging the conflict 24606 * and updating the reservation status 24607 */ 24608 mutex_enter(SD_MUTEX(un)); 24609 if ((un->un_resvd_status & SD_FAILFAST) && 24610 (sd_failfast_enable)) { 24611 panic("Reservation Conflict"); 24612 /*NOTREACHED*/ 24613 } 24614 SD_INFO(SD_LOG_IOCTL_MHD, un, 24615 "sd_mhd_watch_cb: Reservation Conflict\n"); 24616 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 24617 mutex_exit(SD_MUTEX(un)); 24618 } 24619 } 24620 24621 if (sensep != NULL) { 24622 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 24623 mutex_enter(SD_MUTEX(un)); 24624 if ((sensep->es_add_code == SD_SCSI_RESET_SENSE_CODE) && 24625 (un->un_resvd_status & SD_RESERVE)) { 24626 /* 24627 * The additional sense code indicates a power 24628 * on or bus device reset has occurred; update 24629 * the reservation status. 24630 */ 24631 un->un_resvd_status |= 24632 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24633 SD_INFO(SD_LOG_IOCTL_MHD, un, 24634 "sd_mhd_watch_cb: Lost Reservation\n"); 24635 } 24636 } else { 24637 return (0); 24638 } 24639 } else { 24640 mutex_enter(SD_MUTEX(un)); 24641 } 24642 24643 if ((un->un_resvd_status & SD_RESERVE) && 24644 (un->un_resvd_status & SD_LOST_RESERVE)) { 24645 if (un->un_resvd_status & SD_WANT_RESERVE) { 24646 /* 24647 * A reset occurred in between the last probe and this 24648 * one so if a timeout is pending cancel it. 
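			 * (The pending timeout is the sd_mhd_resvd_recover()
			 * callout tracked by un_resvd_timeid and scheduled a
			 * few lines below.)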
24649 			 */
24650 			if (un->un_resvd_timeid) {
24651 				timeout_id_t temp_id = un->un_resvd_timeid;
24652 				un->un_resvd_timeid = NULL;
24653 				mutex_exit(SD_MUTEX(un));
24654 				(void) untimeout(temp_id);
24655 				mutex_enter(SD_MUTEX(un));
24656 			}
24657 			un->un_resvd_status &= ~SD_WANT_RESERVE;
24658 		}
24659 		if (un->un_resvd_timeid == 0) {
24660 			/* Schedule a timeout to handle the lost reservation */
24661 			un->un_resvd_timeid = timeout(sd_mhd_resvd_recover,
24662 			    (void *)dev,
24663 			    drv_usectohz(sd_reinstate_resv_delay));
24664 		}
24665 	}
24666 	mutex_exit(SD_MUTEX(un));
24667 	return (0);
24668 }
24669
24670
24671 /*
24672  * Function: sd_mhd_watch_incomplete()
24673  *
24674  * Description: This function is used to find out why a scsi pkt sent by the
24675  *		scsi watch facility was not completed. Under some scenarios this
24676  *		routine will simply return without taking action. Otherwise it
24677  *		will send a bus reset to see if the drive is still online.
24678  *
24679  * Arguments: un - driver soft state (unit) structure
24680  *		pkt - incomplete scsi pkt
24681  */
24682
24683 static void
24684 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt)
24685 {
24686 	int	be_chatty;
24687 	int	perr;
24688
24689 	ASSERT(pkt != NULL);
24690 	ASSERT(un != NULL);
24691 	be_chatty = (!(pkt->pkt_flags & FLAG_SILENT));
24692 	perr = (pkt->pkt_statistics & STAT_PERR);
24693
24694 	mutex_enter(SD_MUTEX(un));
24695 	if (un->un_state == SD_STATE_DUMPING) {
24696 		mutex_exit(SD_MUTEX(un));
24697 		return;
24698 	}
24699
24700 	switch (pkt->pkt_reason) {
24701 	case CMD_UNX_BUS_FREE:
24702 		/*
24703 		 * If we had a parity error that caused the target to drop BSY*,
24704 		 * don't be chatty about it.
24705 		 */
24706 		if (perr && be_chatty) {
24707 			be_chatty = 0;
24708 		}
24709 		break;
24710 	case CMD_TAG_REJECT:
24711 		/*
24712 		 * The SCSI-2 spec states that a tag reject will be sent by the
24713 		 * target if tagged queuing is not supported. A tag reject may
24714 		 * also be sent during certain initialization periods or to
24715 		 * control internal resources. For the latter case, the target
24716 		 * may also return Queue Full.
24717 		 *
24718 		 * If this driver receives a tag reject from a target that is
24719 		 * going through an init period or controlling internal
24720 		 * resources, tagged queuing will be disabled. This is a less
24721 		 * than optimal behavior, but the driver is unable to determine
24722 		 * the target state and assumes tagged queuing is not supported.
24723 		 */
24724 		pkt->pkt_flags = 0;
24725 		un->un_tagflags = 0;
24726
24727 		if (un->un_f_opt_queueing == TRUE) {
24728 			un->un_throttle = min(un->un_throttle, 3);
24729 		} else {
24730 			un->un_throttle = 1;
24731 		}
24732 		mutex_exit(SD_MUTEX(un));
24733 		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
24734 		mutex_enter(SD_MUTEX(un));
24735 		break;
24736 	case CMD_INCOMPLETE:
24737 		/*
24738 		 * The transport stopped with an abnormal state; fall through and
24739 		 * reset the target and/or bus unless selection did not complete
24740 		 * (indicated by STATE_GOT_BUS), in which case we don't want to
24741 		 * go through a target/bus reset.
24742 		 */
24743 		if (pkt->pkt_state == STATE_GOT_BUS) {
24744 			break;
24745 		}
24746 		/*FALLTHROUGH*/
24747
24748 	case CMD_TIMEOUT:
24749 	default:
24750 		/*
24751 		 * The lun may still be running the command, so a lun reset
24752 		 * should be attempted. If the lun reset fails or cannot be
24753 		 * issued, then try a target reset. Lastly, try a bus reset.
24754 */ 24755 if ((pkt->pkt_statistics & 24756 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 24757 int reset_retval = 0; 24758 mutex_exit(SD_MUTEX(un)); 24759 if (un->un_f_allow_bus_device_reset == TRUE) { 24760 if (un->un_f_lun_reset_enabled == TRUE) { 24761 reset_retval = 24762 scsi_reset(SD_ADDRESS(un), 24763 RESET_LUN); 24764 } 24765 if (reset_retval == 0) { 24766 reset_retval = 24767 scsi_reset(SD_ADDRESS(un), 24768 RESET_TARGET); 24769 } 24770 } 24771 if (reset_retval == 0) { 24772 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 24773 } 24774 mutex_enter(SD_MUTEX(un)); 24775 } 24776 break; 24777 } 24778 24779 /* A device/bus reset has occurred; update the reservation status. */ 24780 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 24781 (STAT_BUS_RESET | STAT_DEV_RESET))) { 24782 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24783 un->un_resvd_status |= 24784 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24785 SD_INFO(SD_LOG_IOCTL_MHD, un, 24786 "sd_mhd_watch_incomplete: Lost Reservation\n"); 24787 } 24788 } 24789 24790 /* 24791 * The disk has been turned off; Update the device state. 24792 * 24793 * Note: Should we be offlining the disk here? 24794 */ 24795 if (pkt->pkt_state == STATE_GOT_BUS) { 24796 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 24797 "Disk not responding to selection\n"); 24798 if (un->un_state != SD_STATE_OFFLINE) { 24799 New_state(un, SD_STATE_OFFLINE); 24800 } 24801 } else if (be_chatty) { 24802 /* 24803 * suppress messages if they are all the same pkt reason; 24804 * with TQ, many (up to 256) are returned with the same 24805 * pkt_reason 24806 */ 24807 if (pkt->pkt_reason != un->un_last_pkt_reason) { 24808 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24809 "sd_mhd_watch_incomplete: " 24810 "SCSI transport failed: reason '%s'\n", 24811 scsi_rname(pkt->pkt_reason)); 24812 } 24813 } 24814 un->un_last_pkt_reason = pkt->pkt_reason; 24815 mutex_exit(SD_MUTEX(un)); 24816 } 24817 24818 24819 /* 24820 * Function: sd_sname() 24821 * 24822 * Description: This is a simple little routine to return a string containing 24823 * a printable description of command status byte for use in 24824 * logging. 24825 * 24826 * Arguments: status - pointer to a status byte 24827 * 24828 * Return Code: char * - string containing status description. 24829 */ 24830 24831 static char * 24832 sd_sname(uchar_t status) 24833 { 24834 switch (status & STATUS_MASK) { 24835 case STATUS_GOOD: 24836 return ("good status"); 24837 case STATUS_CHECK: 24838 return ("check condition"); 24839 case STATUS_MET: 24840 return ("condition met"); 24841 case STATUS_BUSY: 24842 return ("busy"); 24843 case STATUS_INTERMEDIATE: 24844 return ("intermediate"); 24845 case STATUS_INTERMEDIATE_MET: 24846 return ("intermediate - condition met"); 24847 case STATUS_RESERVATION_CONFLICT: 24848 return ("reservation_conflict"); 24849 case STATUS_TERMINATED: 24850 return ("command terminated"); 24851 case STATUS_QFULL: 24852 return ("queue full"); 24853 default: 24854 return ("<unknown status>"); 24855 } 24856 } 24857 24858 24859 /* 24860 * Function: sd_mhd_resvd_recover() 24861 * 24862 * Description: This function adds a reservation entry to the 24863 * sd_resv_reclaim_request list and signals the reservation 24864 * reclaim thread that there is work pending. If the reservation 24865 * reclaim thread has not been previously created this function 24866 * will kick it off. 
24867 * 24868 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24869 * among multiple watches that share this callback function 24870 * 24871 * Context: This routine is called by timeout() and is run in interrupt 24872 * context. It must not sleep or call other functions which may 24873 * sleep. 24874 */ 24875 24876 static void 24877 sd_mhd_resvd_recover(void *arg) 24878 { 24879 dev_t dev = (dev_t)arg; 24880 struct sd_lun *un; 24881 struct sd_thr_request *sd_treq = NULL; 24882 struct sd_thr_request *sd_cur = NULL; 24883 struct sd_thr_request *sd_prev = NULL; 24884 int already_there = 0; 24885 24886 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24887 return; 24888 } 24889 24890 mutex_enter(SD_MUTEX(un)); 24891 un->un_resvd_timeid = NULL; 24892 if (un->un_resvd_status & SD_WANT_RESERVE) { 24893 /* 24894 * There was a reset so don't issue the reserve, allow the 24895 * sd_mhd_watch_cb callback function to notice this and 24896 * reschedule the timeout for reservation. 24897 */ 24898 mutex_exit(SD_MUTEX(un)); 24899 return; 24900 } 24901 mutex_exit(SD_MUTEX(un)); 24902 24903 /* 24904 * Add this device to the sd_resv_reclaim_request list and the 24905 * sd_resv_reclaim_thread should take care of the rest. 24906 * 24907 * Note: We can't sleep in this context so if the memory allocation 24908 * fails allow the sd_mhd_watch_cb callback function to notice this and 24909 * reschedule the timeout for reservation. (4378460) 24910 */ 24911 sd_treq = (struct sd_thr_request *) 24912 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 24913 if (sd_treq == NULL) { 24914 return; 24915 } 24916 24917 sd_treq->sd_thr_req_next = NULL; 24918 sd_treq->dev = dev; 24919 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24920 if (sd_tr.srq_thr_req_head == NULL) { 24921 sd_tr.srq_thr_req_head = sd_treq; 24922 } else { 24923 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 24924 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 24925 if (sd_cur->dev == dev) { 24926 /* 24927 * already in Queue so don't log 24928 * another request for the device 24929 */ 24930 already_there = 1; 24931 break; 24932 } 24933 sd_prev = sd_cur; 24934 } 24935 if (!already_there) { 24936 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 24937 "logging request for %lx\n", dev); 24938 sd_prev->sd_thr_req_next = sd_treq; 24939 } else { 24940 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 24941 } 24942 } 24943 24944 /* 24945 * Create a kernel thread to do the reservation reclaim and free up this 24946 * thread. 
We cannot block this thread while we go away to do the
24947 	 * reservation reclaim.
24948 	 */
24949 	if (sd_tr.srq_resv_reclaim_thread == NULL)
24950 		sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
24951 		    sd_resv_reclaim_thread, NULL,
24952 		    0, &p0, TS_RUN, v.v_maxsyspri - 2);
24953
24954 	/* Tell the reservation reclaim thread that it has work to do */
24955 	cv_signal(&sd_tr.srq_resv_reclaim_cv);
24956 	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
24957 }
24958
24959 /*
24960  * Function: sd_resv_reclaim_thread()
24961  *
24962  * Description: This function implements the reservation reclaim operations.
24963  *
24964  * Arguments: none (work items are taken from the sd_resv_reclaim_request
24965  *		list headed by sd_tr.srq_thr_req_head)
24966  */
24967
24968 static void
24969 sd_resv_reclaim_thread()
24970 {
24971 	struct sd_lun		*un;
24972 	struct sd_thr_request	*sd_mhreq;
24973
24974 	/* Wait for work */
24975 	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
24976 	if (sd_tr.srq_thr_req_head == NULL) {
24977 		cv_wait(&sd_tr.srq_resv_reclaim_cv,
24978 		    &sd_tr.srq_resv_reclaim_mutex);
24979 	}
24980
24981 	/* Loop while we have work */
24982 	while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
24983 		un = ddi_get_soft_state(sd_state,
24984 		    SDUNIT(sd_tr.srq_thr_cur_req->dev));
24985 		if (un == NULL) {
24986 			/*
24987 			 * The softstate structure is NULL, so just
24988 			 * dequeue the request and continue.
24989 			 */
24990 			sd_tr.srq_thr_req_head =
24991 			    sd_tr.srq_thr_cur_req->sd_thr_req_next;
24992 			kmem_free(sd_tr.srq_thr_cur_req,
24993 			    sizeof (struct sd_thr_request));
24994 			continue;
24995 		}
24996
24997 		/* dequeue the request */
24998 		sd_mhreq = sd_tr.srq_thr_cur_req;
24999 		sd_tr.srq_thr_req_head =
25000 		    sd_tr.srq_thr_cur_req->sd_thr_req_next;
25001 		mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25002
25003 		/*
25004 		 * Reclaim reservation only if SD_RESERVE is still set. There
25005 		 * may have been a call to MHIOCRELEASE before we got here.
25006 		 */
25007 		mutex_enter(SD_MUTEX(un));
25008 		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25009 			/*
25010 			 * Note: The SD_LOST_RESERVE flag is cleared before
25011 			 * reclaiming the reservation. If this is done after the
25012 			 * call to sd_reserve_release, a reservation loss in the
25013 			 * window between pkt completion of the reserve cmd and
25014 			 * the mutex_enter below may not be recognized.
25015 			 */
25016 			un->un_resvd_status &= ~SD_LOST_RESERVE;
25017 			mutex_exit(SD_MUTEX(un));
25018
25019 			if (sd_reserve_release(sd_mhreq->dev,
25020 			    SD_RESERVE) == 0) {
25021 				mutex_enter(SD_MUTEX(un));
25022 				un->un_resvd_status |= SD_RESERVE;
25023 				mutex_exit(SD_MUTEX(un));
25024 				SD_INFO(SD_LOG_IOCTL_MHD, un,
25025 				    "sd_resv_reclaim_thread: "
25026 				    "Reservation Recovered\n");
25027 			} else {
25028 				mutex_enter(SD_MUTEX(un));
25029 				un->un_resvd_status |= SD_LOST_RESERVE;
25030 				mutex_exit(SD_MUTEX(un));
25031 				SD_INFO(SD_LOG_IOCTL_MHD, un,
25032 				    "sd_resv_reclaim_thread: Failed "
25033 				    "Reservation Recovery\n");
25034 			}
25035 		} else {
25036 			mutex_exit(SD_MUTEX(un));
25037 		}
25038 		mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25039 		ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req);
25040 		kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25041 		sd_mhreq = sd_tr.srq_thr_cur_req = NULL;
25042 		/*
25043 		 * Wake up the destroy thread if anyone is waiting on
25044 		 * us to complete.
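		 * (The waiter is sd_rmv_resv_reclaim_req(), which cv_waits
		 * on srq_inprocess_cv when the request being removed is the
		 * one currently being reclaimed.)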
25045 		 */
25046 		cv_signal(&sd_tr.srq_inprocess_cv);
25047 		SD_TRACE(SD_LOG_IOCTL_MHD, un,
25048 		    "sd_resv_reclaim_thread: cv_signalling current request \n");
25049 	}
25050
25051 	/*
25052 	 * Clean up the sd_tr structure now that this thread is exiting.
25053 	 */
25054 	ASSERT(sd_tr.srq_thr_req_head == NULL);
25055 	ASSERT(sd_tr.srq_thr_cur_req == NULL);
25056 	sd_tr.srq_resv_reclaim_thread = NULL;
25057 	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25058 	thread_exit();
25059 }
25060
25061
25062 /*
25063  * Function: sd_rmv_resv_reclaim_req()
25064  *
25065  * Description: This function removes any pending reservation reclaim requests
25066  *		for the specified device.
25067  *
25068  * Arguments: dev - the device 'dev_t'
25069  */
25070
25071 static void
25072 sd_rmv_resv_reclaim_req(dev_t dev)
25073 {
25074 	struct sd_thr_request *sd_mhreq;
25075 	struct sd_thr_request *sd_prev;
25076
25077 	/* Remove a reservation reclaim request from the list */
25078 	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25079 	if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) {
25080 		/*
25081 		 * We are attempting to reinstate reservation for
25082 		 * this device. We wait for sd_reserve_release()
25083 		 * to return before we return.
25084 		 */
25085 		cv_wait(&sd_tr.srq_inprocess_cv,
25086 		    &sd_tr.srq_resv_reclaim_mutex);
25087 	} else {
25088 		sd_prev = sd_mhreq = sd_tr.srq_thr_req_head;
25089 		if (sd_mhreq && sd_mhreq->dev == dev) {
25090 			sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next;
25091 			kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25092 			mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25093 			return;
25094 		}
25095 		for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) {
25096 			if (sd_mhreq && sd_mhreq->dev == dev) {
25097 				break;
25098 			}
25099 			sd_prev = sd_mhreq;
25100 		}
25101 		if (sd_mhreq != NULL) {
25102 			sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next;
25103 			kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25104 		}
25105 	}
25106 	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25107 }
25108
25109
25110 /*
25111  * Function: sd_mhd_reset_notify_cb()
25112  *
25113  * Description: This is a call back function for scsi_reset_notify. This
25114  *		function updates the softstate reserved status and logs the
25115  *		reset. The driver scsi watch facility callback function
25116  *		(sd_mhd_watch_cb) and reservation reclaim thread functionality
25117  *		will reclaim the reservation.
25118  *
25119  * Arguments: arg - driver soft state (unit) structure
25120  */
25121
25122 static void
25123 sd_mhd_reset_notify_cb(caddr_t arg)
25124 {
25125 	struct sd_lun *un = (struct sd_lun *)arg;
25126
25127 	mutex_enter(SD_MUTEX(un));
25128 	if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25129 		un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
25130 		SD_INFO(SD_LOG_IOCTL_MHD, un,
25131 		    "sd_mhd_reset_notify_cb: Lost Reservation\n");
25132 	}
25133 	mutex_exit(SD_MUTEX(un));
25134 }
25135
25136
25137 /*
25138  * Function: sd_take_ownership()
25139  *
25140  * Description: This routine implements an algorithm to achieve a stable
25141  *		reservation on disks which don't implement priority reserve,
25142  *		and makes sure that other hosts' re-reservation attempts fail.
25143  *		This algorithm consists of a loop that keeps issuing the RESERVE
25144  *		for some period of time (min_ownership_delay, default 6 seconds).
25145  *		During that loop, it looks to see if there has been a bus device
25146  *		reset or bus reset (both of which cause an existing reservation
		to be lost).
If the reservation is lost issue RESERVE until a 25148 * period of min_ownership_delay with no resets has gone by, or 25149 * until max_ownership_delay has expired. This loop ensures that 25150 * the host really did manage to reserve the device, in spite of 25151 * resets. The looping for min_ownership_delay (default six 25152 * seconds) is important to early generation clustering products, 25153 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 25154 * MHIOCENFAILFAST periodic timer of two seconds. By having 25155 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 25156 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 25157 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 25158 * have already noticed, via the MHIOCENFAILFAST polling, that it 25159 * no longer "owns" the disk and will have panicked itself. Thus, 25160 * the host issuing the MHIOCTKOWN is assured (with timing 25161 * dependencies) that by the time it actually starts to use the 25162 * disk for real work, the old owner is no longer accessing it. 25163 * 25164 * min_ownership_delay is the minimum amount of time for which the 25165 * disk must be reserved continuously devoid of resets before the 25166 * MHIOCTKOWN ioctl will return success. 25167 * 25168 * max_ownership_delay indicates the amount of time by which the 25169 * take ownership should succeed or timeout with an error. 25170 * 25171 * Arguments: dev - the device 'dev_t' 25172 * *p - struct containing timing info. 25173 * 25174 * Return Code: 0 for success or error code 25175 */ 25176 25177 static int 25178 sd_take_ownership(dev_t dev, struct mhioctkown *p) 25179 { 25180 struct sd_lun *un; 25181 int rval; 25182 int err; 25183 int reservation_count = 0; 25184 int min_ownership_delay = 6000000; /* in usec */ 25185 int max_ownership_delay = 30000000; /* in usec */ 25186 clock_t start_time; /* starting time of this algorithm */ 25187 clock_t end_time; /* time limit for giving up */ 25188 clock_t ownership_time; /* time limit for stable ownership */ 25189 clock_t current_time; 25190 clock_t previous_current_time; 25191 25192 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25193 return (ENXIO); 25194 } 25195 25196 /* 25197 * Attempt a device reservation. A priority reservation is requested. 
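	 * (A priority reservation differs from a plain SD_RESERVE in that,
	 * on a reservation conflict, sd_reserve_release() will first "break"
	 * the other host's reservation with a LUN, target, or bus reset and
	 * then reissue the RESERVE; see sd_reserve_release() below.)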
25198 */ 25199 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 25200 != SD_SUCCESS) { 25201 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25202 "sd_take_ownership: return(1)=%d\n", rval); 25203 return (rval); 25204 } 25205 25206 /* Update the softstate reserved status to indicate the reservation */ 25207 mutex_enter(SD_MUTEX(un)); 25208 un->un_resvd_status |= SD_RESERVE; 25209 un->un_resvd_status &= 25210 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 25211 mutex_exit(SD_MUTEX(un)); 25212 25213 if (p != NULL) { 25214 if (p->min_ownership_delay != 0) { 25215 min_ownership_delay = p->min_ownership_delay * 1000; 25216 } 25217 if (p->max_ownership_delay != 0) { 25218 max_ownership_delay = p->max_ownership_delay * 1000; 25219 } 25220 } 25221 SD_INFO(SD_LOG_IOCTL_MHD, un, 25222 "sd_take_ownership: min, max delays: %d, %d\n", 25223 min_ownership_delay, max_ownership_delay); 25224 25225 start_time = ddi_get_lbolt(); 25226 current_time = start_time; 25227 ownership_time = current_time + drv_usectohz(min_ownership_delay); 25228 end_time = start_time + drv_usectohz(max_ownership_delay); 25229 25230 while (current_time - end_time < 0) { 25231 delay(drv_usectohz(500000)); 25232 25233 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 25234 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 25235 mutex_enter(SD_MUTEX(un)); 25236 rval = (un->un_resvd_status & 25237 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 25238 mutex_exit(SD_MUTEX(un)); 25239 break; 25240 } 25241 } 25242 previous_current_time = current_time; 25243 current_time = ddi_get_lbolt(); 25244 mutex_enter(SD_MUTEX(un)); 25245 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 25246 ownership_time = ddi_get_lbolt() + 25247 drv_usectohz(min_ownership_delay); 25248 reservation_count = 0; 25249 } else { 25250 reservation_count++; 25251 } 25252 un->un_resvd_status |= SD_RESERVE; 25253 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 25254 mutex_exit(SD_MUTEX(un)); 25255 25256 SD_INFO(SD_LOG_IOCTL_MHD, un, 25257 "sd_take_ownership: ticks for loop iteration=%ld, " 25258 "reservation=%s\n", (current_time - previous_current_time), 25259 reservation_count ? 
"ok" : "reclaimed"); 25260 25261 if (current_time - ownership_time >= 0 && 25262 reservation_count >= 4) { 25263 rval = 0; /* Achieved a stable ownership */ 25264 break; 25265 } 25266 if (current_time - end_time >= 0) { 25267 rval = EACCES; /* No ownership in max possible time */ 25268 break; 25269 } 25270 } 25271 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25272 "sd_take_ownership: return(2)=%d\n", rval); 25273 return (rval); 25274 } 25275 25276 25277 /* 25278 * Function: sd_reserve_release() 25279 * 25280 * Description: This function builds and sends scsi RESERVE, RELEASE, and 25281 * PRIORITY RESERVE commands based on a user specified command type 25282 * 25283 * Arguments: dev - the device 'dev_t' 25284 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 25285 * SD_RESERVE, SD_RELEASE 25286 * 25287 * Return Code: 0 or Error Code 25288 */ 25289 25290 static int 25291 sd_reserve_release(dev_t dev, int cmd) 25292 { 25293 struct uscsi_cmd *com = NULL; 25294 struct sd_lun *un = NULL; 25295 char cdb[CDB_GROUP0]; 25296 int rval; 25297 25298 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 25299 (cmd == SD_PRIORITY_RESERVE)); 25300 25301 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25302 return (ENXIO); 25303 } 25304 25305 /* instantiate and initialize the command and cdb */ 25306 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25307 bzero(cdb, CDB_GROUP0); 25308 com->uscsi_flags = USCSI_SILENT; 25309 com->uscsi_timeout = un->un_reserve_release_time; 25310 com->uscsi_cdblen = CDB_GROUP0; 25311 com->uscsi_cdb = cdb; 25312 if (cmd == SD_RELEASE) { 25313 cdb[0] = SCMD_RELEASE; 25314 } else { 25315 cdb[0] = SCMD_RESERVE; 25316 } 25317 25318 /* Send the command. */ 25319 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 25320 UIO_SYSSPACE, SD_PATH_STANDARD); 25321 25322 /* 25323 * "break" a reservation that is held by another host, by issuing a 25324 * reset if priority reserve is desired, and we could not get the 25325 * device. 25326 */ 25327 if ((cmd == SD_PRIORITY_RESERVE) && 25328 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25329 /* 25330 * First try to reset the LUN. If we cannot, then try a target 25331 * reset, followed by a bus reset if the target reset fails. 25332 */ 25333 int reset_retval = 0; 25334 if (un->un_f_lun_reset_enabled == TRUE) { 25335 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 25336 } 25337 if (reset_retval == 0) { 25338 /* The LUN reset either failed or was not issued */ 25339 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25340 } 25341 if ((reset_retval == 0) && 25342 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 25343 rval = EIO; 25344 kmem_free(com, sizeof (*com)); 25345 return (rval); 25346 } 25347 25348 bzero(com, sizeof (struct uscsi_cmd)); 25349 com->uscsi_flags = USCSI_SILENT; 25350 com->uscsi_cdb = cdb; 25351 com->uscsi_cdblen = CDB_GROUP0; 25352 com->uscsi_timeout = 5; 25353 25354 /* 25355 * Reissue the last reserve command, this time without request 25356 * sense. Assume that it is just a regular reserve command. 25357 */ 25358 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 25359 UIO_SYSSPACE, SD_PATH_STANDARD); 25360 } 25361 25362 /* Return an error if still getting a reservation conflict. 
	 */
25363 	if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
25364 		rval = EACCES;
25365 	}
25366
25367 	kmem_free(com, sizeof (*com));
25368 	return (rval);
25369 }
25370
25371
25372 #define	SD_NDUMP_RETRIES	12
25373 /*
25374  * System Crash Dump routine
25375  */
25376
25377 static int
25378 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
25379 {
25380 	int		instance;
25381 	int		partition;
25382 	int		i;
25383 	int		err;
25384 	struct sd_lun	*un;
25385 	struct dk_map	*lp;
25386 	struct scsi_pkt *wr_pktp;
25387 	struct buf	*wr_bp;
25388 	struct buf	wr_buf;
25389 	daddr_t		tgt_byte_offset; /* rmw - byte offset for target */
25390 	daddr_t		tgt_blkno;	/* rmw - blkno for target */
25391 	size_t		tgt_byte_count; /* rmw - # of bytes to xfer */
25392 	size_t		tgt_nblk;	/* rmw - # of tgt blks to xfer */
25393 	size_t		io_start_offset;
25394 	int		doing_rmw = FALSE;
25395 	int		rval;
25396 #if defined(__i386) || defined(__amd64)
25397 	ssize_t		dma_resid;
25398 	daddr_t		oblkno;
25399 #endif
25400
25401 	instance = SDUNIT(dev);
25402 	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
25403 	    (!un->un_f_geometry_is_valid) || ISCD(un)) {
25404 		return (ENXIO);
25405 	}
25406
25407 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))
25408
25409 	SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");
25410
25411 	partition = SDPART(dev);
25412 	SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);
25413
25414 	/* Validate the blocks to dump against the partition size. */
25415 	lp = &un->un_map[partition];
25416 	if ((blkno + nblk) > lp->dkl_nblk) {
25417 		SD_TRACE(SD_LOG_DUMP, un,
25418 		    "sddump: dump range larger than partition: "
25419 		    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
25420 		    blkno, nblk, lp->dkl_nblk);
25421 		return (EINVAL);
25422 	}
25423
25424 	mutex_enter(&un->un_pm_mutex);
25425 	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
25426 		struct scsi_pkt *start_pktp;
25427
25428 		mutex_exit(&un->un_pm_mutex);
25429
25430 		/*
25431 		 * Use the pm framework to power on the HBA first.
25432 		 */
25433 		(void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON);
25434
25435 		/*
25436 		 * Dump no longer uses sdpower to power on a device; it's
25437 		 * in-line here so it can be done in polled mode.
25438 		 */
25439
25440 		SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");
25441
25442 		start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
25443 		    CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);
25444
25445 		if (start_pktp == NULL) {
25446 			/* We were not given a SCSI packet, fail. */
25447 			return (EIO);
25448 		}
25449 		bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
25450 		start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
25451 		start_pktp->pkt_cdbp[4] = SD_TARGET_START;
25452 		start_pktp->pkt_flags = FLAG_NOINTR;
25453
25454 		mutex_enter(SD_MUTEX(un));
25455 		sd_fill_scsi1_lun(un, start_pktp);
25456 		mutex_exit(SD_MUTEX(un));
25457 		/*
25458 		 * Scsi_poll returns 0 (success) if the command completes and
25459 		 * the status block is STATUS_GOOD.
25460 		 */
25461 		if (sd_scsi_poll(un, start_pktp) != 0) {
25462 			scsi_destroy_pkt(start_pktp);
25463 			return (EIO);
25464 		}
25465 		scsi_destroy_pkt(start_pktp);
25466 		(void) sd_ddi_pm_resume(un);
25467 	} else {
25468 		mutex_exit(&un->un_pm_mutex);
25469 	}
25470
25471 	mutex_enter(SD_MUTEX(un));
25472 	un->un_throttle = 0;
25473
25474 	/*
25475 	 * The first time through, reset the specific target device.
25476 	 * However, when cpr calls sddump we know that sd is in
25477 	 * a good state, so no bus reset is required.
25478 	 * Clear sense data via Request Sense cmd.
25479 	 * In sddump we don't care about allow_bus_device_reset anymore.
25480 	 */
25481
25482 	if ((un->un_state != SD_STATE_SUSPENDED) &&
25483 	    (un->un_state != SD_STATE_DUMPING)) {
25484
25485 		New_state(un, SD_STATE_DUMPING);
25486
25487 		if (un->un_f_is_fibre == FALSE) {
25488 			mutex_exit(SD_MUTEX(un));
25489 			/*
25490 			 * Attempt a bus reset for parallel scsi.
25491 			 *
25492 			 * Note: A bus reset is required because on some host
25493 			 * systems (e.g. E420R) a bus device reset is
25494 			 * insufficient to reset the state of the target.
25495 			 *
25496 			 * Note: Don't issue the reset for fibre-channel,
25497 			 * because this tends to hang the bus (loop) for
25498 			 * too long while everyone is logging out and in
25499 			 * and the deadman timer for dumping will fire
25500 			 * before the dump is complete.
25501 			 */
25502 			if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
25503 				mutex_enter(SD_MUTEX(un));
25504 				Restore_state(un);
25505 				mutex_exit(SD_MUTEX(un));
25506 				return (EIO);
25507 			}
25508
25509 			/* Delay to give the device some recovery time. */
25510 			drv_usecwait(10000);
25511
25512 			if (sd_send_polled_RQS(un) == SD_FAILURE) {
25513 				SD_INFO(SD_LOG_DUMP, un,
25514 				    "sddump: sd_send_polled_RQS failed\n");
25515 			}
25516 			mutex_enter(SD_MUTEX(un));
25517 		}
25518 	}
25519
25520 	/*
25521 	 * Convert the partition-relative block number to a
25522 	 * disk physical block number.
25523 	 */
25524 	blkno += un->un_offset[partition];
25525 	SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);
25526
25527
25528 	/*
25529 	 * Check if the device has a non-512 block size.
25530 	 */
25531 	wr_bp = NULL;
25532 	if (NOT_DEVBSIZE(un)) {
25533 		tgt_byte_offset = blkno * un->un_sys_blocksize;
25534 		tgt_byte_count = nblk * un->un_sys_blocksize;
25535 		if ((tgt_byte_offset % un->un_tgt_blocksize) ||
25536 		    (tgt_byte_count % un->un_tgt_blocksize)) {
25537 			doing_rmw = TRUE;
25538 			/*
25539 			 * Calculate the block number and number of blocks
25540 			 * in terms of the media block size.
25541 			 */
25542 			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
25543 			tgt_nblk =
25544 			    ((tgt_byte_offset + tgt_byte_count +
25545 			    (un->un_tgt_blocksize - 1)) /
25546 			    un->un_tgt_blocksize) - tgt_blkno;
25547
25548 			/*
25549 			 * Invoke the routine which is going to do the read
25550 			 * part of the read-modify-write.
25551 			 * Note that this routine returns a pointer to
25552 			 * a valid bp in wr_bp.
25553 			 */
25554 			err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk,
25555 			    &wr_bp);
25556 			if (err) {
25557 				mutex_exit(SD_MUTEX(un));
25558 				return (err);
25559 			}
25560 			/*
25561 			 * Offset is being calculated as -
25562 			 * (original block # * system block size) -
25563 			 * (new block # * target block size)
25564 			 */
25565 			io_start_offset =
25566 			    ((uint64_t)(blkno * un->un_sys_blocksize)) -
25567 			    ((uint64_t)(tgt_blkno * un->un_tgt_blocksize));
25568
25569 			ASSERT((io_start_offset >= 0) &&
25570 			    (io_start_offset < un->un_tgt_blocksize));
25571 			/*
25572 			 * Do the modify portion of the read-modify-write.
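			 * For example (worked numbers): with un_sys_blocksize
			 * = 512 and un_tgt_blocksize = 2048, dumping at system
			 * block 7 gives tgt_byte_offset = 3584, tgt_blkno = 1,
			 * and io_start_offset = (7 * 512) - (1 * 2048) = 1536,
			 * so the bcopy below lands 1536 bytes into the
			 * target-block-aligned buffer.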
25573 */ 25574 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 25575 (size_t)nblk * un->un_sys_blocksize); 25576 } else { 25577 doing_rmw = FALSE; 25578 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 25579 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 25580 } 25581 25582 /* Convert blkno and nblk to target blocks */ 25583 blkno = tgt_blkno; 25584 nblk = tgt_nblk; 25585 } else { 25586 wr_bp = &wr_buf; 25587 bzero(wr_bp, sizeof (struct buf)); 25588 wr_bp->b_flags = B_BUSY; 25589 wr_bp->b_un.b_addr = addr; 25590 wr_bp->b_bcount = nblk << DEV_BSHIFT; 25591 wr_bp->b_resid = 0; 25592 } 25593 25594 mutex_exit(SD_MUTEX(un)); 25595 25596 /* 25597 * Obtain a SCSI packet for the write command. 25598 * It should be safe to call the allocator here without 25599 * worrying about being locked for DVMA mapping because 25600 * the address we're passed is already a DVMA mapping 25601 * 25602 * We are also not going to worry about semaphore ownership 25603 * in the dump buffer. Dumping is single threaded at present. 25604 */ 25605 25606 wr_pktp = NULL; 25607 25608 #if defined(__i386) || defined(__amd64) 25609 dma_resid = wr_bp->b_bcount; 25610 oblkno = blkno; 25611 while (dma_resid != 0) { 25612 #endif 25613 25614 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 25615 wr_bp->b_flags &= ~B_ERROR; 25616 25617 #if defined(__i386) || defined(__amd64) 25618 blkno = oblkno + 25619 ((wr_bp->b_bcount - dma_resid) / 25620 un->un_tgt_blocksize); 25621 nblk = dma_resid / un->un_tgt_blocksize; 25622 25623 if (wr_pktp) { 25624 /* Partial DMA transfers after initial transfer */ 25625 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 25626 blkno, nblk); 25627 } else { 25628 /* Initial transfer */ 25629 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 25630 un->un_pkt_flags, NULL_FUNC, NULL, 25631 blkno, nblk); 25632 } 25633 #else 25634 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 25635 0, NULL_FUNC, NULL, blkno, nblk); 25636 #endif 25637 25638 if (rval == 0) { 25639 /* We were given a SCSI packet, continue. 
*/ 25640 break; 25641 } 25642 25643 if (i == 0) { 25644 if (wr_bp->b_flags & B_ERROR) { 25645 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25646 "no resources for dumping; " 25647 "error code: 0x%x, retrying", 25648 geterror(wr_bp)); 25649 } else { 25650 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25651 "no resources for dumping; retrying"); 25652 } 25653 } else if (i != (SD_NDUMP_RETRIES - 1)) { 25654 if (wr_bp->b_flags & B_ERROR) { 25655 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25656 "no resources for dumping; error code: " 25657 "0x%x, retrying\n", geterror(wr_bp)); 25658 } 25659 } else { 25660 if (wr_bp->b_flags & B_ERROR) { 25661 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25662 "no resources for dumping; " 25663 "error code: 0x%x, retries failed, " 25664 "giving up.\n", geterror(wr_bp)); 25665 } else { 25666 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25667 "no resources for dumping; " 25668 "retries failed, giving up.\n"); 25669 } 25670 mutex_enter(SD_MUTEX(un)); 25671 Restore_state(un); 25672 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 25673 mutex_exit(SD_MUTEX(un)); 25674 scsi_free_consistent_buf(wr_bp); 25675 } else { 25676 mutex_exit(SD_MUTEX(un)); 25677 } 25678 return (EIO); 25679 } 25680 drv_usecwait(10000); 25681 } 25682 25683 #if defined(__i386) || defined(__amd64) 25684 /* 25685 * save the resid from PARTIAL_DMA 25686 */ 25687 dma_resid = wr_pktp->pkt_resid; 25688 if (dma_resid != 0) 25689 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 25690 wr_pktp->pkt_resid = 0; 25691 #endif 25692 25693 /* SunBug 1222170 */ 25694 wr_pktp->pkt_flags = FLAG_NOINTR; 25695 25696 err = EIO; 25697 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 25698 25699 /* 25700 * Scsi_poll returns 0 (success) if the command completes and 25701 * the status block is STATUS_GOOD. We should only check 25702 * errors if this condition is not true. Even then we should 25703 * send our own request sense packet only if we have a check 25704 * condition and auto request sense has not been performed by 25705 * the hba. 25706 */ 25707 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 25708 25709 if ((sd_scsi_poll(un, wr_pktp) == 0) && 25710 (wr_pktp->pkt_resid == 0)) { 25711 err = SD_SUCCESS; 25712 break; 25713 } 25714 25715 /* 25716 * Check CMD_DEV_GONE 1st, give up if device is gone. 25717 */ 25718 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 25719 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25720 "Device is gone\n"); 25721 break; 25722 } 25723 25724 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 25725 SD_INFO(SD_LOG_DUMP, un, 25726 "sddump: write failed with CHECK, try # %d\n", i); 25727 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 25728 (void) sd_send_polled_RQS(un); 25729 } 25730 25731 continue; 25732 } 25733 25734 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 25735 int reset_retval = 0; 25736 25737 SD_INFO(SD_LOG_DUMP, un, 25738 "sddump: write failed with BUSY, try # %d\n", i); 25739 25740 if (un->un_f_lun_reset_enabled == TRUE) { 25741 reset_retval = scsi_reset(SD_ADDRESS(un), 25742 RESET_LUN); 25743 } 25744 if (reset_retval == 0) { 25745 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25746 } 25747 (void) sd_send_polled_RQS(un); 25748 25749 } else { 25750 SD_INFO(SD_LOG_DUMP, un, 25751 "sddump: write failed with 0x%x, try # %d\n", 25752 SD_GET_PKT_STATUS(wr_pktp), i); 25753 mutex_enter(SD_MUTEX(un)); 25754 sd_reset_target(un, wr_pktp); 25755 mutex_exit(SD_MUTEX(un)); 25756 } 25757 25758 /* 25759 * If we are not getting anywhere with lun/target resets, 25760 * let's reset the bus. 
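 *
 * Escalation recap, all from the retry logic above: each of the
 * SD_NDUMP_RETRIES (12) write attempts answers CHECK with a polled
 * REQUEST SENSE, answers BUSY with scsi_reset(..., RESET_LUN) when
 * un_f_lun_reset_enabled is set (falling back to RESET_TARGET), and
 * answers any other bad status with sd_reset_target(). Only once,
 * when i reaches SD_NDUMP_RETRIES/2 (6), does the driver escalate
 * to the full bus reset below.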
25761 */ 25762 if (i == SD_NDUMP_RETRIES/2) { 25763 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 25764 (void) sd_send_polled_RQS(un); 25765 } 25766 25767 } 25768 #if defined(__i386) || defined(__amd64) 25769 } /* dma_resid */ 25770 #endif 25771 25772 scsi_destroy_pkt(wr_pktp); 25773 mutex_enter(SD_MUTEX(un)); 25774 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 25775 mutex_exit(SD_MUTEX(un)); 25776 scsi_free_consistent_buf(wr_bp); 25777 } else { 25778 mutex_exit(SD_MUTEX(un)); 25779 } 25780 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 25781 return (err); 25782 } 25783 25784 /* 25785 * Function: sd_scsi_poll() 25786 * 25787 * Description: This is a wrapper for the scsi_poll call. 25788 * 25789 * Arguments: sd_lun - The unit structure 25790 * scsi_pkt - The scsi packet being sent to the device. 25791 * 25792 * Return Code: 0 - Command completed successfully with good status 25793 * -1 - Command failed. This could indicate a check condition 25794 * or other status value requiring recovery action. 25795 * 25796 */ 25797 25798 static int 25799 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 25800 { 25801 int status; 25802 25803 ASSERT(un != NULL); 25804 ASSERT(!mutex_owned(SD_MUTEX(un))); 25805 ASSERT(pktp != NULL); 25806 25807 status = SD_SUCCESS; 25808 25809 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 25810 pktp->pkt_flags |= un->un_tagflags; 25811 pktp->pkt_flags &= ~FLAG_NODISCON; 25812 } 25813 25814 status = sd_ddi_scsi_poll(pktp); 25815 /* 25816 * Scsi_poll returns 0 (success) if the command completes and the 25817 * status block is STATUS_GOOD. We should only check errors if this 25818 * condition is not true. Even then we should send our own request 25819 * sense packet only if we have a check condition and auto 25820 * request sense has not been performed by the hba. 25821 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 25822 */ 25823 if ((status != SD_SUCCESS) && 25824 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 25825 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 25826 (pktp->pkt_reason != CMD_DEV_GONE)) 25827 (void) sd_send_polled_RQS(un); 25828 25829 return (status); 25830 } 25831 25832 /* 25833 * Function: sd_send_polled_RQS() 25834 * 25835 * Description: This sends the request sense command to a device. 25836 * 25837 * Arguments: sd_lun - The unit structure 25838 * 25839 * Return Code: 0 - Command completed successfully with good status 25840 * -1 - Command failed. 
25841 * 25842 */ 25843 25844 static int 25845 sd_send_polled_RQS(struct sd_lun *un) 25846 { 25847 int ret_val; 25848 struct scsi_pkt *rqs_pktp; 25849 struct buf *rqs_bp; 25850 25851 ASSERT(un != NULL); 25852 ASSERT(!mutex_owned(SD_MUTEX(un))); 25853 25854 ret_val = SD_SUCCESS; 25855 25856 rqs_pktp = un->un_rqs_pktp; 25857 rqs_bp = un->un_rqs_bp; 25858 25859 mutex_enter(SD_MUTEX(un)); 25860 25861 if (un->un_sense_isbusy) { 25862 ret_val = SD_FAILURE; 25863 mutex_exit(SD_MUTEX(un)); 25864 return (ret_val); 25865 } 25866 25867 /* 25868 * If the request sense buffer (and packet) is not in use, 25869 * let's set the un_sense_isbusy and send our packet 25870 */ 25871 un->un_sense_isbusy = 1; 25872 rqs_pktp->pkt_resid = 0; 25873 rqs_pktp->pkt_reason = 0; 25874 rqs_pktp->pkt_flags |= FLAG_NOINTR; 25875 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 25876 25877 mutex_exit(SD_MUTEX(un)); 25878 25879 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 25880 " 0x%p\n", rqs_bp->b_un.b_addr); 25881 25882 /* 25883 * Can't send this to sd_scsi_poll, we wrap ourselves around the 25884 * axle - it has a call into us! 25885 */ 25886 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 25887 SD_INFO(SD_LOG_COMMON, un, 25888 "sd_send_polled_RQS: RQS failed\n"); 25889 } 25890 25891 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 25892 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 25893 25894 mutex_enter(SD_MUTEX(un)); 25895 un->un_sense_isbusy = 0; 25896 mutex_exit(SD_MUTEX(un)); 25897 25898 return (ret_val); 25899 } 25900 25901 /* 25902 * Defines needed for localized version of the scsi_poll routine. 25903 */ 25904 #define SD_CSEC 10000 /* usecs */ 25905 #define SD_SEC_TO_CSEC (1000000/SD_CSEC) 25906 25907 25908 /* 25909 * Function: sd_ddi_scsi_poll() 25910 * 25911 * Description: Localized version of the scsi_poll routine. The purpose is to 25912 * send a scsi_pkt to a device as a polled command. This version 25913 * is to ensure more robust handling of transport errors. 25914 * Specifically this routine handles the not-ready to ready 25915 * transition during power-up or reset of Sonoma devices. This can 25916 * take up to 45 seconds for power-on and 20 seconds for reset of a 25917 * Sonoma LUN. 25918 * 25919 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 25920 * 25921 * Return Code: 0 - Command completed successfully with good status 25922 * -1 - Command failed. 25923 * 25924 */ 25925 25926 static int 25927 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 25928 { 25929 int busy_count; 25930 int timeout; 25931 int rval = SD_FAILURE; 25932 int savef; 25933 struct scsi_extended_sense *sensep; 25934 long savet; 25935 void (*savec)(); 25936 /* 25937 * The following is defined in machdep.c and is used in determining if 25938 * the scsi transport system will do polled I/O instead of interrupt 25939 * I/O when called from xx_dump(). 25940 */ 25941 extern int do_polled_io; 25942 25943 /* 25944 * save old flags in pkt, to restore at end 25945 */ 25946 savef = pkt->pkt_flags; 25947 savec = pkt->pkt_comp; 25948 savet = pkt->pkt_time; 25949 25950 pkt->pkt_flags |= FLAG_NOINTR; 25951 25952 /* 25953 * XXX there is nothing in the SCSA spec that states that we should not 25954 * do a callback for polled cmds; however, removing this will break sd 25955 * and probably other target drivers 25956 */ 25957 pkt->pkt_comp = NULL; 25958 25959 /* 25960 * we don't like a polled command without timeout. 25961 * 60 seconds seems long enough.
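 *
 * For scale, a worked example of the centisecond bookkeeping in the
 * loop below: with the default pkt_time of 60 the budget is
 * 60 * SD_SEC_TO_CSEC = 6000 ticks of SD_CSEC (10000) usec each.
 * A 1-second poll_delay charges itself as 100 ticks (the loop
 * increment plus busy_count += SD_SEC_TO_CSEC - 1), so the loop
 * gives up after roughly pkt_time seconds of wall-clock time no
 * matter which mix of 10-msec and 1-second waits occurs.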
25962 */ 25963 if (pkt->pkt_time == 0) { 25964 pkt->pkt_time = SCSI_POLL_TIMEOUT; 25965 } 25966 25967 /* 25968 * Send polled cmd. 25969 * 25970 * We do some error recovery for various errors. Tran_busy, 25971 * queue full, and non-dispatched commands are retried every 10 msec. 25972 * as they are typically transient failures. Busy status and Not 25973 * Ready are retried every second as this status takes a while to 25974 * change. Unit attention is retried for pkt_time (60) times 25975 * with no delay. 25976 */ 25977 timeout = pkt->pkt_time * SD_SEC_TO_CSEC; 25978 25979 for (busy_count = 0; busy_count < timeout; busy_count++) { 25980 int rc; 25981 int poll_delay; 25982 25983 /* 25984 * Initialize pkt status variables. 25985 */ 25986 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 25987 25988 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 25989 if (rc != TRAN_BUSY) { 25990 /* Transport failed - give up. */ 25991 break; 25992 } else { 25993 /* Transport busy - try again. */ 25994 poll_delay = 1 * SD_CSEC; /* 10 msec */ 25995 } 25996 } else { 25997 /* 25998 * Transport accepted - check pkt status. 25999 */ 26000 rc = (*pkt->pkt_scbp) & STATUS_MASK; 26001 if (pkt->pkt_reason == CMD_CMPLT && 26002 rc == STATUS_CHECK && 26003 pkt->pkt_state & STATE_ARQ_DONE) { 26004 struct scsi_arq_status *arqstat = 26005 (struct scsi_arq_status *)(pkt->pkt_scbp); 26006 26007 sensep = &arqstat->sts_sensedata; 26008 } else { 26009 sensep = NULL; 26010 } 26011 26012 if ((pkt->pkt_reason == CMD_CMPLT) && 26013 (rc == STATUS_GOOD)) { 26014 /* No error - we're done */ 26015 rval = SD_SUCCESS; 26016 break; 26017 26018 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 26019 /* Lost connection - give up */ 26020 break; 26021 26022 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 26023 (pkt->pkt_state == 0)) { 26024 /* Pkt not dispatched - try again. */ 26025 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 26026 26027 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26028 (rc == STATUS_QFULL)) { 26029 /* Queue full - try again. */ 26030 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 26031 26032 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26033 (rc == STATUS_BUSY)) { 26034 /* Busy - try again. */ 26035 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 26036 busy_count += (SD_SEC_TO_CSEC - 1); 26037 26038 } else if ((sensep != NULL) && 26039 (sensep->es_key == KEY_UNIT_ATTENTION)) { 26040 /* Unit Attention - try again */ 26041 busy_count += (SD_SEC_TO_CSEC - 1); /* 1 */ 26042 continue; 26043 26044 } else if ((sensep != NULL) && 26045 (sensep->es_key == KEY_NOT_READY) && 26046 (sensep->es_add_code == 0x04) && 26047 (sensep->es_qual_code == 0x01)) { 26048 /* Not ready -> ready - try again. */ 26049 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 26050 busy_count += (SD_SEC_TO_CSEC - 1); 26051 26052 } else { 26053 /* BAD status - give up. */ 26054 break; 26055 } 26056 } 26057 26058 if ((curthread->t_flag & T_INTR_THREAD) == 0 && 26059 !do_polled_io) { 26060 delay(drv_usectohz(poll_delay)); 26061 } else { 26062 /* we busy wait during cpr_dump or interrupt threads */ 26063 drv_usecwait(poll_delay); 26064 } 26065 } 26066 26067 pkt->pkt_flags = savef; 26068 pkt->pkt_comp = savec; 26069 pkt->pkt_time = savet; 26070 return (rval); 26071 } 26072 26073 26074 /* 26075 * Function: sd_persistent_reservation_in_read_keys 26076 * 26077 * Description: This routine is the driver entry point for handling CD-ROM 26078 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 26079 * by sending the SCSI-3 PRIN commands to the device. 
26080 * Processes the read keys command response by copying the 26081 * reservation key information into the user provided buffer. 26082 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 26083 * 26084 * Arguments: un - Pointer to soft state struct for the target. 26085 * usrp - user provided pointer to multihost Persistent In Read 26086 * Keys structure (mhioc_inkeys_t) 26087 * flag - this argument is a pass through to ddi_copyxxx() 26088 * directly from the mode argument of ioctl(). 26089 * 26090 * Return Code: 0 - Success 26091 * EACCES 26092 * ENOTSUP 26093 * errno return code from sd_send_scsi_cmd() 26094 * 26095 * Context: Can sleep. Does not return until command is completed. 26096 */ 26097 26098 static int 26099 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 26100 mhioc_inkeys_t *usrp, int flag) 26101 { 26102 #ifdef _MULTI_DATAMODEL 26103 struct mhioc_key_list32 li32; 26104 #endif 26105 sd_prin_readkeys_t *in; 26106 mhioc_inkeys_t *ptr; 26107 mhioc_key_list_t li; 26108 uchar_t *data_bufp; 26109 int data_len; 26110 int rval; 26111 size_t copysz; 26112 26113 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 26114 return (EINVAL); 26115 } 26116 bzero(&li, sizeof (mhioc_key_list_t)); 26117 26118 /* 26119 * Get the listsize from user 26120 */ 26121 #ifdef _MULTI_DATAMODEL 26122 26123 switch (ddi_model_convert_from(flag & FMODELS)) { 26124 case DDI_MODEL_ILP32: 26125 copysz = sizeof (struct mhioc_key_list32); 26126 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 26127 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26128 "sd_persistent_reservation_in_read_keys: " 26129 "failed ddi_copyin: mhioc_key_list32_t\n"); 26130 rval = EFAULT; 26131 goto done; 26132 } 26133 li.listsize = li32.listsize; 26134 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 26135 break; 26136 26137 case DDI_MODEL_NONE: 26138 copysz = sizeof (mhioc_key_list_t); 26139 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26140 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26141 "sd_persistent_reservation_in_read_keys: " 26142 "failed ddi_copyin: mhioc_key_list_t\n"); 26143 rval = EFAULT; 26144 goto done; 26145 } 26146 break; 26147 } 26148 26149 #else /* ! 
_MULTI_DATAMODEL */ 26150 copysz = sizeof (mhioc_key_list_t); 26151 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26152 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26153 "sd_persistent_reservation_in_read_keys: " 26154 "failed ddi_copyin: mhioc_key_list_t\n"); 26155 rval = EFAULT; 26156 goto done; 26157 } 26158 #endif 26159 26160 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 26161 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 26162 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26163 26164 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 26165 data_len, data_bufp)) != 0) { 26166 goto done; 26167 } 26168 in = (sd_prin_readkeys_t *)data_bufp; 26169 ptr->generation = BE_32(in->generation); 26170 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 26171 26172 /* 26173 * Return the min(listsize, listlen) keys 26174 */ 26175 #ifdef _MULTI_DATAMODEL 26176 26177 switch (ddi_model_convert_from(flag & FMODELS)) { 26178 case DDI_MODEL_ILP32: 26179 li32.listlen = li.listlen; 26180 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 26181 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26182 "sd_persistent_reservation_in_read_keys: " 26183 "failed ddi_copyout: mhioc_key_list32_t\n"); 26184 rval = EFAULT; 26185 goto done; 26186 } 26187 break; 26188 26189 case DDI_MODEL_NONE: 26190 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26191 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26192 "sd_persistent_reservation_in_read_keys: " 26193 "failed ddi_copyout: mhioc_key_list_t\n"); 26194 rval = EFAULT; 26195 goto done; 26196 } 26197 break; 26198 } 26199 26200 #else /* ! _MULTI_DATAMODEL */ 26201 26202 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26203 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26204 "sd_persistent_reservation_in_read_keys: " 26205 "failed ddi_copyout: mhioc_key_list_t\n"); 26206 rval = EFAULT; 26207 goto done; 26208 } 26209 26210 #endif /* _MULTI_DATAMODEL */ 26211 26212 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 26213 li.listsize * MHIOC_RESV_KEY_SIZE); 26214 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 26215 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26216 "sd_persistent_reservation_in_read_keys: " 26217 "failed ddi_copyout: keylist\n"); 26218 rval = EFAULT; 26219 } 26220 done: 26221 kmem_free(data_bufp, data_len); 26222 return (rval); 26223 } 26224 26225 26226 /* 26227 * Function: sd_persistent_reservation_in_read_resv 26228 * 26229 * Description: This routine is the driver entry point for handling CD-ROM 26230 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 26231 * by sending the SCSI-3 PRIN commands to the device. 26232 * Process the read persistent reservations command response by 26233 * copying the reservation information into the user provided 26234 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 26235 * 26236 * Arguments: un - Pointer to soft state struct for the target. 26237 * usrp - user provided pointer to multihost Persistent In Read 26238 * Reservations structure (mhioc_inresvs_t) 26239 * flag - this argument is a pass through to ddi_copyxxx() 26240 * directly from the mode argument of ioctl(). 26241 * 26242 * Return Code: 0 - Success 26243 * EACCES 26244 * ENOTSUP 26245 * errno return code from sd_send_scsi_cmd() 26246 * 26247 * Context: Can sleep. Does not return until command is completed.
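 *
 * For context, a hedged userland sketch (not driver code; the types
 * come from <sys/mhd.h>) of the two-step pattern callers of
 * MHIOCGRP_INKEYS and MHIOCGRP_INRESV typically follow, sizing the
 * list on the first call and fetching it on the second:
 *
 *	mhioc_inresvs_t r;
 *	mhioc_resv_desc_list_t rl;
 *
 *	rl.listsize = 0;
 *	rl.list = NULL;
 *	r.li = &rl;
 *	if (ioctl(fd, MHIOCGRP_INRESV, &r) == 0 && rl.listlen > 0) {
 *		rl.listsize = rl.listlen;
 *		rl.list = calloc(rl.listlen,
 *		    sizeof (mhioc_resv_desc_t));
 *		(void) ioctl(fd, MHIOCGRP_INRESV, &r);
 *	}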
26248 */ 26249 26250 static int 26251 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 26252 mhioc_inresvs_t *usrp, int flag) 26253 { 26254 #ifdef _MULTI_DATAMODEL 26255 struct mhioc_resv_desc_list32 resvlist32; 26256 #endif 26257 sd_prin_readresv_t *in; 26258 mhioc_inresvs_t *ptr; 26259 sd_readresv_desc_t *readresv_ptr; 26260 mhioc_resv_desc_list_t resvlist; 26261 mhioc_resv_desc_t resvdesc; 26262 uchar_t *data_bufp; 26263 int data_len; 26264 int rval; 26265 int i; 26266 size_t copysz; 26267 mhioc_resv_desc_t *bufp; 26268 26269 if ((ptr = usrp) == NULL) { 26270 return (EINVAL); 26271 } 26272 26273 /* 26274 * Get the listsize from user 26275 */ 26276 #ifdef _MULTI_DATAMODEL 26277 switch (ddi_model_convert_from(flag & FMODELS)) { 26278 case DDI_MODEL_ILP32: 26279 copysz = sizeof (struct mhioc_resv_desc_list32); 26280 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 26281 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26282 "sd_persistent_reservation_in_read_resv: " 26283 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26284 rval = EFAULT; 26285 goto done; 26286 } 26287 resvlist.listsize = resvlist32.listsize; 26288 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 26289 break; 26290 26291 case DDI_MODEL_NONE: 26292 copysz = sizeof (mhioc_resv_desc_list_t); 26293 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26294 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26295 "sd_persistent_reservation_in_read_resv: " 26296 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26297 rval = EFAULT; 26298 goto done; 26299 } 26300 break; 26301 } 26302 #else /* ! _MULTI_DATAMODEL */ 26303 copysz = sizeof (mhioc_resv_desc_list_t); 26304 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26305 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26306 "sd_persistent_reservation_in_read_resv: " 26307 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26308 rval = EFAULT; 26309 goto done; 26310 } 26311 #endif /* ! _MULTI_DATAMODEL */ 26312 26313 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 26314 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 26315 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26316 26317 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 26318 data_len, data_bufp)) != 0) { 26319 goto done; 26320 } 26321 in = (sd_prin_readresv_t *)data_bufp; 26322 ptr->generation = BE_32(in->generation); 26323 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 26324 26325 /* 26326 * Return the min(listsize, listlen) keys 26327 */ 26328 #ifdef _MULTI_DATAMODEL 26329 26330 switch (ddi_model_convert_from(flag & FMODELS)) { 26331 case DDI_MODEL_ILP32: 26332 resvlist32.listlen = resvlist.listlen; 26333 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 26334 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26335 "sd_persistent_reservation_in_read_resv: " 26336 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26337 rval = EFAULT; 26338 goto done; 26339 } 26340 break; 26341 26342 case DDI_MODEL_NONE: 26343 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26344 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26345 "sd_persistent_reservation_in_read_resv: " 26346 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26347 rval = EFAULT; 26348 goto done; 26349 } 26350 break; 26351 } 26352 26353 #else /* ! _MULTI_DATAMODEL */ 26354 26355 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26356 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26357 "sd_persistent_reservation_in_read_resv: " 26358 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26359 rval = EFAULT; 26360 goto done; 26361 } 26362 26363 #endif /* !
_MULTI_DATAMODEL */ 26364 26365 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 26366 bufp = resvlist.list; 26367 copysz = sizeof (mhioc_resv_desc_t); 26368 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 26369 i++, readresv_ptr++, bufp++) { 26370 26371 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 26372 MHIOC_RESV_KEY_SIZE); 26373 resvdesc.type = readresv_ptr->type; 26374 resvdesc.scope = readresv_ptr->scope; 26375 resvdesc.scope_specific_addr = 26376 BE_32(readresv_ptr->scope_specific_addr); 26377 26378 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 26379 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26380 "sd_persistent_reservation_in_read_resv: " 26381 "failed ddi_copyout: resvlist\n"); 26382 rval = EFAULT; 26383 goto done; 26384 } 26385 } 26386 done: 26387 kmem_free(data_bufp, data_len); 26388 return (rval); 26389 } 26390 26391 26392 /* 26393 * Function: sr_change_blkmode() 26394 * 26395 * Description: This routine is the driver entry point for handling CD-ROM 26396 * block mode ioctl requests. Support for returning and changing 26397 * the current block size in use by the device is implemented. The 26398 * LBA size is changed via a MODE SELECT Block Descriptor. 26399 * 26400 * This routine issues a mode sense with an allocation length of 26401 * 12 bytes for the mode page header and a single block descriptor. 26402 * 26403 * Arguments: dev - the device 'dev_t' 26404 * cmd - the request type; one of CDROMGBLKMODE (get) or 26405 * CDROMSBLKMODE (set) 26406 * data - current block size or requested block size 26407 * flag - this argument is a pass through to ddi_copyxxx() directly 26408 * from the mode argument of ioctl(). 26409 * 26410 * Return Code: the code returned by sd_send_scsi_cmd() 26411 * EINVAL if invalid arguments are provided 26412 * EFAULT if ddi_copyxxx() fails 26413 * ENXIO if fail ddi_get_soft_state 26414 * EIO if invalid mode sense block descriptor length 26415 * 26416 */ 26417 26418 static int 26419 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 26420 { 26421 struct sd_lun *un = NULL; 26422 struct mode_header *sense_mhp, *select_mhp; 26423 struct block_descriptor *sense_desc, *select_desc; 26424 int current_bsize; 26425 int rval = EINVAL; 26426 uchar_t *sense = NULL; 26427 uchar_t *select = NULL; 26428 26429 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 26430 26431 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26432 return (ENXIO); 26433 } 26434 26435 /* 26436 * The block length is changed via the Mode Select block descriptor; the 26437 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 26438 * required as part of this routine. Therefore the mode sense allocation 26439 * length is specified to be the length of a mode page header and a 26440 * block descriptor.
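 *
 * Concretely (assuming the usual group-0 values MODE_HEADER_LENGTH
 * == 4 and MODE_BLK_DESC_LENGTH == 8, so BUFLEN_CHG_BLK_MODE == 12),
 * the buffer parsed below lays out as:
 *
 *	bytes 0-3	struct mode_header (bdesc_length expected
 *			to be 8)
 *	bytes 4-11	struct block_descriptor, whose blksize_hi/
 *			mid/lo bytes carry the current block size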
26441 */ 26442 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26443 26444 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26445 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 26446 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26447 "sr_change_blkmode: Mode Sense Failed\n"); 26448 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26449 return (rval); 26450 } 26451 26452 /* Check the block descriptor len to handle only 1 block descriptor */ 26453 sense_mhp = (struct mode_header *)sense; 26454 if ((sense_mhp->bdesc_length == 0) || 26455 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 26456 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26457 "sr_change_blkmode: Mode Sense returned invalid block" 26458 " descriptor length\n"); 26459 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26460 return (EIO); 26461 } 26462 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 26463 current_bsize = ((sense_desc->blksize_hi << 16) | 26464 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 26465 26466 /* Process command */ 26467 switch (cmd) { 26468 case CDROMGBLKMODE: 26469 /* Return the block size obtained during the mode sense */ 26470 if (ddi_copyout(&current_bsize, (void *)data, 26471 sizeof (int), flag) != 0) 26472 rval = EFAULT; 26473 break; 26474 case CDROMSBLKMODE: 26475 /* Validate the requested block size */ 26476 switch (data) { 26477 case CDROM_BLK_512: 26478 case CDROM_BLK_1024: 26479 case CDROM_BLK_2048: 26480 case CDROM_BLK_2056: 26481 case CDROM_BLK_2336: 26482 case CDROM_BLK_2340: 26483 case CDROM_BLK_2352: 26484 case CDROM_BLK_2368: 26485 case CDROM_BLK_2448: 26486 case CDROM_BLK_2646: 26487 case CDROM_BLK_2647: 26488 break; 26489 default: 26490 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26491 "sr_change_blkmode: " 26492 "Block Size '%ld' Not Supported\n", data); 26493 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26494 return (EINVAL); 26495 } 26496 26497 /* 26498 * The current block size matches the requested block size so 26499 * there is no need to send the mode select to change the size 26500 */ 26501 if (current_bsize == data) { 26502 break; 26503 } 26504 26505 /* Build the select data for the requested block size */ 26506 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26507 select_mhp = (struct mode_header *)select; 26508 select_desc = 26509 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 26510 /* 26511 * The LBA size is changed via the block descriptor, so the 26512 * descriptor is built according to the user data 26513 */ 26514 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 26515 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 26516 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 26517 select_desc->blksize_lo = (char)((data) & 0x000000ff); 26518 26519 /* Send the mode select for the requested block size */ 26520 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 26521 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26522 SD_PATH_STANDARD)) != 0) { 26523 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26524 "sr_change_blkmode: Mode Select Failed\n"); 26525 /* 26526 * The mode select failed for the requested block size, 26527 * so reset the data for the original block size and 26528 * send it to the target. The error is indicated by the 26529 * return value for the failed mode select.
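 *
 * (For example, a requested size of 2048 went out above as
 * blksize_hi/mid/lo = 0x00/0x08/0x00; the revert below simply
 * puts back the three bytes captured by the mode sense.)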
26530 */ 26531 select_desc->blksize_hi = sense_desc->blksize_hi; 26532 select_desc->blksize_mid = sense_desc->blksize_mid; 26533 select_desc->blksize_lo = sense_desc->blksize_lo; 26534 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 26535 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26536 SD_PATH_STANDARD); 26537 } else { 26538 ASSERT(!mutex_owned(SD_MUTEX(un))); 26539 mutex_enter(SD_MUTEX(un)); 26540 sd_update_block_info(un, (uint32_t)data, 0); 26541 26542 mutex_exit(SD_MUTEX(un)); 26543 } 26544 break; 26545 default: 26546 /* should not reach here, but check anyway */ 26547 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26548 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 26549 rval = EINVAL; 26550 break; 26551 } 26552 26553 if (select) { 26554 kmem_free(select, BUFLEN_CHG_BLK_MODE); 26555 } 26556 if (sense) { 26557 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26558 } 26559 return (rval); 26560 } 26561 26562 26563 /* 26564 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 26565 * implement driver support for getting and setting the CD speed. The command 26566 * set used will be based on the device type. If the device has not been 26567 * identified as MMC the Toshiba vendor specific mode page will be used. If 26568 * the device is MMC but does not support the Real Time Streaming feature 26569 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 26570 * be used to read the speed. 26571 */ 26572 26573 /* 26574 * Function: sr_change_speed() 26575 * 26576 * Description: This routine is the driver entry point for handling CD-ROM 26577 * drive speed ioctl requests for devices supporting the Toshiba 26578 * vendor specific drive speed mode page. Support for returning 26579 * and changing the current drive speed in use by the device is 26580 * implemented. 26581 * 26582 * Arguments: dev - the device 'dev_t' 26583 * cmd - the request type; one of CDROMGDRVSPEED (get) or 26584 * CDROMSDRVSPEED (set) 26585 * data - current drive speed or requested drive speed 26586 * flag - this argument is a pass through to ddi_copyxxx() directly 26587 * from the mode argument of ioctl(). 26588 * 26589 * Return Code: the code returned by sd_send_scsi_cmd() 26590 * EINVAL if invalid arguments are provided 26591 * EFAULT if ddi_copyxxx() fails 26592 * ENXIO if fail ddi_get_soft_state 26593 * EIO if invalid mode sense block descriptor length 26594 */ 26595 26596 static int 26597 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26598 { 26599 struct sd_lun *un = NULL; 26600 struct mode_header *sense_mhp, *select_mhp; 26601 struct mode_speed *sense_page, *select_page; 26602 int current_speed; 26603 int rval = EINVAL; 26604 int bd_len; 26605 uchar_t *sense = NULL; 26606 uchar_t *select = NULL; 26607 26608 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26609 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26610 return (ENXIO); 26611 } 26612 26613 /* 26614 * Note: The drive speed is being modified here according to a Toshiba 26615 * vendor specific mode page (0x31). 
26616 */ 26617 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26618 26619 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26620 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 26621 SD_PATH_STANDARD)) != 0) { 26622 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26623 "sr_change_speed: Mode Sense Failed\n"); 26624 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26625 return (rval); 26626 } 26627 sense_mhp = (struct mode_header *)sense; 26628 26629 /* Check the block descriptor len to handle only 1 block descriptor */ 26630 bd_len = sense_mhp->bdesc_length; 26631 if (bd_len > MODE_BLK_DESC_LENGTH) { 26632 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26633 "sr_change_speed: Mode Sense returned invalid block " 26634 "descriptor length\n"); 26635 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26636 return (EIO); 26637 } 26638 26639 sense_page = (struct mode_speed *) 26640 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 26641 current_speed = sense_page->speed; 26642 26643 /* Process command */ 26644 switch (cmd) { 26645 case CDROMGDRVSPEED: 26646 /* Return the drive speed obtained during the mode sense */ 26647 if (current_speed == 0x2) { 26648 current_speed = CDROM_TWELVE_SPEED; 26649 } 26650 if (ddi_copyout(&current_speed, (void *)data, 26651 sizeof (int), flag) != 0) { 26652 rval = EFAULT; 26653 } 26654 break; 26655 case CDROMSDRVSPEED: 26656 /* Validate the requested drive speed */ 26657 switch ((uchar_t)data) { 26658 case CDROM_TWELVE_SPEED: 26659 data = 0x2; 26660 /*FALLTHROUGH*/ 26661 case CDROM_NORMAL_SPEED: 26662 case CDROM_DOUBLE_SPEED: 26663 case CDROM_QUAD_SPEED: 26664 case CDROM_MAXIMUM_SPEED: 26665 break; 26666 default: 26667 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26668 "sr_change_speed: " 26669 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 26670 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26671 return (EINVAL); 26672 } 26673 26674 /* 26675 * The current drive speed matches the requested drive speed so 26676 * there is no need to send the mode select to change the speed 26677 */ 26678 if (current_speed == data) { 26679 break; 26680 } 26681 26682 /* Build the select data for the requested drive speed */ 26683 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26684 select_mhp = (struct mode_header *)select; 26685 select_mhp->bdesc_length = 0; 26686 select_page = 26687 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 26690 select_page->mode_page.code = CDROM_MODE_SPEED; 26691 select_page->mode_page.length = 2; 26692 select_page->speed = (uchar_t)data; 26693 26694 /* Send the mode select for the requested drive speed */ 26695 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26696 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26697 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 26698 /* 26699 * The mode select failed for the requested drive speed, 26700 * so reset the data for the original drive speed and 26701 * send it to the target. The error is indicated by the 26702 * return value for the failed mode select.
26703 */ 26704 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26705 "sr_change_speed: Mode Select Failed\n"); 26706 select_page->speed = sense_page->speed; 26707 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26708 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26709 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26710 } 26711 break; 26712 default: 26713 /* should not reach here, but check anyway */ 26714 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26715 "sr_change_speed: Command '%x' Not Supported\n", cmd); 26716 rval = EINVAL; 26717 break; 26718 } 26719 26720 if (select) { 26721 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 26722 } 26723 if (sense) { 26724 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26725 } 26726 26727 return (rval); 26728 } 26729 26730 26731 /* 26732 * Function: sr_atapi_change_speed() 26733 * 26734 * Description: This routine is the driver entry point for handling CD-ROM 26735 * drive speed ioctl requests for MMC devices that do not support 26736 * the Real Time Streaming feature (0x107). 26737 * 26738 * Note: This routine will use the SET SPEED command which may not 26739 * be supported by all devices. 26740 * 26741 * Arguments: dev - the device 'dev_t' 26742 * cmd - the request type; one of CDROMGDRVSPEED (get) or 26743 * CDROMSDRVSPEED (set) 26744 * data - current drive speed or requested drive speed 26745 * flag - this argument is a pass through to ddi_copyxxx() directly 26746 * from the mode argument of ioctl(). 26747 * 26748 * Return Code: the code returned by sd_send_scsi_cmd() 26749 * EINVAL if invalid arguments are provided 26750 * EFAULT if ddi_copyxxx() fails 26751 * ENXIO if fail ddi_get_soft_state 26752 * EIO if invalid mode sense block descriptor length 26753 */ 26754 26755 static int 26756 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26757 { 26758 struct sd_lun *un; 26759 struct uscsi_cmd *com = NULL; 26760 struct mode_header_grp2 *sense_mhp; 26761 uchar_t *sense_page; 26762 uchar_t *sense = NULL; 26763 char cdb[CDB_GROUP5]; 26764 int bd_len; 26765 int current_speed = 0; 26766 int max_speed = 0; 26767 int rval; 26768 26769 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26770 26771 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26772 return (ENXIO); 26773 } 26774 26775 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 26776 26777 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 26778 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 26779 SD_PATH_STANDARD)) != 0) { 26780 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26781 "sr_atapi_change_speed: Mode Sense Failed\n"); 26782 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26783 return (rval); 26784 } 26785 26786 /* Check the block descriptor len to handle only 1 block descriptor */ 26787 sense_mhp = (struct mode_header_grp2 *)sense; 26788 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 26789 if (bd_len > MODE_BLK_DESC_LENGTH) { 26790 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26791 "sr_atapi_change_speed: Mode Sense returned invalid " 26792 "block descriptor length\n"); 26793 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26794 return (EIO); 26795 } 26796 26797 /* Calculate the current and maximum drive speeds */ 26798 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26799 current_speed = (sense_page[14] << 8) | sense_page[15]; 26800 max_speed = (sense_page[8] << 8) | sense_page[9]; 26801 26802 /* Process the command */ 26803 switch (cmd) { 26804 case CDROMGDRVSPEED: 26805 current_speed /= SD_SPEED_1X; 26806 if
(ddi_copyout(&current_speed, (void *)data, 26807 sizeof (int), flag) != 0) 26808 rval = EFAULT; 26809 break; 26810 case CDROMSDRVSPEED: 26811 /* Convert the speed code to KB/sec */ 26812 switch ((uchar_t)data) { 26813 case CDROM_NORMAL_SPEED: 26814 current_speed = SD_SPEED_1X; 26815 break; 26816 case CDROM_DOUBLE_SPEED: 26817 current_speed = 2 * SD_SPEED_1X; 26818 break; 26819 case CDROM_QUAD_SPEED: 26820 current_speed = 4 * SD_SPEED_1X; 26821 break; 26822 case CDROM_TWELVE_SPEED: 26823 current_speed = 12 * SD_SPEED_1X; 26824 break; 26825 case CDROM_MAXIMUM_SPEED: 26826 current_speed = 0xffff; 26827 break; 26828 default: 26829 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26830 "sr_atapi_change_speed: invalid drive speed %d\n", 26831 (uchar_t)data); 26832 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26833 return (EINVAL); 26834 } 26835 26836 /* Check the request against the drive's max speed. */ 26837 if (current_speed != 0xffff) { 26838 if (current_speed > max_speed) { 26839 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26840 return (EINVAL); 26841 } 26842 } 26843 26844 /* 26845 * Build and send the SET SPEED command 26846 * 26847 * Note: The SET SPEED (0xBB) command used in this routine is 26848 * obsolete per the SCSI MMC spec but still supported in the 26849 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 26850 * therefore the command is still implemented in this routine. 26851 */ 26852 bzero(cdb, sizeof (cdb)); 26853 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 26854 cdb[2] = (uchar_t)(current_speed >> 8); 26855 cdb[3] = (uchar_t)current_speed; 26856 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26857 com->uscsi_cdb = (caddr_t)cdb; 26858 com->uscsi_cdblen = CDB_GROUP5; 26859 com->uscsi_bufaddr = NULL; 26860 com->uscsi_buflen = 0; 26861 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26862 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, 0, 26863 UIO_SYSSPACE, SD_PATH_STANDARD); 26864 break; 26865 default: 26866 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26867 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 26868 rval = EINVAL; 26869 } 26870 26871 if (sense) { 26872 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26873 } 26874 if (com) { 26875 kmem_free(com, sizeof (*com)); 26876 } 26877 return (rval); 26878 } 26879 26880 26881 /* 26882 * Function: sr_pause_resume() 26883 * 26884 * Description: This routine is the driver entry point for handling CD-ROM 26885 * pause/resume ioctl requests. This only affects the audio play 26886 * operation. 26887 * 26888 * Arguments: dev - the device 'dev_t' 26889 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 26890 * for setting the resume bit of the cdb.
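 *
 * For example, a cdrom(7I) consumer pauses and resumes play with
 * ioctl(fd, CDROMPAUSE, 0) and ioctl(fd, CDROMRESUME, 0); both
 * arrive here and differ only in the resume bit (cdb[8]) set below.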
26891 * 26892 * Return Code: the code returned by sd_send_scsi_cmd() 26893 * EINVAL if invalid mode specified 26894 * 26895 */ 26896 26897 static int 26898 sr_pause_resume(dev_t dev, int cmd) 26899 { 26900 struct sd_lun *un; 26901 struct uscsi_cmd *com; 26902 char cdb[CDB_GROUP1]; 26903 int rval; 26904 26905 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26906 return (ENXIO); 26907 } 26908 26909 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26910 bzero(cdb, CDB_GROUP1); 26911 cdb[0] = SCMD_PAUSE_RESUME; 26912 switch (cmd) { 26913 case CDROMRESUME: 26914 cdb[8] = 1; 26915 break; 26916 case CDROMPAUSE: 26917 cdb[8] = 0; 26918 break; 26919 default: 26920 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 26921 " Command '%x' Not Supported\n", cmd); 26922 rval = EINVAL; 26923 goto done; 26924 } 26925 26926 com->uscsi_cdb = cdb; 26927 com->uscsi_cdblen = CDB_GROUP1; 26928 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26929 26930 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 26931 UIO_SYSSPACE, SD_PATH_STANDARD); 26932 26933 done: 26934 kmem_free(com, sizeof (*com)); 26935 return (rval); 26936 } 26937 26938 26939 /* 26940 * Function: sr_play_msf() 26941 * 26942 * Description: This routine is the driver entry point for handling CD-ROM 26943 * ioctl requests to output the audio signals at the specified 26944 * starting address and continue the audio play until the specified 26945 * ending address (CDROMPLAYMSF). The address is in Minute Second 26946 * Frame (MSF) format. 26947 * 26948 * Arguments: dev - the device 'dev_t' 26949 * data - pointer to user provided audio msf structure, 26950 * specifying start/end addresses. 26951 * flag - this argument is a pass through to ddi_copyxxx() 26952 * directly from the mode argument of ioctl().
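 *
 * For example, a start address of 2 min 35 sec frame 12 is encoded
 * below as cdb[3..5] = BYTE_TO_BCD(2), BYTE_TO_BCD(35),
 * BYTE_TO_BCD(12) = 0x02 0x35 0x12 on drives flagged
 * un_f_cfg_playmsf_bcd, and as the plain binary values 2, 35, 12
 * otherwise.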
26953 * 26954 * Return Code: the code returned by sd_send_scsi_cmd() 26955 * EFAULT if ddi_copyxxx() fails 26956 * ENXIO if fail ddi_get_soft_state 26957 * EINVAL if data pointer is NULL 26958 */ 26959 26960 static int 26961 sr_play_msf(dev_t dev, caddr_t data, int flag) 26962 { 26963 struct sd_lun *un; 26964 struct uscsi_cmd *com; 26965 struct cdrom_msf msf_struct; 26966 struct cdrom_msf *msf = &msf_struct; 26967 char cdb[CDB_GROUP1]; 26968 int rval; 26969 26970 if (data == NULL) { 26971 return (EINVAL); 26972 } 26973 26974 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26975 return (ENXIO); 26976 } 26977 26978 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 26979 return (EFAULT); 26980 } 26981 26982 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26983 bzero(cdb, CDB_GROUP1); 26984 cdb[0] = SCMD_PLAYAUDIO_MSF; 26985 if (un->un_f_cfg_playmsf_bcd == TRUE) { 26986 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 26987 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 26988 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 26989 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 26990 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 26991 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 26992 } else { 26993 cdb[3] = msf->cdmsf_min0; 26994 cdb[4] = msf->cdmsf_sec0; 26995 cdb[5] = msf->cdmsf_frame0; 26996 cdb[6] = msf->cdmsf_min1; 26997 cdb[7] = msf->cdmsf_sec1; 26998 cdb[8] = msf->cdmsf_frame1; 26999 } 27000 com->uscsi_cdb = cdb; 27001 com->uscsi_cdblen = CDB_GROUP1; 27002 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27003 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27004 UIO_SYSSPACE, SD_PATH_STANDARD); 27005 kmem_free(com, sizeof (*com)); 27006 return (rval); 27007 } 27008 27009 27010 /* 27011 * Function: sr_play_trkind() 27012 * 27013 * Description: This routine is the driver entry point for handling CD-ROM 27014 * ioctl requests to output the audio signals at the specified 27015 * starting address and continue the audio play until the specified 27016 * ending address (CDROMPLAYTRKIND). The address is in Track Index 27017 * format. 27018 * 27019 * Arguments: dev - the device 'dev_t' 27020 * data - pointer to user provided audio track/index structure, 27021 * specifying start/end addresses. 27022 * flag - this argument is a pass through to ddi_copyxxx() 27023 * directly from the mode argument of ioctl(). 
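 *
 * For example, playing from track 3 index 1 through track 5 index 1
 * (cdti_trk0 = 3, cdti_ind0 = 1, cdti_trk1 = 5, cdti_ind1 = 1)
 * produces a PLAY AUDIO TRACK INDEX CDB whose bytes 4-8 read
 * 03 01 00 05 01, as built below.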
27024 * 27025 * Return Code: the code returned by sd_send_scsi_cmd() 27026 * EFAULT if ddi_copyxxx() fails 27027 * ENXIO if fail ddi_get_soft_state 27028 * EINVAL if data pointer is NULL 27029 */ 27030 27031 static int 27032 sr_play_trkind(dev_t dev, caddr_t data, int flag) 27033 { 27034 struct cdrom_ti ti_struct; 27035 struct cdrom_ti *ti = &ti_struct; 27036 struct uscsi_cmd *com = NULL; 27037 char cdb[CDB_GROUP1]; 27038 int rval; 27039 27040 if (data == NULL) { 27041 return (EINVAL); 27042 } 27043 27044 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 27045 return (EFAULT); 27046 } 27047 27048 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27049 bzero(cdb, CDB_GROUP1); 27050 cdb[0] = SCMD_PLAYAUDIO_TI; 27051 cdb[4] = ti->cdti_trk0; 27052 cdb[5] = ti->cdti_ind0; 27053 cdb[7] = ti->cdti_trk1; 27054 cdb[8] = ti->cdti_ind1; 27055 com->uscsi_cdb = cdb; 27056 com->uscsi_cdblen = CDB_GROUP1; 27057 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27058 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27059 UIO_SYSSPACE, SD_PATH_STANDARD); 27060 kmem_free(com, sizeof (*com)); 27061 return (rval); 27062 } 27063 27064 27065 /* 27066 * Function: sr_read_all_subcodes() 27067 * 27068 * Description: This routine is the driver entry point for handling CD-ROM 27069 * ioctl requests to return raw subcode data while the target is 27070 * playing audio (CDROMSUBCODE). 27071 * 27072 * Arguments: dev - the device 'dev_t' 27073 * data - pointer to user provided cdrom subcode structure, 27074 * specifying the transfer length and address. 27075 * flag - this argument is a pass through to ddi_copyxxx() 27076 * directly from the mode argument of ioctl(). 27077 * 27078 * Return Code: the code returned by sd_send_scsi_cmd() 27079 * EFAULT if ddi_copyxxx() fails 27080 * ENXIO if fail ddi_get_soft_state 27081 * EINVAL if data pointer is NULL 27082 */ 27083 27084 static int 27085 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 27086 { 27087 struct sd_lun *un = NULL; 27088 struct uscsi_cmd *com = NULL; 27089 struct cdrom_subcode *subcode = NULL; 27090 int rval; 27091 size_t buflen; 27092 char cdb[CDB_GROUP5]; 27093 27094 #ifdef _MULTI_DATAMODEL 27095 /* To support ILP32 applications in an LP64 world */ 27096 struct cdrom_subcode32 cdrom_subcode32; 27097 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 27098 #endif 27099 if (data == NULL) { 27100 return (EINVAL); 27101 } 27102 27103 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27104 return (ENXIO); 27105 } 27106 27107 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 27108 27109 #ifdef _MULTI_DATAMODEL 27110 switch (ddi_model_convert_from(flag & FMODELS)) { 27111 case DDI_MODEL_ILP32: 27112 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 27113 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27114 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27115 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27116 return (EFAULT); 27117 } 27118 /* Convert the ILP32 uscsi data from the application to LP64 */ 27119 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 27120 break; 27121 case DDI_MODEL_NONE: 27122 if (ddi_copyin(data, subcode, 27123 sizeof (struct cdrom_subcode), flag)) { 27124 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27125 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27126 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27127 return (EFAULT); 27128 } 27129 break; 27130 } 27131 #else /* ! 
_MULTI_DATAMODEL */ 27132 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 27133 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27134 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27135 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27136 return (EFAULT); 27137 } 27138 #endif /* _MULTI_DATAMODEL */ 27139 27140 /* 27141 * Since MMC-2 expects max 3 bytes for length, check if the 27142 * length input is greater than 3 bytes 27143 */ 27144 if ((subcode->cdsc_length & 0xFF000000) != 0) { 27145 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27146 "sr_read_all_subcodes: " 27147 "cdrom transfer length too large: %d (limit %d)\n", 27148 subcode->cdsc_length, 0xFFFFFF); 27149 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27150 return (EINVAL); 27151 } 27152 27153 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 27154 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27155 bzero(cdb, CDB_GROUP5); 27156 27157 if (un->un_f_mmc_cap == TRUE) { 27158 cdb[0] = (char)SCMD_READ_CD; 27159 cdb[2] = (char)0xff; 27160 cdb[3] = (char)0xff; 27161 cdb[4] = (char)0xff; 27162 cdb[5] = (char)0xff; 27163 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27164 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27165 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 27166 cdb[10] = 1; 27167 } else { 27168 /* 27169 * Note: A vendor specific command (0xDF) is being used here to 27170 * request a read of all subcodes. 27171 */ 27172 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 27173 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 27174 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27175 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27176 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 27177 } 27178 com->uscsi_cdb = cdb; 27179 com->uscsi_cdblen = CDB_GROUP5; 27180 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 27181 com->uscsi_buflen = buflen; 27182 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27183 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 27184 UIO_SYSSPACE, SD_PATH_STANDARD); 27185 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27186 kmem_free(com, sizeof (*com)); 27187 return (rval); 27188 } 27189 27190 27191 /* 27192 * Function: sr_read_subchannel() 27193 * 27194 * Description: This routine is the driver entry point for handling CD-ROM 27195 * ioctl requests to return the Q sub-channel data of the CD 27196 * current position block (CDROMSUBCHNL). The data includes the 27197 * track number, index number, absolute CD-ROM address (LBA or MSF 27198 * format per the user), track relative CD-ROM address (LBA or MSF 27199 * format per the user), control data and audio status. 27200 * 27201 * Arguments: dev - the device 'dev_t' 27202 * data - pointer to user provided cdrom sub-channel structure 27203 * flag - this argument is a pass through to ddi_copyxxx() 27204 * directly from the mode argument of ioctl().
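 *
 * For reference while reading the parsing below, the 16-byte
 * current-position response is laid out as: byte 1 audio status;
 * byte 5 ADR (high nibble) and control (low nibble); byte 6 track;
 * byte 7 index; bytes 8-11 the absolute address and bytes 12-15 the
 * track-relative address, each in LBA or MSF form according to the
 * MSF bit sent in cdb[1].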
27205 * 27206 * Return Code: the code returned by sd_send_scsi_cmd() 27207 * EFAULT if ddi_copyxxx() fails 27208 * ENXIO if fail ddi_get_soft_state 27209 * EINVAL if data pointer is NULL 27210 */ 27211 27212 static int 27213 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 27214 { 27215 struct sd_lun *un; 27216 struct uscsi_cmd *com; 27217 struct cdrom_subchnl subchanel; 27218 struct cdrom_subchnl *subchnl = &subchanel; 27219 char cdb[CDB_GROUP1]; 27220 caddr_t buffer; 27221 int rval; 27222 27223 if (data == NULL) { 27224 return (EINVAL); 27225 } 27226 27227 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27228 (un->un_state == SD_STATE_OFFLINE)) { 27229 return (ENXIO); 27230 } 27231 27232 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 27233 return (EFAULT); 27234 } 27235 27236 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 27237 bzero(cdb, CDB_GROUP1); 27238 cdb[0] = SCMD_READ_SUBCHANNEL; 27239 /* Set the MSF bit based on the user requested address format */ 27240 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 27241 /* 27242 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 27243 * returned 27244 */ 27245 cdb[2] = 0x40; 27246 /* 27247 * Set byte 3 to specify the return data format. A value of 0x01 27248 * indicates that the CD-ROM current position should be returned. 27249 */ 27250 cdb[3] = 0x01; 27251 cdb[8] = 0x10; 27252 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27253 com->uscsi_cdb = cdb; 27254 com->uscsi_cdblen = CDB_GROUP1; 27255 com->uscsi_bufaddr = buffer; 27256 com->uscsi_buflen = 16; 27257 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27258 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27259 UIO_SYSSPACE, SD_PATH_STANDARD); 27260 if (rval != 0) { 27261 kmem_free(buffer, 16); 27262 kmem_free(com, sizeof (*com)); 27263 return (rval); 27264 } 27265 27266 /* Process the returned Q sub-channel data */ 27267 subchnl->cdsc_audiostatus = buffer[1]; 27268 subchnl->cdsc_adr = (buffer[5] & 0xF0); 27269 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 27270 subchnl->cdsc_trk = buffer[6]; 27271 subchnl->cdsc_ind = buffer[7]; 27272 if (subchnl->cdsc_format & CDROM_LBA) { 27273 subchnl->cdsc_absaddr.lba = 27274 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27275 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27276 subchnl->cdsc_reladdr.lba = 27277 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 27278 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 27279 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 27280 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 27281 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 27282 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 27283 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 27284 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 27285 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 27286 } else { 27287 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 27288 subchnl->cdsc_absaddr.msf.second = buffer[10]; 27289 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 27290 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 27291 subchnl->cdsc_reladdr.msf.second = buffer[14]; 27292 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 27293 } 27294 kmem_free(buffer, 16); 27295 kmem_free(com, sizeof (*com)); 27296 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 27297 != 0) { 27298 return (EFAULT); 27299 } 27300 return (rval); 27301 } 27302 27303 27304 /* 27305 * Function: 
sr_read_tocentry() 27306 * 27307 * Description: This routine is the driver entry point for handling CD-ROM 27308 * ioctl requests to read from the Table of Contents (TOC) 27309 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 27310 * fields, the starting address (LBA or MSF format per the user) 27311 * and the data mode if the user specified track is a data track. 27312 * 27313 * Note: The READ HEADER (0x44) command used in this routine is 27314 * obsolete per the SCSI MMC spec but still supported in the 27315 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI, 27316 * therefore the command is still implemented in this routine. 27317 * 27318 * Arguments: dev - the device 'dev_t' 27319 * data - pointer to user provided toc entry structure, 27320 * specifying the track # and the address format 27321 * (LBA or MSF). 27322 * flag - this argument is a pass through to ddi_copyxxx() 27323 * directly from the mode argument of ioctl(). 27324 * 27325 * Return Code: the code returned by sd_send_scsi_cmd() 27326 * EFAULT if ddi_copyxxx() fails 27327 * ENXIO if fail ddi_get_soft_state 27328 * EINVAL if data pointer is NULL 27329 */ 27330 27331 static int 27332 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 27333 { 27334 struct sd_lun *un = NULL; 27335 struct uscsi_cmd *com; 27336 struct cdrom_tocentry toc_entry; 27337 struct cdrom_tocentry *entry = &toc_entry; 27338 caddr_t buffer; 27339 int rval; 27340 char cdb[CDB_GROUP1]; 27341 27342 if (data == NULL) { 27343 return (EINVAL); 27344 } 27345 27346 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27347 (un->un_state == SD_STATE_OFFLINE)) { 27348 return (ENXIO); 27349 } 27350 27351 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 27352 return (EFAULT); 27353 } 27354 27355 /* Validate the requested track and address format */ 27356 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 27357 return (EINVAL); 27358 } 27359 27360 if (entry->cdte_track == 0) { 27361 return (EINVAL); 27362 } 27363 27364 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 27365 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27366 bzero(cdb, CDB_GROUP1); 27367 27368 cdb[0] = SCMD_READ_TOC; 27369 /* Set the MSF bit based on the user requested address format */ 27370 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 27371 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 27372 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 27373 } else { 27374 cdb[6] = entry->cdte_track; 27375 } 27376 27377 /* 27378 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
27379 * (4 byte TOC response header + 8 byte track descriptor) 27380 */ 27381 cdb[8] = 12; 27382 com->uscsi_cdb = cdb; 27383 com->uscsi_cdblen = CDB_GROUP1; 27384 com->uscsi_bufaddr = buffer; 27385 com->uscsi_buflen = 0x0C; 27386 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 27387 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27388 UIO_SYSSPACE, SD_PATH_STANDARD); 27389 if (rval != 0) { 27390 kmem_free(buffer, 12); 27391 kmem_free(com, sizeof (*com)); 27392 return (rval); 27393 } 27394 27395 /* Process the toc entry */ 27396 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 27397 entry->cdte_ctrl = (buffer[5] & 0x0F); 27398 if (entry->cdte_format & CDROM_LBA) { 27399 entry->cdte_addr.lba = 27400 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27401 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27402 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 27403 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 27404 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 27405 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 27406 /* 27407 * Send a READ TOC command using the LBA address format to get 27408 * the LBA for the track requested so it can be used in the 27409 * READ HEADER request 27410 * 27411 * Note: The MSF bit of the READ HEADER command specifies the 27412 * output format. The block address specified in that command 27413 * must be in LBA format. 27414 */ 27415 cdb[1] = 0; 27416 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27417 UIO_SYSSPACE, SD_PATH_STANDARD); 27418 if (rval != 0) { 27419 kmem_free(buffer, 12); 27420 kmem_free(com, sizeof (*com)); 27421 return (rval); 27422 } 27423 } else { 27424 entry->cdte_addr.msf.minute = buffer[9]; 27425 entry->cdte_addr.msf.second = buffer[10]; 27426 entry->cdte_addr.msf.frame = buffer[11]; 27427 /* 27428 * Send a READ TOC command using the LBA address format to get 27429 * the LBA for the track requested so it can be used in the 27430 * READ HEADER request 27431 * 27432 * Note: The MSF bit of the READ HEADER command specifies the 27433 * output format. The block address specified in that command 27434 * must be in LBA format. 27435 */ 27436 cdb[1] = 0; 27437 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27438 UIO_SYSSPACE, SD_PATH_STANDARD); 27439 if (rval != 0) { 27440 kmem_free(buffer, 12); 27441 kmem_free(com, sizeof (*com)); 27442 return (rval); 27443 } 27444 } 27445 27446 /* 27447 * Build and send the READ HEADER command to determine the data mode of 27448 * the user specified track. 27449 */ 27450 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 27451 (entry->cdte_track != CDROM_LEADOUT)) { 27452 bzero(cdb, CDB_GROUP1); 27453 cdb[0] = SCMD_READ_HEADER; 27454 cdb[2] = buffer[8]; 27455 cdb[3] = buffer[9]; 27456 cdb[4] = buffer[10]; 27457 cdb[5] = buffer[11]; 27458 cdb[8] = 0x08; 27459 com->uscsi_buflen = 0x08; 27460 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27461 UIO_SYSSPACE, SD_PATH_STANDARD); 27462 if (rval == 0) { 27463 entry->cdte_datamode = buffer[0]; 27464 } else { 27465 /* 27466 * READ HEADER command failed, since this is 27467 * obsoleted in one spec, its better to return 27468 * -1 for an invlid track so that we can still 27469 * recieve the rest of the TOC data. 
27470 */ 27471 entry->cdte_datamode = (uchar_t)-1; 27472 } 27473 } else { 27474 entry->cdte_datamode = (uchar_t)-1; 27475 } 27476 27477 kmem_free(buffer, 12); 27478 kmem_free(com, sizeof (*com)); 27479 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 27480 return (EFAULT); 27481 27482 return (rval); 27483 } 27484 27485 27486 /* 27487 * Function: sr_read_tochdr() 27488 * 27489 * Description: This routine is the driver entry point for handling CD-ROM 27490 * ioctl requests to read the Table of Contents (TOC) header 27491 * (CDROMREADTOHDR). The TOC header consists of the disk starting 27492 * and ending track numbers 27493 * 27494 * Arguments: dev - the device 'dev_t' 27495 * data - pointer to user provided toc header structure, 27496 * specifying the starting and ending track numbers. 27497 * flag - this argument is a pass through to ddi_copyxxx() 27498 * directly from the mode argument of ioctl(). 27499 * 27500 * Return Code: the code returned by sd_send_scsi_cmd() 27501 * EFAULT if ddi_copyxxx() fails 27502 * ENXIO if fail ddi_get_soft_state 27503 * EINVAL if data pointer is NULL 27504 */ 27505 27506 static int 27507 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 27508 { 27509 struct sd_lun *un; 27510 struct uscsi_cmd *com; 27511 struct cdrom_tochdr toc_header; 27512 struct cdrom_tochdr *hdr = &toc_header; 27513 char cdb[CDB_GROUP1]; 27514 int rval; 27515 caddr_t buffer; 27516 27517 if (data == NULL) { 27518 return (EINVAL); 27519 } 27520 27521 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27522 (un->un_state == SD_STATE_OFFLINE)) { 27523 return (ENXIO); 27524 } 27525 27526 buffer = kmem_zalloc(4, KM_SLEEP); 27527 bzero(cdb, CDB_GROUP1); 27528 cdb[0] = SCMD_READ_TOC; 27529 /* 27530 * Specifying a track number of 0x00 in the READ TOC command indicates 27531 * that the TOC header should be returned 27532 */ 27533 cdb[6] = 0x00; 27534 /* 27535 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 27536 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 27537 */ 27538 cdb[8] = 0x04; 27539 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27540 com->uscsi_cdb = cdb; 27541 com->uscsi_cdblen = CDB_GROUP1; 27542 com->uscsi_bufaddr = buffer; 27543 com->uscsi_buflen = 0x04; 27544 com->uscsi_timeout = 300; 27545 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27546 27547 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27548 UIO_SYSSPACE, SD_PATH_STANDARD); 27549 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 27550 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 27551 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 27552 } else { 27553 hdr->cdth_trk0 = buffer[2]; 27554 hdr->cdth_trk1 = buffer[3]; 27555 } 27556 kmem_free(buffer, 4); 27557 kmem_free(com, sizeof (*com)); 27558 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 27559 return (EFAULT); 27560 } 27561 return (rval); 27562 } 27563 27564 27565 /* 27566 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 27567 * sr_read_cdda(), sr_read_cdxa(), routines implement driver support for 27568 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 27569 * digital audio and extended architecture digital audio. These modes are 27570 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 27571 * MMC specs. 
27572 * 27573 * In addition to support for the various data formats these routines also 27574 * include support for devices that implement only the direct access READ 27575 * commands (0x08, 0x28), devices that implement the READ_CD commands 27576 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 27577 * READ CDXA commands (0xD8, 0xDB) 27578 */ 27579 27580 /* 27581 * Function: sr_read_mode1() 27582 * 27583 * Description: This routine is the driver entry point for handling CD-ROM 27584 * ioctl read mode1 requests (CDROMREADMODE1). 27585 * 27586 * Arguments: dev - the device 'dev_t' 27587 * data - pointer to user provided cd read structure specifying 27588 * the lba buffer address and length. 27589 * flag - this argument is a pass through to ddi_copyxxx() 27590 * directly from the mode argument of ioctl(). 27591 * 27592 * Return Code: the code returned by sd_send_scsi_cmd() 27593 * EFAULT if ddi_copyxxx() fails 27594 * ENXIO if fail ddi_get_soft_state 27595 * EINVAL if data pointer is NULL 27596 */ 27597 27598 static int 27599 sr_read_mode1(dev_t dev, caddr_t data, int flag) 27600 { 27601 struct sd_lun *un; 27602 struct cdrom_read mode1_struct; 27603 struct cdrom_read *mode1 = &mode1_struct; 27604 int rval; 27605 #ifdef _MULTI_DATAMODEL 27606 /* To support ILP32 applications in an LP64 world */ 27607 struct cdrom_read32 cdrom_read32; 27608 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27609 #endif /* _MULTI_DATAMODEL */ 27610 27611 if (data == NULL) { 27612 return (EINVAL); 27613 } 27614 27615 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27616 (un->un_state == SD_STATE_OFFLINE)) { 27617 return (ENXIO); 27618 } 27619 27620 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27621 "sd_read_mode1: entry: un:0x%p\n", un); 27622 27623 #ifdef _MULTI_DATAMODEL 27624 switch (ddi_model_convert_from(flag & FMODELS)) { 27625 case DDI_MODEL_ILP32: 27626 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27627 return (EFAULT); 27628 } 27629 /* Convert the ILP32 uscsi data from the application to LP64 */ 27630 cdrom_read32tocdrom_read(cdrd32, mode1); 27631 break; 27632 case DDI_MODEL_NONE: 27633 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27634 return (EFAULT); 27635 } 27636 } 27637 #else /* ! _MULTI_DATAMODEL */ 27638 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27639 return (EFAULT); 27640 } 27641 #endif /* _MULTI_DATAMODEL */ 27642 27643 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 27644 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 27645 27646 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27647 "sd_read_mode1: exit: un:0x%p\n", un); 27648 27649 return (rval); 27650 } 27651 27652 27653 /* 27654 * Function: sr_read_cd_mode2() 27655 * 27656 * Description: This routine is the driver entry point for handling CD-ROM 27657 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27658 * support the READ CD (0xBE) command or the 1st generation 27659 * READ CD (0xD4) command. 27660 * 27661 * Arguments: dev - the device 'dev_t' 27662 * data - pointer to user provided cd read structure specifying 27663 * the lba buffer address and length. 27664 * flag - this argument is a pass through to ddi_copyxxx() 27665 * directly from the mode argument of ioctl(). 
27666 * 27667 * Return Code: the code returned by sd_send_scsi_cmd() 27668 * EFAULT if ddi_copyxxx() fails 27669 * ENXIO if fail ddi_get_soft_state 27670 * EINVAL if data pointer is NULL 27671 */ 27672 27673 static int 27674 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 27675 { 27676 struct sd_lun *un; 27677 struct uscsi_cmd *com; 27678 struct cdrom_read mode2_struct; 27679 struct cdrom_read *mode2 = &mode2_struct; 27680 uchar_t cdb[CDB_GROUP5]; 27681 int nblocks; 27682 int rval; 27683 #ifdef _MULTI_DATAMODEL 27684 /* To support ILP32 applications in an LP64 world */ 27685 struct cdrom_read32 cdrom_read32; 27686 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27687 #endif /* _MULTI_DATAMODEL */ 27688 27689 if (data == NULL) { 27690 return (EINVAL); 27691 } 27692 27693 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27694 (un->un_state == SD_STATE_OFFLINE)) { 27695 return (ENXIO); 27696 } 27697 27698 #ifdef _MULTI_DATAMODEL 27699 switch (ddi_model_convert_from(flag & FMODELS)) { 27700 case DDI_MODEL_ILP32: 27701 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27702 return (EFAULT); 27703 } 27704 /* Convert the ILP32 uscsi data from the application to LP64 */ 27705 cdrom_read32tocdrom_read(cdrd32, mode2); 27706 break; 27707 case DDI_MODEL_NONE: 27708 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27709 return (EFAULT); 27710 } 27711 break; 27712 } 27713 27714 #else /* ! _MULTI_DATAMODEL */ 27715 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27716 return (EFAULT); 27717 } 27718 #endif /* _MULTI_DATAMODEL */ 27719 27720 bzero(cdb, sizeof (cdb)); 27721 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 27722 /* Read command supported by 1st generation atapi drives */ 27723 cdb[0] = SCMD_READ_CDD4; 27724 } else { 27725 /* Universal CD Access Command */ 27726 cdb[0] = SCMD_READ_CD; 27727 } 27728 27729 /* 27730 * Set expected sector type to: 2336s byte, Mode 2 Yellow Book 27731 */ 27732 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 27733 27734 /* set the start address */ 27735 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 27736 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 27737 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 27738 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 27739 27740 /* set the transfer length */ 27741 nblocks = mode2->cdread_buflen / 2336; 27742 cdb[6] = (uchar_t)(nblocks >> 16); 27743 cdb[7] = (uchar_t)(nblocks >> 8); 27744 cdb[8] = (uchar_t)nblocks; 27745 27746 /* set the filter bits */ 27747 cdb[9] = CDROM_READ_CD_USERDATA; 27748 27749 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27750 com->uscsi_cdb = (caddr_t)cdb; 27751 com->uscsi_cdblen = sizeof (cdb); 27752 com->uscsi_bufaddr = mode2->cdread_bufaddr; 27753 com->uscsi_buflen = mode2->cdread_buflen; 27754 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27755 27756 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 27757 UIO_SYSSPACE, SD_PATH_STANDARD); 27758 kmem_free(com, sizeof (*com)); 27759 return (rval); 27760 } 27761 27762 27763 /* 27764 * Function: sr_read_mode2() 27765 * 27766 * Description: This routine is the driver entry point for handling CD-ROM 27767 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27768 * do not support the READ CD (0xBE) command. 27769 * 27770 * Arguments: dev - the device 'dev_t' 27771 * data - pointer to user provided cd read structure specifying 27772 * the lba buffer address and length. 
27773 * flag - this argument is a pass through to ddi_copyxxx() 27774 * directly from the mode argument of ioctl(). 27775 * 27776 * Return Code: the code returned by sd_send_scsi_cmd() 27777 * EFAULT if ddi_copyxxx() fails 27778 * ENXIO if fail ddi_get_soft_state 27779 * EINVAL if data pointer is NULL 27780 * EIO if fail to reset block size 27781 * EAGAIN if commands are in progress in the driver 27782 */ 27783 27784 static int 27785 sr_read_mode2(dev_t dev, caddr_t data, int flag) 27786 { 27787 struct sd_lun *un; 27788 struct cdrom_read mode2_struct; 27789 struct cdrom_read *mode2 = &mode2_struct; 27790 int rval; 27791 uint32_t restore_blksize; 27792 struct uscsi_cmd *com; 27793 uchar_t cdb[CDB_GROUP0]; 27794 int nblocks; 27795 27796 #ifdef _MULTI_DATAMODEL 27797 /* To support ILP32 applications in an LP64 world */ 27798 struct cdrom_read32 cdrom_read32; 27799 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27800 #endif /* _MULTI_DATAMODEL */ 27801 27802 if (data == NULL) { 27803 return (EINVAL); 27804 } 27805 27806 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27807 (un->un_state == SD_STATE_OFFLINE)) { 27808 return (ENXIO); 27809 } 27810 27811 /* 27812 * Because this routine will update the device and driver block size 27813 * being used we want to make sure there are no commands in progress. 27814 * If commands are in progress the user will have to try again. 27815 * 27816 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 27817 * in sdioctl to protect commands from sdioctl through to the top of 27818 * sd_uscsi_strategy. See sdioctl for details. 27819 */ 27820 mutex_enter(SD_MUTEX(un)); 27821 if (un->un_ncmds_in_driver != 1) { 27822 mutex_exit(SD_MUTEX(un)); 27823 return (EAGAIN); 27824 } 27825 mutex_exit(SD_MUTEX(un)); 27826 27827 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27828 "sd_read_mode2: entry: un:0x%p\n", un); 27829 27830 #ifdef _MULTI_DATAMODEL 27831 switch (ddi_model_convert_from(flag & FMODELS)) { 27832 case DDI_MODEL_ILP32: 27833 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27834 return (EFAULT); 27835 } 27836 /* Convert the ILP32 uscsi data from the application to LP64 */ 27837 cdrom_read32tocdrom_read(cdrd32, mode2); 27838 break; 27839 case DDI_MODEL_NONE: 27840 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27841 return (EFAULT); 27842 } 27843 break; 27844 } 27845 #else /* ! 
_MULTI_DATAMODEL */ 27846 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 27847 return (EFAULT); 27848 } 27849 #endif /* _MULTI_DATAMODEL */ 27850 27851 /* Store the current target block size for restoration later */ 27852 restore_blksize = un->un_tgt_blocksize; 27853 27854 /* Change the device and soft state target block size to 2336 */ 27855 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 27856 rval = EIO; 27857 goto done; 27858 } 27859 27860 27861 bzero(cdb, sizeof (cdb)); 27862 27863 /* set READ operation */ 27864 cdb[0] = SCMD_READ; 27865 27866 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 27867 mode2->cdread_lba >>= 2; 27868 27869 /* set the start address */ 27870 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F); 27871 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 27872 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 27873 27874 /* set the transfer length */ 27875 nblocks = mode2->cdread_buflen / 2336; 27876 cdb[4] = (uchar_t)nblocks & 0xFF; 27877 27878 /* build command */ 27879 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27880 com->uscsi_cdb = (caddr_t)cdb; 27881 com->uscsi_cdblen = sizeof (cdb); 27882 com->uscsi_bufaddr = mode2->cdread_bufaddr; 27883 com->uscsi_buflen = mode2->cdread_buflen; 27884 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27885 27886 /* 27887 * Issue SCSI command with user space address for read buffer. 27888 * 27889 * This sends the command through main channel in the driver. 27890 * 27891 * Since this is accessed via an IOCTL call, we go through the 27892 * standard path, so that if the device was powered down, then 27893 * it would be 'awakened' to handle the command. 27894 */ 27895 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 27896 UIO_SYSSPACE, SD_PATH_STANDARD); 27897 27898 kmem_free(com, sizeof (*com)); 27899 27900 /* Restore the device and soft state target block size */ 27901 if (sr_sector_mode(dev, restore_blksize) != 0) { 27902 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27903 "can't do switch back to mode 1\n"); 27904 /* 27905 * If sd_send_scsi_READ succeeded we still need to report 27906 * an error because we failed to reset the block size 27907 */ 27908 if (rval == 0) { 27909 rval = EIO; 27910 } 27911 } 27912 27913 done: 27914 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27915 "sd_read_mode2: exit: un:0x%p\n", un); 27916 27917 return (rval); 27918 } 27919 27920 27921 /* 27922 * Function: sr_sector_mode() 27923 * 27924 * Description: This utility function is used by sr_read_mode2 to set the target 27925 * block size based on the user specified size. This is a legacy 27926 * implementation based upon a vendor specific mode page 27927 * 27928 * Arguments: dev - the device 'dev_t' 27929 * data - flag indicating if block size is being set to 2336 or 27930 * 512. 
27931 * 27932 * Return Code: the code returned by sd_send_scsi_cmd() 27933 * EFAULT if ddi_copyxxx() fails 27934 * ENXIO if fail ddi_get_soft_state 27935 * EINVAL if data pointer is NULL 27936 */ 27937 27938 static int 27939 sr_sector_mode(dev_t dev, uint32_t blksize) 27940 { 27941 struct sd_lun *un; 27942 uchar_t *sense; 27943 uchar_t *select; 27944 int rval; 27945 27946 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27947 (un->un_state == SD_STATE_OFFLINE)) { 27948 return (ENXIO); 27949 } 27950 27951 sense = kmem_zalloc(20, KM_SLEEP); 27952 27953 /* Note: This is a vendor specific mode page (0x81) */ 27954 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 27955 SD_PATH_STANDARD)) != 0) { 27956 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27957 "sr_sector_mode: Mode Sense failed\n"); 27958 kmem_free(sense, 20); 27959 return (rval); 27960 } 27961 select = kmem_zalloc(20, KM_SLEEP); 27962 select[3] = 0x08; 27963 select[10] = ((blksize >> 8) & 0xff); 27964 select[11] = (blksize & 0xff); 27965 select[12] = 0x01; 27966 select[13] = 0x06; 27967 select[14] = sense[14]; 27968 select[15] = sense[15]; 27969 if (blksize == SD_MODE2_BLKSIZE) { 27970 select[14] |= 0x01; 27971 } 27972 27973 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 27974 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 27975 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27976 "sr_sector_mode: Mode Select failed\n"); 27977 } else { 27978 /* 27979 * Only update the softstate block size if we successfully 27980 * changed the device block mode. 27981 */ 27982 mutex_enter(SD_MUTEX(un)); 27983 sd_update_block_info(un, blksize, 0); 27984 mutex_exit(SD_MUTEX(un)); 27985 } 27986 kmem_free(sense, 20); 27987 kmem_free(select, 20); 27988 return (rval); 27989 } 27990 27991 27992 /* 27993 * Function: sr_read_cdda() 27994 * 27995 * Description: This routine is the driver entry point for handling CD-ROM 27996 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 27997 * the target supports CDDA these requests are handled via a vendor 27998 * specific command (0xD8) If the target does not support CDDA 27999 * these requests are handled via the READ CD command (0xBE). 28000 * 28001 * Arguments: dev - the device 'dev_t' 28002 * data - pointer to user provided CD-DA structure specifying 28003 * the track starting address, transfer length, and 28004 * subcode options. 28005 * flag - this argument is a pass through to ddi_copyxxx() 28006 * directly from the mode argument of ioctl(). 
28007 * 28008 * Return Code: the code returned by sd_send_scsi_cmd() 28009 * EFAULT if ddi_copyxxx() fails 28010 * ENXIO if fail ddi_get_soft_state 28011 * EINVAL if invalid arguments are provided 28012 * ENOTTY 28013 */ 28014 28015 static int 28016 sr_read_cdda(dev_t dev, caddr_t data, int flag) 28017 { 28018 struct sd_lun *un; 28019 struct uscsi_cmd *com; 28020 struct cdrom_cdda *cdda; 28021 int rval; 28022 size_t buflen; 28023 char cdb[CDB_GROUP5]; 28024 28025 #ifdef _MULTI_DATAMODEL 28026 /* To support ILP32 applications in an LP64 world */ 28027 struct cdrom_cdda32 cdrom_cdda32; 28028 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 28029 #endif /* _MULTI_DATAMODEL */ 28030 28031 if (data == NULL) { 28032 return (EINVAL); 28033 } 28034 28035 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28036 return (ENXIO); 28037 } 28038 28039 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 28040 28041 #ifdef _MULTI_DATAMODEL 28042 switch (ddi_model_convert_from(flag & FMODELS)) { 28043 case DDI_MODEL_ILP32: 28044 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 28045 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28046 "sr_read_cdda: ddi_copyin Failed\n"); 28047 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28048 return (EFAULT); 28049 } 28050 /* Convert the ILP32 uscsi data from the application to LP64 */ 28051 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 28052 break; 28053 case DDI_MODEL_NONE: 28054 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28055 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28056 "sr_read_cdda: ddi_copyin Failed\n"); 28057 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28058 return (EFAULT); 28059 } 28060 break; 28061 } 28062 #else /* ! _MULTI_DATAMODEL */ 28063 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28064 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28065 "sr_read_cdda: ddi_copyin Failed\n"); 28066 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28067 return (EFAULT); 28068 } 28069 #endif /* _MULTI_DATAMODEL */ 28070 28071 /* 28072 * Since MMC-2 expects max 3 bytes for length, check if the 28073 * length input is greater than 3 bytes 28074 */ 28075 if ((cdda->cdda_length & 0xFF000000) != 0) { 28076 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 28077 "cdrom transfer length too large: %d (limit %d)\n", 28078 cdda->cdda_length, 0xFFFFFF); 28079 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28080 return (EINVAL); 28081 } 28082 28083 switch (cdda->cdda_subcode) { 28084 case CDROM_DA_NO_SUBCODE: 28085 buflen = CDROM_BLK_2352 * cdda->cdda_length; 28086 break; 28087 case CDROM_DA_SUBQ: 28088 buflen = CDROM_BLK_2368 * cdda->cdda_length; 28089 break; 28090 case CDROM_DA_ALL_SUBCODE: 28091 buflen = CDROM_BLK_2448 * cdda->cdda_length; 28092 break; 28093 case CDROM_DA_SUBCODE_ONLY: 28094 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 28095 break; 28096 default: 28097 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28098 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 28099 cdda->cdda_subcode); 28100 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28101 return (EINVAL); 28102 } 28103 28104 /* Build and send the command */ 28105 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28106 bzero(cdb, CDB_GROUP5); 28107 28108 if (un->un_f_cfg_cdda == TRUE) { 28109 cdb[0] = (char)SCMD_READ_CD; 28110 cdb[1] = 0x04; 28111 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28112 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28113 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28114 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28115 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28116 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28117 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 28118 cdb[9] = 0x10; 28119 switch (cdda->cdda_subcode) { 28120 case CDROM_DA_NO_SUBCODE : 28121 cdb[10] = 0x0; 28122 break; 28123 case CDROM_DA_SUBQ : 28124 cdb[10] = 0x2; 28125 break; 28126 case CDROM_DA_ALL_SUBCODE : 28127 cdb[10] = 0x1; 28128 break; 28129 case CDROM_DA_SUBCODE_ONLY : 28130 /* FALLTHROUGH */ 28131 default : 28132 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28133 kmem_free(com, sizeof (*com)); 28134 return (ENOTTY); 28135 } 28136 } else { 28137 cdb[0] = (char)SCMD_READ_CDDA; 28138 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28139 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28140 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28141 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28142 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 28143 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28144 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28145 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 28146 cdb[10] = cdda->cdda_subcode; 28147 } 28148 28149 com->uscsi_cdb = cdb; 28150 com->uscsi_cdblen = CDB_GROUP5; 28151 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 28152 com->uscsi_buflen = buflen; 28153 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28154 28155 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 28156 UIO_SYSSPACE, SD_PATH_STANDARD); 28157 28158 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28159 kmem_free(com, sizeof (*com)); 28160 return (rval); 28161 } 28162 28163 28164 /* 28165 * Function: sr_read_cdxa() 28166 * 28167 * Description: This routine is the driver entry point for handling CD-ROM 28168 * ioctl requests to return CD-XA (Extended Architecture) data. 28169 * (CDROMCDXA). 28170 * 28171 * Arguments: dev - the device 'dev_t' 28172 * data - pointer to user provided CD-XA structure specifying 28173 * the data starting address, transfer length, and format 28174 * flag - this argument is a pass through to ddi_copyxxx() 28175 * directly from the mode argument of ioctl(). 28176 * 28177 * Return Code: the code returned by sd_send_scsi_cmd() 28178 * EFAULT if ddi_copyxxx() fails 28179 * ENXIO if fail ddi_get_soft_state 28180 * EINVAL if data pointer is NULL 28181 */ 28182 28183 static int 28184 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 28185 { 28186 struct sd_lun *un; 28187 struct uscsi_cmd *com; 28188 struct cdrom_cdxa *cdxa; 28189 int rval; 28190 size_t buflen; 28191 char cdb[CDB_GROUP5]; 28192 uchar_t read_flags; 28193 28194 #ifdef _MULTI_DATAMODEL 28195 /* To support ILP32 applications in an LP64 world */ 28196 struct cdrom_cdxa32 cdrom_cdxa32; 28197 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 28198 #endif /* _MULTI_DATAMODEL */ 28199 28200 if (data == NULL) { 28201 return (EINVAL); 28202 } 28203 28204 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28205 return (ENXIO); 28206 } 28207 28208 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 28209 28210 #ifdef _MULTI_DATAMODEL 28211 switch (ddi_model_convert_from(flag & FMODELS)) { 28212 case DDI_MODEL_ILP32: 28213 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 28214 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28215 return (EFAULT); 28216 } 28217 /* 28218 * Convert the ILP32 uscsi data from the 28219 * application to LP64 for internal use. 
28220 */ 28221 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 28222 break; 28223 case DDI_MODEL_NONE: 28224 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 28225 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28226 return (EFAULT); 28227 } 28228 break; 28229 } 28230 #else /* ! _MULTI_DATAMODEL */ 28231 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 28232 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28233 return (EFAULT); 28234 } 28235 #endif /* _MULTI_DATAMODEL */ 28236 28237 /* 28238 * Since MMC-2 expects max 3 bytes for length, check if the 28239 * length input is greater than 3 bytes 28240 */ 28241 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 28242 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 28243 "cdrom transfer length too large: %d (limit %d)\n", 28244 cdxa->cdxa_length, 0xFFFFFF); 28245 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28246 return (EINVAL); 28247 } 28248 28249 switch (cdxa->cdxa_format) { 28250 case CDROM_XA_DATA: 28251 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 28252 read_flags = 0x10; 28253 break; 28254 case CDROM_XA_SECTOR_DATA: 28255 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 28256 read_flags = 0xf8; 28257 break; 28258 case CDROM_XA_DATA_W_ERROR: 28259 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 28260 read_flags = 0xfc; 28261 break; 28262 default: 28263 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28264 "sr_read_cdxa: Format '0x%x' Not Supported\n", 28265 cdxa->cdxa_format); 28266 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28267 return (EINVAL); 28268 } 28269 28270 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28271 bzero(cdb, CDB_GROUP5); 28272 if (un->un_f_mmc_cap == TRUE) { 28273 cdb[0] = (char)SCMD_READ_CD; 28274 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28275 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28276 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28277 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28278 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28279 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28280 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 28281 cdb[9] = (char)read_flags; 28282 } else { 28283 /* 28284 * Note: A vendor specific command (0xDB) is being used her to 28285 * request a read of all subcodes. 
28286 */ 28287 cdb[0] = (char)SCMD_READ_CDXA; 28288 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28289 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28290 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28291 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28292 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 28293 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28294 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28295 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 28296 cdb[10] = cdxa->cdxa_format; 28297 } 28298 com->uscsi_cdb = cdb; 28299 com->uscsi_cdblen = CDB_GROUP5; 28300 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 28301 com->uscsi_buflen = buflen; 28302 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28303 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 28304 UIO_SYSSPACE, SD_PATH_STANDARD); 28305 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28306 kmem_free(com, sizeof (*com)); 28307 return (rval); 28308 } 28309 28310 28311 /* 28312 * Function: sr_eject() 28313 * 28314 * Description: This routine is the driver entry point for handling CD-ROM 28315 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 28316 * 28317 * Arguments: dev - the device 'dev_t' 28318 * 28319 * Return Code: the code returned by sd_send_scsi_cmd() 28320 */ 28321 28322 static int 28323 sr_eject(dev_t dev) 28324 { 28325 struct sd_lun *un; 28326 int rval; 28327 28328 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28329 (un->un_state == SD_STATE_OFFLINE)) { 28330 return (ENXIO); 28331 } 28332 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 28333 SD_PATH_STANDARD)) != 0) { 28334 return (rval); 28335 } 28336 28337 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 28338 SD_PATH_STANDARD); 28339 28340 if (rval == 0) { 28341 mutex_enter(SD_MUTEX(un)); 28342 sr_ejected(un); 28343 un->un_mediastate = DKIO_EJECTED; 28344 cv_broadcast(&un->un_state_cv); 28345 mutex_exit(SD_MUTEX(un)); 28346 } 28347 return (rval); 28348 } 28349 28350 28351 /* 28352 * Function: sr_ejected() 28353 * 28354 * Description: This routine updates the soft state structure to invalidate the 28355 * geometry information after the media has been ejected or a 28356 * media eject has been detected. 28357 * 28358 * Arguments: un - driver soft state (unit) structure 28359 */ 28360 28361 static void 28362 sr_ejected(struct sd_lun *un) 28363 { 28364 struct sd_errstats *stp; 28365 28366 ASSERT(un != NULL); 28367 ASSERT(mutex_owned(SD_MUTEX(un))); 28368 28369 un->un_f_blockcount_is_valid = FALSE; 28370 un->un_f_tgt_blocksize_is_valid = FALSE; 28371 un->un_f_geometry_is_valid = FALSE; 28372 28373 if (un->un_errstats != NULL) { 28374 stp = (struct sd_errstats *)un->un_errstats->ks_data; 28375 stp->sd_capacity.value.ui64 = 0; 28376 } 28377 } 28378 28379 28380 /* 28381 * Function: sr_check_wp() 28382 * 28383 * Description: This routine checks the write protection of a removable media 28384 * disk via the write protect bit of the Mode Page Header device 28385 * specific field. This routine has been implemented to use the 28386 * error recovery mode page for all device types. 28387 * Note: In the future use a sd_send_scsi_MODE_SENSE() routine 28388 * 28389 * Arguments: dev - the device 'dev_t' 28390 * 28391 * Return Code: int indicating if the device is write protected (1) or not (0) 28392 * 28393 * Context: Kernel thread. 
28394 * 28395 */ 28396 28397 static int 28398 sr_check_wp(dev_t dev) 28399 { 28400 struct sd_lun *un; 28401 uchar_t device_specific; 28402 uchar_t *sense; 28403 int hdrlen; 28404 int rval; 28405 int retry_flag = FALSE; 28406 28407 /* 28408 * Note: The return codes for this routine should be reworked to 28409 * properly handle the case of a NULL softstate. 28410 */ 28411 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28412 return (FALSE); 28413 } 28414 28415 if (un->un_f_cfg_is_atapi == TRUE) { 28416 retry_flag = TRUE; 28417 } 28418 28419 retry: 28420 if (un->un_f_cfg_is_atapi == TRUE) { 28421 /* 28422 * The mode page contents are not required; set the allocation 28423 * length for the mode page header only 28424 */ 28425 hdrlen = MODE_HEADER_LENGTH_GRP2; 28426 sense = kmem_zalloc(hdrlen, KM_SLEEP); 28427 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen, 28428 MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 28429 device_specific = 28430 ((struct mode_header_grp2 *)sense)->device_specific; 28431 } else { 28432 hdrlen = MODE_HEADER_LENGTH; 28433 sense = kmem_zalloc(hdrlen, KM_SLEEP); 28434 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen, 28435 MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 28436 device_specific = 28437 ((struct mode_header *)sense)->device_specific; 28438 } 28439 28440 if (rval != 0) { 28441 if ((un->un_f_cfg_is_atapi == TRUE) && (retry_flag)) { 28442 /* 28443 * For an Atapi Zip drive, observed the drive 28444 * reporting check condition for the first attempt. 28445 * Sense data indicating power on or bus device/reset. 28446 * Hence in case of failure need to try at least once 28447 * for Atapi devices. 28448 */ 28449 retry_flag = FALSE; 28450 kmem_free(sense, hdrlen); 28451 goto retry; 28452 } else { 28453 /* 28454 * Write protect mode sense failed; not all disks 28455 * understand this query. Return FALSE assuming that 28456 * these devices are not writable. 28457 */ 28458 rval = FALSE; 28459 } 28460 } else { 28461 if (device_specific & WRITE_PROTECT) { 28462 rval = TRUE; 28463 } else { 28464 rval = FALSE; 28465 } 28466 } 28467 kmem_free(sense, hdrlen); 28468 return (rval); 28469 } 28470 28471 28472 /* 28473 * Function: sr_volume_ctrl() 28474 * 28475 * Description: This routine is the driver entry point for handling CD-ROM 28476 * audio output volume ioctl requests. (CDROMVOLCTRL) 28477 * 28478 * Arguments: dev - the device 'dev_t' 28479 * data - pointer to user audio volume control structure 28480 * flag - this argument is a pass through to ddi_copyxxx() 28481 * directly from the mode argument of ioctl(). 
28482 * 28483 * Return Code: the code returned by sd_send_scsi_cmd() 28484 * EFAULT if ddi_copyxxx() fails 28485 * ENXIO if fail ddi_get_soft_state 28486 * EINVAL if data pointer is NULL 28487 * 28488 */ 28489 28490 static int 28491 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 28492 { 28493 struct sd_lun *un; 28494 struct cdrom_volctrl volume; 28495 struct cdrom_volctrl *vol = &volume; 28496 uchar_t *sense_page; 28497 uchar_t *select_page; 28498 uchar_t *sense; 28499 uchar_t *select; 28500 int sense_buflen; 28501 int select_buflen; 28502 int rval; 28503 28504 if (data == NULL) { 28505 return (EINVAL); 28506 } 28507 28508 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28509 (un->un_state == SD_STATE_OFFLINE)) { 28510 return (ENXIO); 28511 } 28512 28513 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 28514 return (EFAULT); 28515 } 28516 28517 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28518 struct mode_header_grp2 *sense_mhp; 28519 struct mode_header_grp2 *select_mhp; 28520 int bd_len; 28521 28522 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 28523 select_buflen = MODE_HEADER_LENGTH_GRP2 + 28524 MODEPAGE_AUDIO_CTRL_LEN; 28525 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28526 select = kmem_zalloc(select_buflen, KM_SLEEP); 28527 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 28528 sense_buflen, MODEPAGE_AUDIO_CTRL, 28529 SD_PATH_STANDARD)) != 0) { 28530 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28531 "sr_volume_ctrl: Mode Sense Failed\n"); 28532 kmem_free(sense, sense_buflen); 28533 kmem_free(select, select_buflen); 28534 return (rval); 28535 } 28536 sense_mhp = (struct mode_header_grp2 *)sense; 28537 select_mhp = (struct mode_header_grp2 *)select; 28538 bd_len = (sense_mhp->bdesc_length_hi << 8) | 28539 sense_mhp->bdesc_length_lo; 28540 if (bd_len > MODE_BLK_DESC_LENGTH) { 28541 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28542 "sr_volume_ctrl: Mode Sense returned invalid " 28543 "block descriptor length\n"); 28544 kmem_free(sense, sense_buflen); 28545 kmem_free(select, select_buflen); 28546 return (EIO); 28547 } 28548 sense_page = (uchar_t *) 28549 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 28550 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 28551 select_mhp->length_msb = 0; 28552 select_mhp->length_lsb = 0; 28553 select_mhp->bdesc_length_hi = 0; 28554 select_mhp->bdesc_length_lo = 0; 28555 } else { 28556 struct mode_header *sense_mhp, *select_mhp; 28557 28558 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28559 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28560 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28561 select = kmem_zalloc(select_buflen, KM_SLEEP); 28562 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 28563 sense_buflen, MODEPAGE_AUDIO_CTRL, 28564 SD_PATH_STANDARD)) != 0) { 28565 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28566 "sr_volume_ctrl: Mode Sense Failed\n"); 28567 kmem_free(sense, sense_buflen); 28568 kmem_free(select, select_buflen); 28569 return (rval); 28570 } 28571 sense_mhp = (struct mode_header *)sense; 28572 select_mhp = (struct mode_header *)select; 28573 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 28574 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28575 "sr_volume_ctrl: Mode Sense returned invalid " 28576 "block descriptor length\n"); 28577 kmem_free(sense, sense_buflen); 28578 kmem_free(select, select_buflen); 28579 return (EIO); 28580 } 28581 sense_page = (uchar_t *) 28582 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 28583 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 28584 select_mhp->length = 0; 28585 select_mhp->bdesc_length = 0; 28586 } 28587 /* 28588 * Note: An audio control data structure could be created and overlayed 28589 * on the following in place of the array indexing method implemented. 28590 */ 28591 28592 /* Build the select data for the user volume data */ 28593 select_page[0] = MODEPAGE_AUDIO_CTRL; 28594 select_page[1] = 0xE; 28595 /* Set the immediate bit */ 28596 select_page[2] = 0x04; 28597 /* Zero out reserved fields */ 28598 select_page[3] = 0x00; 28599 select_page[4] = 0x00; 28600 /* Return sense data for fields not to be modified */ 28601 select_page[5] = sense_page[5]; 28602 select_page[6] = sense_page[6]; 28603 select_page[7] = sense_page[7]; 28604 /* Set the user specified volume levels for channel 0 and 1 */ 28605 select_page[8] = 0x01; 28606 select_page[9] = vol->channel0; 28607 select_page[10] = 0x02; 28608 select_page[11] = vol->channel1; 28609 /* Channel 2 and 3 are currently unsupported so return the sense data */ 28610 select_page[12] = sense_page[12]; 28611 select_page[13] = sense_page[13]; 28612 select_page[14] = sense_page[14]; 28613 select_page[15] = sense_page[15]; 28614 28615 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28616 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 28617 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28618 } else { 28619 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 28620 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28621 } 28622 28623 kmem_free(sense, sense_buflen); 28624 kmem_free(select, select_buflen); 28625 return (rval); 28626 } 28627 28628 28629 /* 28630 * Function: sr_read_sony_session_offset() 28631 * 28632 * Description: This routine is the driver entry point for handling CD-ROM 28633 * ioctl requests for session offset information. (CDROMREADOFFSET) 28634 * The address of the first track in the last session of a 28635 * multi-session CD-ROM is returned 28636 * 28637 * Note: This routine uses a vendor specific key value in the 28638 * command control field without implementing any vendor check here 28639 * or in the ioctl routine. 28640 * 28641 * Arguments: dev - the device 'dev_t' 28642 * data - pointer to an int to hold the requested address 28643 * flag - this argument is a pass through to ddi_copyxxx() 28644 * directly from the mode argument of ioctl(). 28645 * 28646 * Return Code: the code returned by sd_send_scsi_cmd() 28647 * EFAULT if ddi_copyxxx() fails 28648 * ENXIO if fail ddi_get_soft_state 28649 * EINVAL if data pointer is NULL 28650 */ 28651 28652 static int 28653 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 28654 { 28655 struct sd_lun *un; 28656 struct uscsi_cmd *com; 28657 caddr_t buffer; 28658 char cdb[CDB_GROUP1]; 28659 int session_offset = 0; 28660 int rval; 28661 28662 if (data == NULL) { 28663 return (EINVAL); 28664 } 28665 28666 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28667 (un->un_state == SD_STATE_OFFLINE)) { 28668 return (ENXIO); 28669 } 28670 28671 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 28672 bzero(cdb, CDB_GROUP1); 28673 cdb[0] = SCMD_READ_TOC; 28674 /* 28675 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 28676 * (4 byte TOC response header + 8 byte response data) 28677 */ 28678 cdb[8] = SONY_SESSION_OFFSET_LEN; 28679 /* Byte 9 is the control byte. 
A vendor specific value is used */ 28680 cdb[9] = SONY_SESSION_OFFSET_KEY; 28681 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28682 com->uscsi_cdb = cdb; 28683 com->uscsi_cdblen = CDB_GROUP1; 28684 com->uscsi_bufaddr = buffer; 28685 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN; 28686 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28687 28688 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 28689 UIO_SYSSPACE, SD_PATH_STANDARD); 28690 if (rval != 0) { 28691 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 28692 kmem_free(com, sizeof (*com)); 28693 return (rval); 28694 } 28695 if (buffer[1] == SONY_SESSION_OFFSET_VALID) { 28696 session_offset = 28697 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 28698 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 28699 /* 28700 * Offset returned offset in current lbasize block's. Convert to 28701 * 2k block's to return to the user 28702 */ 28703 if (un->un_tgt_blocksize == CDROM_BLK_512) { 28704 session_offset >>= 2; 28705 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) { 28706 session_offset >>= 1; 28707 } 28708 } 28709 28710 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) { 28711 rval = EFAULT; 28712 } 28713 28714 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 28715 kmem_free(com, sizeof (*com)); 28716 return (rval); 28717 } 28718 28719 28720 /* 28721 * Function: sd_wm_cache_constructor() 28722 * 28723 * Description: Cache Constructor for the wmap cache for the read/modify/write 28724 * devices. 28725 * 28726 * Arguments: wm - A pointer to the sd_w_map to be initialized. 28727 * un - sd_lun structure for the device. 28728 * flag - the km flags passed to constructor 28729 * 28730 * Return Code: 0 on success. 28731 * -1 on failure. 28732 */ 28733 28734 /*ARGSUSED*/ 28735 static int 28736 sd_wm_cache_constructor(void *wm, void *un, int flags) 28737 { 28738 bzero(wm, sizeof (struct sd_w_map)); 28739 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL); 28740 return (0); 28741 } 28742 28743 28744 /* 28745 * Function: sd_wm_cache_destructor() 28746 * 28747 * Description: Cache destructor for the wmap cache for the read/modify/write 28748 * devices. 28749 * 28750 * Arguments: wm - A pointer to the sd_w_map to be initialized. 28751 * un - sd_lun structure for the device. 28752 */ 28753 /*ARGSUSED*/ 28754 static void 28755 sd_wm_cache_destructor(void *wm, void *un) 28756 { 28757 cv_destroy(&((struct sd_w_map *)wm)->wm_avail); 28758 } 28759 28760 28761 /* 28762 * Function: sd_range_lock() 28763 * 28764 * Description: Lock the range of blocks specified as parameter to ensure 28765 * that read, modify write is atomic and no other i/o writes 28766 * to the same location. The range is specified in terms 28767 * of start and end blocks. Block numbers are the actual 28768 * media block numbers and not system. 28769 * 28770 * Arguments: un - sd_lun structure for the device. 28771 * startb - The starting block number 28772 * endb - The end block number 28773 * typ - type of i/o - simple/read_modify_write 28774 * 28775 * Return Code: wm - pointer to the wmap structure. 28776 * 28777 * Context: This routine can sleep. 
28778 */ 28779 28780 static struct sd_w_map * 28781 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 28782 { 28783 struct sd_w_map *wmp = NULL; 28784 struct sd_w_map *sl_wmp = NULL; 28785 struct sd_w_map *tmp_wmp; 28786 wm_state state = SD_WM_CHK_LIST; 28787 28788 28789 ASSERT(un != NULL); 28790 ASSERT(!mutex_owned(SD_MUTEX(un))); 28791 28792 mutex_enter(SD_MUTEX(un)); 28793 28794 while (state != SD_WM_DONE) { 28795 28796 switch (state) { 28797 case SD_WM_CHK_LIST: 28798 /* 28799 * This is the starting state. Check the wmap list 28800 * to see if the range is currently available. 28801 */ 28802 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 28803 /* 28804 * If this is a simple write and no rmw 28805 * i/o is pending then try to lock the 28806 * range as the range should be available. 28807 */ 28808 state = SD_WM_LOCK_RANGE; 28809 } else { 28810 tmp_wmp = sd_get_range(un, startb, endb); 28811 if (tmp_wmp != NULL) { 28812 if ((wmp != NULL) && ONLIST(un, wmp)) { 28813 /* 28814 * Should not keep onlist wmps 28815 * while waiting this macro 28816 * will also do wmp = NULL; 28817 */ 28818 FREE_ONLIST_WMAP(un, wmp); 28819 } 28820 /* 28821 * sl_wmp is the wmap on which wait 28822 * is done, since the tmp_wmp points 28823 * to the inuse wmap, set sl_wmp to 28824 * tmp_wmp and change the state to sleep 28825 */ 28826 sl_wmp = tmp_wmp; 28827 state = SD_WM_WAIT_MAP; 28828 } else { 28829 state = SD_WM_LOCK_RANGE; 28830 } 28831 28832 } 28833 break; 28834 28835 case SD_WM_LOCK_RANGE: 28836 ASSERT(un->un_wm_cache); 28837 /* 28838 * The range need to be locked, try to get a wmap. 28839 * First attempt it with NO_SLEEP, want to avoid a sleep 28840 * if possible as we will have to release the sd mutex 28841 * if we have to sleep. 28842 */ 28843 if (wmp == NULL) 28844 wmp = kmem_cache_alloc(un->un_wm_cache, 28845 KM_NOSLEEP); 28846 if (wmp == NULL) { 28847 mutex_exit(SD_MUTEX(un)); 28848 _NOTE(DATA_READABLE_WITHOUT_LOCK 28849 (sd_lun::un_wm_cache)) 28850 wmp = kmem_cache_alloc(un->un_wm_cache, 28851 KM_SLEEP); 28852 mutex_enter(SD_MUTEX(un)); 28853 /* 28854 * we released the mutex so recheck and go to 28855 * check list state. 28856 */ 28857 state = SD_WM_CHK_LIST; 28858 } else { 28859 /* 28860 * We exit out of state machine since we 28861 * have the wmap. Do the housekeeping first. 28862 * place the wmap on the wmap list if it is not 28863 * on it already and then set the state to done. 28864 */ 28865 wmp->wm_start = startb; 28866 wmp->wm_end = endb; 28867 wmp->wm_flags = typ | SD_WM_BUSY; 28868 if (typ & SD_WTYPE_RMW) { 28869 un->un_rmw_count++; 28870 } 28871 /* 28872 * If not already on the list then link 28873 */ 28874 if (!ONLIST(un, wmp)) { 28875 wmp->wm_next = un->un_wm; 28876 wmp->wm_prev = NULL; 28877 if (wmp->wm_next) 28878 wmp->wm_next->wm_prev = wmp; 28879 un->un_wm = wmp; 28880 } 28881 state = SD_WM_DONE; 28882 } 28883 break; 28884 28885 case SD_WM_WAIT_MAP: 28886 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 28887 /* 28888 * Wait is done on sl_wmp, which is set in the 28889 * check_list state. 28890 */ 28891 sl_wmp->wm_wanted_count++; 28892 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 28893 sl_wmp->wm_wanted_count--; 28894 if (!(sl_wmp->wm_flags & SD_WM_BUSY)) { 28895 if (wmp != NULL) 28896 CHK_N_FREEWMP(un, wmp); 28897 wmp = sl_wmp; 28898 } 28899 sl_wmp = NULL; 28900 /* 28901 * After waking up, need to recheck for availability of 28902 * range. 
28903 */ 28904 state = SD_WM_CHK_LIST; 28905 break; 28906 28907 default: 28908 panic("sd_range_lock: " 28909 "Unknown state %d in sd_range_lock", state); 28910 /*NOTREACHED*/ 28911 } /* switch(state) */ 28912 28913 } /* while(state != SD_WM_DONE) */ 28914 28915 mutex_exit(SD_MUTEX(un)); 28916 28917 ASSERT(wmp != NULL); 28918 28919 return (wmp); 28920 } 28921 28922 28923 /* 28924 * Function: sd_get_range() 28925 * 28926 * Description: Find if there any overlapping I/O to this one 28927 * Returns the write-map of 1st such I/O, NULL otherwise. 28928 * 28929 * Arguments: un - sd_lun structure for the device. 28930 * startb - The starting block number 28931 * endb - The end block number 28932 * 28933 * Return Code: wm - pointer to the wmap structure. 28934 */ 28935 28936 static struct sd_w_map * 28937 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 28938 { 28939 struct sd_w_map *wmp; 28940 28941 ASSERT(un != NULL); 28942 28943 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 28944 if (!(wmp->wm_flags & SD_WM_BUSY)) { 28945 continue; 28946 } 28947 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 28948 break; 28949 } 28950 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 28951 break; 28952 } 28953 } 28954 28955 return (wmp); 28956 } 28957 28958 28959 /* 28960 * Function: sd_free_inlist_wmap() 28961 * 28962 * Description: Unlink and free a write map struct. 28963 * 28964 * Arguments: un - sd_lun structure for the device. 28965 * wmp - sd_w_map which needs to be unlinked. 28966 */ 28967 28968 static void 28969 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 28970 { 28971 ASSERT(un != NULL); 28972 28973 if (un->un_wm == wmp) { 28974 un->un_wm = wmp->wm_next; 28975 } else { 28976 wmp->wm_prev->wm_next = wmp->wm_next; 28977 } 28978 28979 if (wmp->wm_next) { 28980 wmp->wm_next->wm_prev = wmp->wm_prev; 28981 } 28982 28983 wmp->wm_next = wmp->wm_prev = NULL; 28984 28985 kmem_cache_free(un->un_wm_cache, wmp); 28986 } 28987 28988 28989 /* 28990 * Function: sd_range_unlock() 28991 * 28992 * Description: Unlock the range locked by wm. 28993 * Free write map if nobody else is waiting on it. 28994 * 28995 * Arguments: un - sd_lun structure for the device. 28996 * wmp - sd_w_map which needs to be unlinked. 28997 */ 28998 28999 static void 29000 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 29001 { 29002 ASSERT(un != NULL); 29003 ASSERT(wm != NULL); 29004 ASSERT(!mutex_owned(SD_MUTEX(un))); 29005 29006 mutex_enter(SD_MUTEX(un)); 29007 29008 if (wm->wm_flags & SD_WTYPE_RMW) { 29009 un->un_rmw_count--; 29010 } 29011 29012 if (wm->wm_wanted_count) { 29013 wm->wm_flags = 0; 29014 /* 29015 * Broadcast that the wmap is available now. 29016 */ 29017 cv_broadcast(&wm->wm_avail); 29018 } else { 29019 /* 29020 * If no one is waiting on the map, it should be free'ed. 29021 */ 29022 sd_free_inlist_wmap(un, wm); 29023 } 29024 29025 mutex_exit(SD_MUTEX(un)); 29026 } 29027 29028 29029 /* 29030 * Function: sd_read_modify_write_task 29031 * 29032 * Description: Called from a taskq thread to initiate the write phase of 29033 * a read-modify-write request. This is used for targets where 29034 * un->un_sys_blocksize != un->un_tgt_blocksize. 29035 * 29036 * Arguments: arg - a pointer to the buf(9S) struct for the write command. 29037 * 29038 * Context: Called under taskq thread context. 
29039 */ 29040 29041 static void 29042 sd_read_modify_write_task(void *arg) 29043 { 29044 struct sd_mapblocksize_info *bsp; 29045 struct buf *bp; 29046 struct sd_xbuf *xp; 29047 struct sd_lun *un; 29048 29049 bp = arg; /* The bp is given in arg */ 29050 ASSERT(bp != NULL); 29051 29052 /* Get the pointer to the layer-private data struct */ 29053 xp = SD_GET_XBUF(bp); 29054 ASSERT(xp != NULL); 29055 bsp = xp->xb_private; 29056 ASSERT(bsp != NULL); 29057 29058 un = SD_GET_UN(bp); 29059 ASSERT(un != NULL); 29060 ASSERT(!mutex_owned(SD_MUTEX(un))); 29061 29062 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 29063 "sd_read_modify_write_task: entry: buf:0x%p\n", bp); 29064 29065 /* 29066 * This is the write phase of a read-modify-write request, called 29067 * under the context of a taskq thread in response to the completion 29068 * of the read portion of the rmw request completing under interrupt 29069 * context. The write request must be sent from here down the iostart 29070 * chain as if it were being sent from sd_mapblocksize_iostart(), so 29071 * we use the layer index saved in the layer-private data area. 29072 */ 29073 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp); 29074 29075 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 29076 "sd_read_modify_write_task: exit: buf:0x%p\n", bp); 29077 } 29078 29079 29080 /* 29081 * Function: sddump_do_read_of_rmw() 29082 * 29083 * Description: This routine will be called from sddump, If sddump is called 29084 * with an I/O which not aligned on device blocksize boundary 29085 * then the write has to be converted to read-modify-write. 29086 * Do the read part here in order to keep sddump simple. 29087 * Note - That the sd_mutex is held across the call to this 29088 * routine. 29089 * 29090 * Arguments: un - sd_lun 29091 * blkno - block number in terms of media block size. 29092 * nblk - number of blocks. 29093 * bpp - pointer to pointer to the buf structure. On return 29094 * from this function, *bpp points to the valid buffer 29095 * to which the write has to be done. 29096 * 29097 * Return Code: 0 for success or errno-type return code 29098 */ 29099 29100 static int 29101 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 29102 struct buf **bpp) 29103 { 29104 int err; 29105 int i; 29106 int rval; 29107 struct buf *bp; 29108 struct scsi_pkt *pkt = NULL; 29109 uint32_t target_blocksize; 29110 29111 ASSERT(un != NULL); 29112 ASSERT(mutex_owned(SD_MUTEX(un))); 29113 29114 target_blocksize = un->un_tgt_blocksize; 29115 29116 mutex_exit(SD_MUTEX(un)); 29117 29118 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL, 29119 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL); 29120 if (bp == NULL) { 29121 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 29122 "no resources for dumping; giving up"); 29123 err = ENOMEM; 29124 goto done; 29125 } 29126 29127 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL, 29128 blkno, nblk); 29129 if (rval != 0) { 29130 scsi_free_consistent_buf(bp); 29131 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 29132 "no resources for dumping; giving up"); 29133 err = ENOMEM; 29134 goto done; 29135 } 29136 29137 pkt->pkt_flags |= FLAG_NOINTR; 29138 29139 err = EIO; 29140 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 29141 29142 /* 29143 * Scsi_poll returns 0 (success) if the command completes and 29144 * the status block is STATUS_GOOD. We should only check 29145 * errors if this condition is not true. 

/*
 *    Function: sddump_do_read_of_rmw()
 *
 * Description: This routine will be called from sddump. If sddump is called
 *		with an I/O that is not aligned on a device blocksize
 *		boundary, then the write has to be converted to a
 *		read-modify-write. Do the read part here in order to keep
 *		sddump simple.
 *		Note that the sd_mutex is held across the call to this
 *		routine.
 *
 *   Arguments: un	- sd_lun
 *		blkno	- block number in terms of media block size.
 *		nblk	- number of blocks.
 *		bpp	- pointer to pointer to the buf structure. On return
 *			  from this function, *bpp points to the valid buffer
 *			  to which the write has to be done.
 *
 * Return Code: 0 for success or errno-type return code
 */

static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
	struct buf **bpp)
{
	int err;
	int i;
	int rval;
	struct buf *bp;
	struct scsi_pkt *pkt = NULL;
	uint32_t target_blocksize;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	target_blocksize = un->un_tgt_blocksize;

	mutex_exit(SD_MUTEX(un));

	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
	if (bp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
	    blkno, nblk);
	if (rval != 0) {
		scsi_free_consistent_buf(bp);
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	pkt->pkt_flags |= FLAG_NOINTR;

	err = EIO;
	for (i = 0; i < SD_NDUMP_RETRIES; i++) {

		/*
		 * Scsi_poll returns 0 (success) if the command completes and
		 * the status block is STATUS_GOOD. We should only check
		 * errors if this condition is not true. Even then we should
		 * send our own request sense packet only if we have a check
		 * condition and auto request sense has not been performed by
		 * the hba.
		 */
		SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");

		if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
			err = 0;
			break;
		}

		/*
		 * Check CMD_DEV_GONE 1st, give up if device is gone,
		 * no need to read RQS data.
		 */
		if (pkt->pkt_reason == CMD_DEV_GONE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
			    "Device is gone\n");
			break;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with CHECK, try # %d\n", i);
			if ((pkt->pkt_state & STATE_ARQ_DONE) == 0) {
				(void) sd_send_polled_RQS(un);
			}

			continue;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
			int reset_retval = 0;

			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with BUSY, try # %d\n", i);

			if (un->un_f_lun_reset_enabled == TRUE) {
				reset_retval = scsi_reset(SD_ADDRESS(un),
				    RESET_LUN);
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un),
				    RESET_TARGET);
			}
			(void) sd_send_polled_RQS(un);

		} else {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with 0x%x, try # %d\n",
			    SD_GET_PKT_STATUS(pkt), i);
			mutex_enter(SD_MUTEX(un));
			sd_reset_target(un, pkt);
			mutex_exit(SD_MUTEX(un));
		}

		/*
		 * If we are not getting anywhere with lun/target resets,
		 * let's reset the bus.
		 */
		if (i > SD_NDUMP_RETRIES / 2) {
			(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			(void) sd_send_polled_RQS(un);
		}

	}
	scsi_destroy_pkt(pkt);

	if (err != 0) {
		scsi_free_consistent_buf(bp);
		*bpp = NULL;
	} else {
		*bpp = bp;
	}

done:
	mutex_enter(SD_MUTEX(un));
	return (err);
}


/*
 *    Function: sd_failfast_flushq
 *
 * Description: Take all bp's on the wait queue that have B_FAILFAST set
 *		in b_flags and move them onto the failfast queue, then kick
 *		off a thread to return all bp's on the failfast queue to
 *		their owners with an error set.
 *
 *   Arguments: un - pointer to the soft state struct for the instance.
 *
 *     Context: may execute in interrupt context.
 */

static void
sd_failfast_flushq(struct sd_lun *un)
{
	struct buf *bp;
	struct buf *next_waitq_bp;
	struct buf *prev_waitq_bp = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
	ASSERT(un->un_failfast_bp == NULL);

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: entry: un:0x%p\n", un);

	/*
	 * Check if we should flush all bufs when entering failfast state, or
	 * just those with B_FAILFAST set.
	 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
		/*
		 * Move *all* bp's on the wait queue to the failfast flush
		 * queue, including those that do NOT have B_FAILFAST set.
		 */
		if (un->un_failfast_headp == NULL) {
			ASSERT(un->un_failfast_tailp == NULL);
			un->un_failfast_headp = un->un_waitq_headp;
		} else {
			ASSERT(un->un_failfast_tailp != NULL);
			un->un_failfast_tailp->av_forw = un->un_waitq_headp;
		}

		un->un_failfast_tailp = un->un_waitq_tailp;
		un->un_waitq_headp = un->un_waitq_tailp = NULL;

	} else {
		/*
		 * Go through the wait queue, pick off all entries with
		 * B_FAILFAST set, and move these onto the failfast queue.
		 */
		for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
			/*
			 * Save the pointer to the next bp on the wait queue,
			 * so we get to it on the next iteration of this loop.
			 */
			next_waitq_bp = bp->av_forw;

			/*
			 * If this bp from the wait queue does NOT have
			 * B_FAILFAST set, just move on to the next element
			 * in the wait queue. Note, this is the only place
			 * where it is correct to set prev_waitq_bp.
			 */
			if ((bp->b_flags & B_FAILFAST) == 0) {
				prev_waitq_bp = bp;
				continue;
			}

			/*
			 * Remove the bp from the wait queue.
			 */
			if (bp == un->un_waitq_headp) {
				/* The bp is the first element of the waitq. */
				un->un_waitq_headp = next_waitq_bp;
				if (un->un_waitq_headp == NULL) {
					/* The wait queue is now empty */
					un->un_waitq_tailp = NULL;
				}
			} else {
				/*
				 * The bp is either somewhere in the middle
				 * or at the end of the wait queue.
				 */
				ASSERT(un->un_waitq_headp != NULL);
				ASSERT(prev_waitq_bp != NULL);
				ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
				    == 0);
				if (bp == un->un_waitq_tailp) {
					/* bp is the last entry on waitq. */
					ASSERT(next_waitq_bp == NULL);
					un->un_waitq_tailp = prev_waitq_bp;
				}
				prev_waitq_bp->av_forw = next_waitq_bp;
			}
			bp->av_forw = NULL;

			/*
			 * Now put the bp onto the failfast queue.
			 */
			if (un->un_failfast_headp == NULL) {
				/* failfast queue is currently empty */
				ASSERT(un->un_failfast_tailp == NULL);
				un->un_failfast_headp =
				    un->un_failfast_tailp = bp;
			} else {
				/* Add the bp to the end of the failfast q */
				ASSERT(un->un_failfast_tailp != NULL);
				ASSERT(un->un_failfast_tailp->b_flags &
				    B_FAILFAST);
				un->un_failfast_tailp->av_forw = bp;
				un->un_failfast_tailp = bp;
			}
		}
	}

	/*
	 * Now return all bp's on the failfast queue to their owners.
	 */
	while ((bp = un->un_failfast_headp) != NULL) {

		un->un_failfast_headp = bp->av_forw;
		if (un->un_failfast_headp == NULL) {
			un->un_failfast_tailp = NULL;
		}

		/*
		 * We want to return the bp with a failure error code, but
		 * we do not want a call to sd_start_cmds() to occur here,
		 * so use sd_return_failed_command_no_restart() instead of
		 * sd_return_failed_command().
		 */
		sd_return_failed_command_no_restart(un, bp, EIO);
	}

	/* Flush the xbuf queues if required. */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}


/*
 *    Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *		flushing. Used with ddi_xbuf_flushq(9F).
 *
 *   Arguments: bp - ptr to buf struct to be examined.
 *
 *     Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
	/*
	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
	 * state is entered; OR (2) the given bp has B_FAILFAST set.
	 */
	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}


#if defined(__i386) || defined(__amd64)
/*
 *    Function: sd_setup_next_xfer
 *
 * Description: Prepare next I/O operation using DMA_PARTIAL
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t num_blks_not_xfered;
	daddr_t strt_blk_num;
	ssize_t bytes_not_xfered;
	int rval;

	ASSERT(pkt->pkt_resid == 0);

	/*
	 * Calculate next block number and amount to be transferred.
	 *
	 * How much data has NOT been transferred to the HBA yet.
	 */
	bytes_not_xfered = xp->xb_dma_resid;

	/*
	 * Figure out how many blocks have NOT been transferred
	 * to the HBA yet.
	 */
	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/*
	 * Set the starting block number to the end of what WAS transferred.
	 */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	/*
	 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
	 * will call scsi_init_pkt with NULL_FUNC so we do not have to
	 * release the disk mutex here.
	 */
	rval = sd_setup_next_rw_pkt(un, pkt, bp,
	    strt_blk_num, num_blks_not_xfered);

	if (rval == 0) {

		/*
		 * Success.
		 *
		 * Adjust things if there are still more blocks to be
		 * transferred.
		 */
		xp->xb_dma_resid = pkt->pkt_resid;
		pkt->pkt_resid = 0;

		return (1);
	}

	/*
	 * There's really only one possible return value from
	 * sd_setup_next_rw_pkt which occurs when scsi_init_pkt
	 * returns NULL.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	bp->b_resid = bp->b_bcount;
	bp->b_flags |= B_ERROR;

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "Error setting up next portion of DMA transfer\n");

	return (0);
}
#endif
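
/*
 * A minimal sketch of how a completion handler would use
 * sd_setup_next_xfer() under DMA_PARTIAL: when only part of the buffer
 * was bound, xb_dma_resid is nonzero, and the same pkt is adjusted and
 * re-issued until the whole buf has been transferred. The surrounding
 * control flow below is assumed for illustration, not copied from the
 * interrupt path; un, bp, pktp, and xp are as in the code above.
 *
 *	if ((xp->xb_dma_resid != 0) &&
 *	    (sd_setup_next_xfer(un, bp, pktp, xp) != 0)) {
 *		(void) scsi_transport(pktp);
 *		return;
 *	}
 */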

/*
 * Note: The following sd_faultinjection_ioctl() routines implement driver
 * support for fault injection, allowing errors to be injected into
 * multiple layers of the driver for error analysis.
 */

#ifdef SD_FAULT_INJECTION
static uint_t sd_fault_injection_on = 0;

/*
 *    Function: sd_faultinjection_ioctl()
 *
 * Description: This routine is the driver entry point for handling
 *		fault-injection ioctls to inject errors into the
 *		layer model
 *
 *   Arguments: cmd - the ioctl cmd received
 *		arg - the argument from the user; also used to return data
 */

static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
{
	uint_t i;
	uint_t rval;

	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

	mutex_enter(SD_MUTEX(un));

	switch (cmd) {
	case SDIOCRUN:
		/* Allow pushed faults to be injected */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Run\n");

		sd_fault_injection_on = 1;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: run finished\n");
		break;

	case SDIOCSTART:
		/* Start Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Start\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0xFFFFFFFF;
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		mutex_enter(&(un->un_fi_mutex));
		un->sd_fi_log[0] = '\0';
		un->sd_fi_buf_len = 0;
		mutex_exit(&(un->un_fi_mutex));

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: start finished\n");
		break;

	case SDIOCSTOP:
		/* Stop Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0x0;

		/* Empty stray or unused structs from fifo */
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			if (un->sd_fi_fifo_pkt[i] != NULL) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
			}
			if (un->sd_fi_fifo_xb[i] != NULL) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
			}
			if (un->sd_fi_fifo_un[i] != NULL) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
			}
			if (un->sd_fi_fifo_arq[i] != NULL) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
			}
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: stop finished\n");
		break;

	case SDIOCINSERTPKT:
		/* Store a packet struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		/* No more than SD_FI_MAX_ERROR allowed in Queue */
		if (un->sd_fi_fifo_pkt[i] != NULL) {
			kmem_free(un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt));
		}
		if (arg != NULL) {
			un->sd_fi_fifo_pkt[i] =
			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
			if (un->sd_fi_fifo_pkt[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
				un->sd_fi_fifo_pkt[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: pkt null\n");
		}
		break;

	case SDIOCINSERTXB:
		/* Store an xb struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_xb[i] != NULL) {
			kmem_free(un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb));
			un->sd_fi_fifo_xb[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_xb[i] =
			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
			if (un->sd_fi_fifo_xb[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb), 0);

			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
				un->sd_fi_fifo_xb[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: xb null\n");
		}
		break;

	case SDIOCINSERTUN:
		/* Store a un struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_un[i] != NULL) {
			kmem_free(un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un));
			un->sd_fi_fifo_un[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_un[i] =
			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
			if (un->sd_fi_fifo_un[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
				un->sd_fi_fifo_un[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: un null\n");
		}
		break;

	case SDIOCINSERTARQ:
		/* Store an arq struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_arq[i] != NULL) {
			kmem_free(un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq));
			un->sd_fi_fifo_arq[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_arq[i] =
			    kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}
		break;

	case SDIOCPUSH:
		/* Push stored xb, pkt, un, and arq onto fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i,
			    sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			if (un->sd_fi_fifo_end + 1 < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return buffer of log from Injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len + 1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid; set it to NULL
			 * for the return.
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n");
}
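
/*
 * For illustration, a user-level test would drive the cases above in this
 * order. The device path, the header providing the SDIOC* codes, and the
 * fd/fi_pkt/logbuf variables are assumptions, not part of this file:
 *
 *	int fd = open("/dev/rdsk/c0t0d0s0", O_RDWR);
 *	struct sd_fi_pkt fi_pkt;
 *
 *	(void) ioctl(fd, SDIOCSTART, NULL);		start a session
 *	(void) ioctl(fd, SDIOCINSERTPKT, &fi_pkt);	stage pkt overrides
 *	(void) ioctl(fd, SDIOCPUSH, NULL);		push staged set to fifo
 *	(void) ioctl(fd, SDIOCRUN, NULL);		arm injection
 *	... issue I/O to the device ...
 *	(void) ioctl(fd, SDIOCRETRIEVE, logbuf);	collect injection log
 *	(void) ioctl(fd, SDIOCSTOP, NULL);		end the session
 */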

/*
 *    Function: sd_injection_log()
 *
 * Description: This routine adds buf to the already existing injection log
 *		for retrieval via faultinjection_ioctl for use in fault
 *		detection and recovery
 *
 *   Arguments: buf - the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	/* Add logged value to Injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t offset = strlen((char *)un->sd_fi_log);
		char *destp = (char *)un->sd_fi_log + offset;
		int i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}
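
/*
 * A minimal sketch of the presumed call site for sd_injection_log(): the
 * driver's logging routines are expected to append their formatted output
 * to the injection log while a session is active. The guard shown here is
 * an assumption based on the ioctl code above, and "component" and
 * "logbuf" are illustrative names only:
 *
 *	if (sd_fault_injection_on && (un->sd_injection_mask & component)) {
 *		sd_injection_log(logbuf, un);
 *	}
 */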

/*
 *    Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
 *
 *   Arguments: pktp - packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* pull bp, xb, and un from pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* if injection is off return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	/* take the next set off the fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];

	/* set variables accordingly */
	/* set pkt if it was on fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");
	}

	/* set xb if it was on fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* copy in block data from sense */
		if (fi_xb->xb_sense_data[0] != -1) {
			bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
			    SENSE_LENGTH);
		}

		/* copy in extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code,
		    "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key,
		    "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code,
		    "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb,
		    es_qual_code, "es_qual_code");
	}

	/* set un if it was on fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_geometry_is_valid,
		    "un_f_geometry_is_valid");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");
	}

	/* copy in auto request sense if it was on fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* free structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free does not guarantee that the pointers are set to NULL.
	 * Since we use these pointers to determine whether values were
	 * set or not, make sure they are always NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */
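
/*
 * A minimal sketch of the expected consumer of sd_faultinjection(): based
 * on the "Injection from sdintr" trace string above, the interrupt handler
 * is presumed to run each completing packet through the injector before
 * normal completion processing, so a staged fault set rewrites pkt/xb/un
 * state as if the hardware had reported it. The placement below is an
 * assumption, not copied from sdintr():
 *
 *	#ifdef SD_FAULT_INJECTION
 *		sd_faultinjection(pktp);
 *	#endif
 *	... normal command-completion handling ...
 */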