/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */

#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>
#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[]	= "misc/scsi drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[]	= "misc/scsi";
#endif
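
/*
 * For orientation: module installation for a driver built this way
 * normally wires SD_MODULE_NAME and the driver's dev_ops (sd_ops, named
 * in the mappings below) into a modldrv/modlinkage and hands them to
 * mod_install().  The following is only a minimal sketch of that
 * pattern; the "sketch_" names are hypothetical, and the driver's real
 * _init(), _fini(), and linkage structures, defined later in this file,
 * also perform soft-state and mutex setup not shown here.
 */
#if 0
static struct modldrv sketch_modldrv = {
	&mod_driverops,		/* type of module: a device driver */
	SD_MODULE_NAME,		/* linkinfo string reported by modinfo(1M) */
	&sd_ops			/* dev_ops for this driver */
};

static struct modlinkage sketch_modlinkage = {
	MODREV_1, &sketch_modldrv, NULL
};

int
sketch_init(void)
{
	return (mod_install(&sketch_modlinkage));
}

int
sketch_fini(void)
{
	return (mod_remove(&sketch_modlinkage));
}
#endif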
/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property.  The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd).  (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it.  Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect_type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
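
/*
 * To make the decision rule above concrete, attach-time code following
 * this scheme would look roughly like the sketch below: consult the
 * HBA's "interconnect-type" property (values per the services.h
 * referenced above) and fall back to SD_DEFAULT_INTERCONNECT_TYPE when
 * the property is absent or unrecognized.  The function name is
 * hypothetical and this is not the driver's actual implementation;
 * the SD_INTERCONNECT_* values are defined further down in this file.
 */
#if 0
static int
sketch_interconnect_type(dev_info_t *devi)
{
	int it = ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0,
	    "interconnect-type", -1);

	switch (it) {
	case INTERCONNECT_FIBRE:
	case INTERCONNECT_SSA:
		return (SD_INTERCONNECT_FIBRE);
	case INTERCONNECT_FABRIC:
		return (SD_INTERCONNECT_FABRIC);
	default:
		/* Property missing or unsupported (e.g. 1394, USB). */
		return (SD_DEFAULT_INTERCONNECT_TYPE);
	}
}
#endif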

/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;


/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identify device only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;
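
/*
 * The intended use of the cache above is roughly the pattern sketched
 * below: find the entry for the probed target's parent HBA node, and
 * consult cache[target] before paying for a real scsi_probe().  The
 * function name and return convention here are hypothetical; the
 * driver's actual routines (sd_scsi_probe_with_cache() and friends)
 * are declared and defined later in this file.
 */
#if 0
static int
sketch_probe_cache_lookup(dev_info_t *pdip, int tgt)
{
	struct sd_scsi_probe_cache *cp;
	int result = -1;	/* -1: no cached result for this target */

	mutex_enter(&sd_scsi_probe_cache_mutex);
	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == pdip)
			break;
	}
	if ((cp != NULL) && (tgt >= 0) && (tgt < NTARGETS_WIDE))
		result = cp->cache[tgt];
	mutex_exit(&sd_scsi_probe_cache_mutex);
	return (result);
}
#endif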
/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))


/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))
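/*
 * For example, SD_TOUPPER('c') yields 'C' and SD_TOUPPER('7') yields '7'.
 * Note that, unlike the ANSI toupper() function, this macro evaluates its
 * argument more than once, so it must not be passed an expression with
 * side effects.
 */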

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.)  device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure.  Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure.  Device type is searched as far as the device_id string is
 * defined.  Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of  "NEC       CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI",               SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_RSV_REL_TIME|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) ||\
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				 | SD_CONF_BSET_READSUB_BCD
				 | SD_CONF_BSET_READ_TOC_ADDR_BCD
				 | SD_CONF_BSET_NO_READ_HEADER
				 | SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				 | SD_CONF_BSET_READSUB_BCD
				 | SD_CONF_BSET_READ_TOC_ADDR_BCD
				 | SD_CONF_BSET_NO_READ_HEADER
				 | SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table)/ sizeof (sd_disk_config_t);


/*
 * Return codes of sd_uselabel().
 */
#define	SD_LABEL_IS_VALID		0
#define	SD_LABEL_IS_INVALID		1

#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */
#define	WD_NODE			7	/* the whole disk minor */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }

static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF,  },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};
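
/*
 * Reading the table above by column (an interpretation of struct
 * sd_cdbinfo; see sddef.h for the authoritative field names): each row
 * gives a CDB group's length, its opcode group code, the largest LBA
 * that CDB format can encode, and the maximum block count per command.
 * For example, the 6-byte Group 0 CDB is limited to a 21-bit LBA
 * (0x1FFFFF) and a one-byte (255-block) transfer count.
 */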

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties
#define	sd_validate_geometry		ssd_validate_geometry

#if defined(_SUNOS_VTOC_16)
#define	sd_convert_geometry		ssd_convert_geometry
#endif

#define	sd_resync_geom_caches		ssd_resync_geom_caches
#define	sd_read_fdisk			ssd_read_fdisk
#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_swap_efi_gpt			ssd_swap_efi_gpt
#define	sd_swap_efi_gpe			ssd_swap_efi_gpe
#define	sd_validate_efi			ssd_validate_efi
#define	sd_use_efi			ssd_use_efi
#define	sd_uselabel			ssd_uselabel
#define	sd_build_default_label		ssd_build_default_label
#define	sd_has_max_chs_vals		ssd_has_max_chs_vals
#define	sd_inq_fill			ssd_inq_fill
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid_block		ssd_get_devid_block
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_create_minor_nodes		ssd_create_minor_nodes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_disable_caching		ssd_disable_caching
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_extract_sense_info_descr	ssd_extract_sense_info_descr
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_uscsi_ioctl			ssd_uscsi_ioctl
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_dkio_get_geometry		ssd_dkio_get_geometry
#define	sd_dkio_set_geometry		ssd_dkio_set_geometry
#define	sd_dkio_get_partition		ssd_dkio_get_partition
#define	sd_dkio_set_partition		ssd_dkio_set_partition
#define	sd_dkio_partition		ssd_dkio_partition
#define	sd_dkio_get_vtoc		ssd_dkio_get_vtoc
#define	sd_dkio_get_efi			ssd_dkio_get_efi
#define	sd_build_user_vtoc		ssd_build_user_vtoc
#define	sd_dkio_set_vtoc		ssd_dkio_set_vtoc
#define	sd_dkio_set_efi			ssd_dkio_set_efi
#define	sd_build_label_vtoc		ssd_build_label_vtoc
#define	sd_write_label			ssd_write_label
#define	sd_clear_vtoc			ssd_clear_vtoc
#define	sd_clear_efi			ssd_clear_efi
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_dkio_get_mboot		ssd_dkio_get_mboot
#define	sd_dkio_set_mboot		ssd_dkio_set_mboot
#define	sd_setup_default_geometry	ssd_setup_default_geometry
#define	sd_update_fdisk_and_vtoc	ssd_update_fdisk_and_vtoc
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi

#endif	/* #if (defined(__fibre)) */


int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int  sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

static int sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void sd_enable_descr_sense(struct sd_lun *un);
#endif /* _LP64 */
static void sd_set_mmc_caps(struct sd_lun *un);

static void sd_read_unit_properties(struct sd_lun *un);
static int  sd_process_sdconf_file(struct sd_lun *un);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int  sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int  sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int  sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
	int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);
static int  sd_validate_geometry(struct sd_lun *un, int path_flag);

#if defined(_SUNOS_VTOC_16)
static void sd_convert_geometry(uint64_t capacity, struct dk_geom *un_g);
#endif

static void sd_resync_geom_caches(struct sd_lun *un, int capacity, int lbasize,
	int path_flag);
static int  sd_read_fdisk(struct sd_lun *un, uint_t capacity, int lbasize,
	int path_flag);
static void sd_get_physical_geometry(struct sd_lun *un,
	struct geom_cache *pgeom_p, int capacity, int lbasize, int path_flag);
static void sd_get_virtual_geometry(struct sd_lun *un, int capacity,
	int lbasize);
static int  sd_uselabel(struct sd_lun *un, struct dk_label *l, int path_flag);
static void sd_swap_efi_gpt(efi_gpt_t *);
static void sd_swap_efi_gpe(int nparts, efi_gpe_t *);
static int sd_validate_efi(efi_gpt_t *);
static int sd_use_efi(struct sd_lun *, int);
static void sd_build_default_label(struct sd_lun *un);

#if defined(_FIRMWARE_NEEDS_FDISK)
static int  sd_has_max_chs_vals(struct ipart *fdp);
#endif
static void sd_inq_fill(char *p, int l, char *s);


static void sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static daddr_t sd_get_devid_block(struct sd_lun *un);
static int  sd_get_devid(struct sd_lun *un);
static int  sd_get_serialnum(struct sd_lun *un, uchar_t *wwn, int *len);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int  sd_write_deviceid(struct sd_lun *un);
static int  sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int  sd_check_vpd_page_support(struct sd_lun *un);

static void sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int  sd_ddi_suspend(dev_info_t *devi);
static int  sd_ddi_pm_suspend(struct sd_lun *un);
static int  sd_ddi_resume(dev_info_t *devi);
static int  sd_ddi_pm_resume(struct sd_lun *un);
static int  sdpower(dev_info_t *devi, int component, int level);

static int  sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int  sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int  sd_unit_attach(dev_info_t *devi);
static int  sd_unit_detach(dev_info_t *devi);

static int  sd_create_minor_nodes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int  sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int  sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int  sd_send_polled_RQS(struct sd_lun *un);
static int  sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif


static int  sd_disable_caching(struct sd_lun *un);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
	uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int  sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int  sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int  sd_ready_and_valid(struct sd_lun *un);

static void sdmin(struct buf *bp);
static int  sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int  sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int  sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int  sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);
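
/*
 * The iostart functions above form a layered chain: each layer receives
 * its own index into a table of function pointers and, on the way
 * "down", hands the buf to the next layer at index + 1 (iodone
 * processing walks back up the chain the same way).  Schematically,
 * assuming sd_iostart_chain (see the name mappings above) is such a
 * table, a layer would look like the sketch below; the layer name is
 * hypothetical and the real chain tables are defined elsewhere in this
 * file.
 */
#if 0
static void
sketch_layer_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	/* ...layer-specific processing of bp would go here... */

	/* Pass the buf down to the next layer in the chain. */
	(*(sd_iostart_chain[index + 1]))(index + 1, un, bp);
}
#endif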
/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
	struct buf *bp, int flags,
	int (*callback)(caddr_t), caddr_t callback_arg,
	diskaddr_t lba, uint32_t blockcount);
#if defined(__i386) || defined(__amd64)
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
	struct buf *bp, diskaddr_t lba, uint32_t blockcount);
#endif /* defined(__i386) || defined(__amd64) */

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
	uchar_t chain_type, void *pktinfop);

static int  sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd,
	enum uio_seg cdbspace, enum uio_seg dataspace, enum uio_seg rqbufspace,
	int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
	daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
	uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
	struct sd_xbuf *xp, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
	int retry_check_flag,
	void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
	void *user_arg, int failure_code, clock_t retry_delay,
	void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
	clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
	struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
	int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
	struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int code);
static diskaddr_t sd_extract_sense_info_descr(
	struct scsi_descr_sense_hdr *sdsp);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
	uint8_t asc,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
	uint8_t asc, uint8_t ascq,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
	int sense_key, uint8_t asc,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
	uint8_t asc,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
	int sense_key,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
	uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
	uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
	int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
	size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
	uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
	uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
	struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
	uchar_t *bufaddr, uint_t buflen);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
	struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
	uchar_t *bufaddr, uint_t buflen, char feature);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
	uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
	uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
	size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,\
	path_flag)

static int sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
	uint16_t buflen, uchar_t page_code, uchar_t page_control,
	uint16_t param_ptr, int path_flag);

static int  sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);

static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
	uchar_t *data, int len, int fmt);

/*
 * Disk Ioctl Function Prototypes
 */
static int sd_uscsi_ioctl(dev_t dev, caddr_t arg, int flag);
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_geometry(dev_t dev, caddr_t arg, int flag,
	int geom_validated);
static int sd_dkio_set_geometry(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_partition(dev_t dev, caddr_t arg, int flag,
	int geom_validated);
static int sd_dkio_set_partition(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_vtoc(dev_t dev, caddr_t arg, int flag,
	int geom_validated);
static int sd_dkio_get_efi(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_partition(dev_t dev, caddr_t arg, int flag);
static void sd_build_user_vtoc(struct sd_lun *un, struct vtoc *user_vtoc);
static int sd_dkio_set_vtoc(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_set_efi(dev_t dev, caddr_t arg, int flag);
static int sd_build_label_vtoc(struct sd_lun *un, struct vtoc *user_vtoc);
static int sd_write_label(dev_t dev);
static int sd_set_vtoc(struct sd_lun *un, struct dk_label *dkl);
static void sd_clear_vtoc(struct sd_lun *un);
static void sd_clear_efi(struct sd_lun *un);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_mboot(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_set_mboot(dev_t dev, caddr_t arg, int flag);
static void sd_setup_default_geometry(struct sd_lun *un);
defined(__i386) || defined(__amd64) 1437 static int sd_update_fdisk_and_vtoc(struct sd_lun *un); 1438 #endif 1439 1440 /* 1441 * Multi-host Ioctl Prototypes 1442 */ 1443 static int sd_check_mhd(dev_t dev, int interval); 1444 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1445 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1446 static char *sd_sname(uchar_t status); 1447 static void sd_mhd_resvd_recover(void *arg); 1448 static void sd_resv_reclaim_thread(); 1449 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1450 static int sd_reserve_release(dev_t dev, int cmd); 1451 static void sd_rmv_resv_reclaim_req(dev_t dev); 1452 static void sd_mhd_reset_notify_cb(caddr_t arg); 1453 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1454 mhioc_inkeys_t *usrp, int flag); 1455 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1456 mhioc_inresvs_t *usrp, int flag); 1457 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1458 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1459 static int sd_mhdioc_release(dev_t dev); 1460 static int sd_mhdioc_register_devid(dev_t dev); 1461 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1462 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1463 1464 /* 1465 * SCSI removable prototypes 1466 */ 1467 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1468 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1469 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1470 static int sr_pause_resume(dev_t dev, int mode); 1471 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1472 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1473 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1474 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1475 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1476 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1477 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1478 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1479 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1480 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1481 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1482 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1483 static int sr_eject(dev_t dev); 1484 static void sr_ejected(register struct sd_lun *un); 1485 static int sr_check_wp(dev_t dev); 1486 static int sd_check_media(dev_t dev, enum dkio_state state); 1487 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1488 static void sd_delayed_cv_broadcast(void *arg); 1489 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1490 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1491 1492 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1493 1494 /* 1495 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 
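 *
 * Editor's note, an illustrative sketch of why these exist: on media whose
 * physical block size exceeds DEV_BSIZE (e.g. 2K DVD-RAM), a 512-byte write
 * must be done as a read-modify-write of the containing physical block.
 * For a 2048-byte medium, a write to user block 513 lands at byte offset
 * 513 * 512 = 262656, i.e. physical block 128 at offset 512; the driver
 * reads block 128, modifies 512 bytes at that offset, and writes the block
 * back. The sd_w_map range-lock routines below serialize overlapping
 * read-modify-write cycles on the same block range.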
1496 */ 1497 static void sd_check_for_writable_cd(struct sd_lun *un); 1498 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1499 static void sd_wm_cache_destructor(void *wm, void *un); 1500 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1501 daddr_t endb, ushort_t typ); 1502 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1503 daddr_t endb); 1504 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1505 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1506 static void sd_read_modify_write_task(void * arg); 1507 static int 1508 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1509 struct buf **bpp); 1510 1511 1512 /* 1513 * Function prototypes for failfast support. 1514 */ 1515 static void sd_failfast_flushq(struct sd_lun *un); 1516 static int sd_failfast_flushq_callback(struct buf *bp); 1517 1518 /* 1519 * Function prototypes to check for lsi devices 1520 */ 1521 static void sd_is_lsi(struct sd_lun *un); 1522 1523 /* 1524 * Function prototypes for x86 support 1525 */ 1526 #if defined(__i386) || defined(__amd64) 1527 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1528 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1529 #endif 1530 1531 /* 1532 * Constants for failfast support: 1533 * 1534 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1535 * failfast processing being performed. 1536 * 1537 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1538 * failfast processing on all bufs with B_FAILFAST set. 1539 */ 1540 1541 #define SD_FAILFAST_INACTIVE 0 1542 #define SD_FAILFAST_ACTIVE 1 1543 1544 /* 1545 * Bitmask to control behavior of buf(9S) flushes when a transition to 1546 * the failfast state occurs. Optional bits include: 1547 * 1548 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1549 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1550 * be flushed. 1551 * 1552 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1553 * driver, in addition to the regular wait queue. This includes the xbuf 1554 * queues. When clear, only the driver's wait queue will be flushed. 1555 */ 1556 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1557 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1558 1559 /* 1560 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1561 * to flush all queues within the driver. 
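 *
 * For example (editor's illustration), to make a failfast transition
 * discard every queued buf, including those without B_FAILFAST set, the
 * tunable below could be patched to:
 *
 *	sd_failfast_flushctl =
 *	    (SD_FAILFAST_FLUSH_ALL_BUFS | SD_FAILFAST_FLUSH_ALL_QUEUES);
 *
 * e.g. via /etc/system:  set sd:sd_failfast_flushctl = 0x3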
1562  */
1563 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES;
1564 
1565 
1566 /*
1567  * SD Testing Fault Injection
1568  */
1569 #ifdef SD_FAULT_INJECTION
1570 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
1571 static void sd_faultinjection(struct scsi_pkt *pktp);
1572 static void sd_injection_log(char *buf, struct sd_lun *un);
1573 #endif
1574 
1575 /*
1576  * Device driver ops vector
1577  */
1578 static struct cb_ops sd_cb_ops = {
1579 	sdopen,			/* open */
1580 	sdclose,		/* close */
1581 	sdstrategy,		/* strategy */
1582 	nodev,			/* print */
1583 	sddump,			/* dump */
1584 	sdread,			/* read */
1585 	sdwrite,		/* write */
1586 	sdioctl,		/* ioctl */
1587 	nodev,			/* devmap */
1588 	nodev,			/* mmap */
1589 	nodev,			/* segmap */
1590 	nochpoll,		/* poll */
1591 	sd_prop_op,		/* cb_prop_op */
1592 	0,			/* streamtab */
1593 	D_64BIT | D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flags */
1594 	CB_REV,			/* cb_rev */
1595 	sdaread,		/* async I/O read entry point */
1596 	sdawrite		/* async I/O write entry point */
1597 };
1598 
1599 static struct dev_ops sd_ops = {
1600 	DEVO_REV,		/* devo_rev, */
1601 	0,			/* refcnt */
1602 	sdinfo,			/* info */
1603 	nulldev,		/* identify */
1604 	sdprobe,		/* probe */
1605 	sdattach,		/* attach */
1606 	sddetach,		/* detach */
1607 	nodev,			/* reset */
1608 	&sd_cb_ops,		/* driver operations */
1609 	NULL,			/* bus operations */
1610 	sdpower			/* power */
1611 };
1612 
1613 
1614 /*
1615  * This is the loadable module wrapper.
1616  */
1617 #include <sys/modctl.h>
1618 
1619 static struct modldrv modldrv = {
1620 	&mod_driverops,		/* Type of module. This one is a driver */
1621 	SD_MODULE_NAME,		/* Module name. */
1622 	&sd_ops			/* driver ops */
1623 };
1624 
1625 
1626 static struct modlinkage modlinkage = {
1627 	MODREV_1,
1628 	&modldrv,
1629 	NULL
1630 };
1631 
1632 
1633 static struct scsi_asq_key_strings sd_additional_codes[] = {
1634 	0x81, 0, "Logical Unit is Reserved",
1635 	0x85, 0, "Audio Address Not Valid",
1636 	0xb6, 0, "Media Load Mechanism Failed",
1637 	0xB9, 0, "Audio Play Operation Aborted",
1638 	0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1639 	0x53, 2, "Medium removal prevented",
1640 	0x6f, 0, "Authentication failed during key exchange",
1641 	0x6f, 1, "Key not present",
1642 	0x6f, 2, "Key not established",
1643 	0x6f, 3, "Read without proper authentication",
1644 	0x6f, 4, "Mismatched region to this logical unit",
1645 	0x6f, 5, "Region reset count error",
1646 	0xffff, 0x0, NULL
1647 };
1648 
1649 
1650 /*
1651  * Struct for passing printing information for sense data messages
1652  */
1653 struct sd_sense_info {
1654 	int	ssi_severity;
1655 	int	ssi_pfa_flag;
1656 };
1657 
1658 /*
1659  * Table of function pointers for iostart-side routines. Separate "chains"
1660  * of layered function calls are formed by placing the function pointers
1661  * sequentially in the desired order. Functions are called according to an
1662  * incrementing table index ordering. The last function in each chain must
1663  * be sd_core_iostart(). The corresponding iodone-side routines are expected
1664  * in the sd_iodone_chain[] array.
1665  *
1666  * Note: It may seem more natural to organize both the iostart and iodone
1667  * functions together, into an array of structures (or some similar
1668  * organization) with a common index, rather than two separate arrays which
1669  * must be maintained in synchronization. The purpose of this division is
1670  * to achieve improved performance: individual arrays allow for more
1671  * effective cache line utilization on certain platforms.
1672 */ 1673 1674 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1675 1676 1677 static sd_chain_t sd_iostart_chain[] = { 1678 1679 /* Chain for buf IO for disk drive targets (PM enabled) */ 1680 sd_mapblockaddr_iostart, /* Index: 0 */ 1681 sd_pm_iostart, /* Index: 1 */ 1682 sd_core_iostart, /* Index: 2 */ 1683 1684 /* Chain for buf IO for disk drive targets (PM disabled) */ 1685 sd_mapblockaddr_iostart, /* Index: 3 */ 1686 sd_core_iostart, /* Index: 4 */ 1687 1688 /* Chain for buf IO for removable-media targets (PM enabled) */ 1689 sd_mapblockaddr_iostart, /* Index: 5 */ 1690 sd_mapblocksize_iostart, /* Index: 6 */ 1691 sd_pm_iostart, /* Index: 7 */ 1692 sd_core_iostart, /* Index: 8 */ 1693 1694 /* Chain for buf IO for removable-media targets (PM disabled) */ 1695 sd_mapblockaddr_iostart, /* Index: 9 */ 1696 sd_mapblocksize_iostart, /* Index: 10 */ 1697 sd_core_iostart, /* Index: 11 */ 1698 1699 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1700 sd_mapblockaddr_iostart, /* Index: 12 */ 1701 sd_checksum_iostart, /* Index: 13 */ 1702 sd_pm_iostart, /* Index: 14 */ 1703 sd_core_iostart, /* Index: 15 */ 1704 1705 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1706 sd_mapblockaddr_iostart, /* Index: 16 */ 1707 sd_checksum_iostart, /* Index: 17 */ 1708 sd_core_iostart, /* Index: 18 */ 1709 1710 /* Chain for USCSI commands (all targets) */ 1711 sd_pm_iostart, /* Index: 19 */ 1712 sd_core_iostart, /* Index: 20 */ 1713 1714 /* Chain for checksumming USCSI commands (all targets) */ 1715 sd_checksum_uscsi_iostart, /* Index: 21 */ 1716 sd_pm_iostart, /* Index: 22 */ 1717 sd_core_iostart, /* Index: 23 */ 1718 1719 /* Chain for "direct" USCSI commands (all targets) */ 1720 sd_core_iostart, /* Index: 24 */ 1721 1722 /* Chain for "direct priority" USCSI commands (all targets) */ 1723 sd_core_iostart, /* Index: 25 */ 1724 }; 1725 1726 /* 1727 * Macros to locate the first function of each iostart chain in the 1728 * sd_iostart_chain[] array. These are located by the index in the array. 1729 */ 1730 #define SD_CHAIN_DISK_IOSTART 0 1731 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1732 #define SD_CHAIN_RMMEDIA_IOSTART 5 1733 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1734 #define SD_CHAIN_CHKSUM_IOSTART 12 1735 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1736 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1737 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1738 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1739 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1740 1741 1742 /* 1743 * Table of function pointers for the iodone-side routines for the driver- 1744 * internal layering mechanism. The calling sequence for iodone routines 1745 * uses a decrementing table index, so the last routine called in a chain 1746 * must be at the lowest array index location for that chain. The last 1747 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1748 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1749 * of the functions in an iodone side chain must correspond to the ordering 1750 * of the iostart routines for that chain. Note that there is no iodone 1751 * side routine that corresponds to sd_core_iostart(), so there is no 1752 * entry in the table for this. 
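 *
 * Editor's walk-through of the ordering rules above: a buf using the
 * PM-enabled disk chain enters the iostart side at SD_CHAIN_DISK_IOSTART
 * (index 0) and proceeds 0 -> 1 -> 2; at completion the iodone side is
 * entered at SD_CHAIN_DISK_IODONE (index 2, sd_pm_iodone) and unwinds
 * 2 -> 1 -> 0, finishing in sd_buf_iodone().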
1753  */
1754 
1755 static sd_chain_t sd_iodone_chain[] = {
1756 
1757 	/* Chain for buf IO for disk drive targets (PM enabled) */
1758 	sd_buf_iodone,			/* Index: 0 */
1759 	sd_mapblockaddr_iodone,		/* Index: 1 */
1760 	sd_pm_iodone,			/* Index: 2 */
1761 
1762 	/* Chain for buf IO for disk drive targets (PM disabled) */
1763 	sd_buf_iodone,			/* Index: 3 */
1764 	sd_mapblockaddr_iodone,		/* Index: 4 */
1765 
1766 	/* Chain for buf IO for removable-media targets (PM enabled) */
1767 	sd_buf_iodone,			/* Index: 5 */
1768 	sd_mapblockaddr_iodone,		/* Index: 6 */
1769 	sd_mapblocksize_iodone,		/* Index: 7 */
1770 	sd_pm_iodone,			/* Index: 8 */
1771 
1772 	/* Chain for buf IO for removable-media targets (PM disabled) */
1773 	sd_buf_iodone,			/* Index: 9 */
1774 	sd_mapblockaddr_iodone,		/* Index: 10 */
1775 	sd_mapblocksize_iodone,		/* Index: 11 */
1776 
1777 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1778 	sd_buf_iodone,			/* Index: 12 */
1779 	sd_mapblockaddr_iodone,		/* Index: 13 */
1780 	sd_checksum_iodone,		/* Index: 14 */
1781 	sd_pm_iodone,			/* Index: 15 */
1782 
1783 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1784 	sd_buf_iodone,			/* Index: 16 */
1785 	sd_mapblockaddr_iodone,		/* Index: 17 */
1786 	sd_checksum_iodone,		/* Index: 18 */
1787 
1788 	/* Chain for USCSI commands (non-checksum targets) */
1789 	sd_uscsi_iodone,		/* Index: 19 */
1790 	sd_pm_iodone,			/* Index: 20 */
1791 
1792 	/* Chain for USCSI commands (checksum targets) */
1793 	sd_uscsi_iodone,		/* Index: 21 */
1794 	sd_checksum_uscsi_iodone,	/* Index: 22 */
1795 	sd_pm_iodone,			/* Index: 23 */
1796 
1797 	/* Chain for "direct" USCSI commands (all targets) */
1798 	sd_uscsi_iodone,		/* Index: 24 */
1799 
1800 	/* Chain for "direct priority" USCSI commands (all targets) */
1801 	sd_uscsi_iodone,		/* Index: 25 */
1802 };
1803 
1804 
1805 /*
1806  * Macros to locate the "first" function in the sd_iodone_chain[] array for
1807  * each iodone-side chain. These are located by the array index, but as the
1808  * iodone side functions are called in a decrementing-index order, the
1809  * highest index number in each chain must be specified (as these correspond
1810  * to the first function in the iodone chain that will be called by the core
1811  * at IO completion time).
1812  */
1813 
1814 #define	SD_CHAIN_DISK_IODONE		2
1815 #define	SD_CHAIN_DISK_IODONE_NO_PM	4
1816 #define	SD_CHAIN_RMMEDIA_IODONE		8
1817 #define	SD_CHAIN_RMMEDIA_IODONE_NO_PM	11
1818 #define	SD_CHAIN_CHKSUM_IODONE		15
1819 #define	SD_CHAIN_CHKSUM_IODONE_NO_PM	18
1820 #define	SD_CHAIN_USCSI_CMD_IODONE	20
1821 #define	SD_CHAIN_USCSI_CHKSUM_IODONE	23
1822 #define	SD_CHAIN_DIRECT_CMD_IODONE	24
1823 #define	SD_CHAIN_PRIORITY_CMD_IODONE	25
1824 
1825 
1826 
1827 
1828 /*
1829  * Array to map a layering chain index to the appropriate initpkt routine.
1830  * The redundant entries are present so that the index used for accessing
1831  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1832  * with this table as well.
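 *
 * For example (editor's sketch, hypothetical call site), packet
 * construction for a given xbuf can be dispatched with the same index that
 * drives the chain tables:
 *
 *	struct scsi_pkt *pktp;
 *	int rval = sd_initpkt_map[xp->xb_chain_iostart](bp, &pktp);
 *
 * The driver's actual dispatch point may differ; the point is only that
 * one index selects consistent entries across all of these tables.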
1833  */
1834 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
1835 
1836 static sd_initpkt_t	sd_initpkt_map[] = {
1837 
1838 	/* Chain for buf IO for disk drive targets (PM enabled) */
1839 	sd_initpkt_for_buf,		/* Index: 0 */
1840 	sd_initpkt_for_buf,		/* Index: 1 */
1841 	sd_initpkt_for_buf,		/* Index: 2 */
1842 
1843 	/* Chain for buf IO for disk drive targets (PM disabled) */
1844 	sd_initpkt_for_buf,		/* Index: 3 */
1845 	sd_initpkt_for_buf,		/* Index: 4 */
1846 
1847 	/* Chain for buf IO for removable-media targets (PM enabled) */
1848 	sd_initpkt_for_buf,		/* Index: 5 */
1849 	sd_initpkt_for_buf,		/* Index: 6 */
1850 	sd_initpkt_for_buf,		/* Index: 7 */
1851 	sd_initpkt_for_buf,		/* Index: 8 */
1852 
1853 	/* Chain for buf IO for removable-media targets (PM disabled) */
1854 	sd_initpkt_for_buf,		/* Index: 9 */
1855 	sd_initpkt_for_buf,		/* Index: 10 */
1856 	sd_initpkt_for_buf,		/* Index: 11 */
1857 
1858 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1859 	sd_initpkt_for_buf,		/* Index: 12 */
1860 	sd_initpkt_for_buf,		/* Index: 13 */
1861 	sd_initpkt_for_buf,		/* Index: 14 */
1862 	sd_initpkt_for_buf,		/* Index: 15 */
1863 
1864 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1865 	sd_initpkt_for_buf,		/* Index: 16 */
1866 	sd_initpkt_for_buf,		/* Index: 17 */
1867 	sd_initpkt_for_buf,		/* Index: 18 */
1868 
1869 	/* Chain for USCSI commands (non-checksum targets) */
1870 	sd_initpkt_for_uscsi,		/* Index: 19 */
1871 	sd_initpkt_for_uscsi,		/* Index: 20 */
1872 
1873 	/* Chain for USCSI commands (checksum targets) */
1874 	sd_initpkt_for_uscsi,		/* Index: 21 */
1875 	sd_initpkt_for_uscsi,		/* Index: 22 */
1876 	sd_initpkt_for_uscsi,		/* Index: 23 */
1877 
1878 	/* Chain for "direct" USCSI commands (all targets) */
1879 	sd_initpkt_for_uscsi,		/* Index: 24 */
1880 
1881 	/* Chain for "direct priority" USCSI commands (all targets) */
1882 	sd_initpkt_for_uscsi,		/* Index: 25 */
1883 
1884 };
1885 
1886 
1887 /*
1888  * Array to map a layering chain index to the appropriate destroypkt routine.
1889  * The redundant entries are present so that the index used for accessing
1890  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1891  * with this table as well.
1892  */
1893 typedef void (*sd_destroypkt_t)(struct buf *);
1894 
1895 static sd_destroypkt_t	sd_destroypkt_map[] = {
1896 
1897 	/* Chain for buf IO for disk drive targets (PM enabled) */
1898 	sd_destroypkt_for_buf,		/* Index: 0 */
1899 	sd_destroypkt_for_buf,		/* Index: 1 */
1900 	sd_destroypkt_for_buf,		/* Index: 2 */
1901 
1902 	/* Chain for buf IO for disk drive targets (PM disabled) */
1903 	sd_destroypkt_for_buf,		/* Index: 3 */
1904 	sd_destroypkt_for_buf,		/* Index: 4 */
1905 
1906 	/* Chain for buf IO for removable-media targets (PM enabled) */
1907 	sd_destroypkt_for_buf,		/* Index: 5 */
1908 	sd_destroypkt_for_buf,		/* Index: 6 */
1909 	sd_destroypkt_for_buf,		/* Index: 7 */
1910 	sd_destroypkt_for_buf,		/* Index: 8 */
1911 
1912 	/* Chain for buf IO for removable-media targets (PM disabled) */
1913 	sd_destroypkt_for_buf,		/* Index: 9 */
1914 	sd_destroypkt_for_buf,		/* Index: 10 */
1915 	sd_destroypkt_for_buf,		/* Index: 11 */
1916 
1917 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1918 	sd_destroypkt_for_buf,		/* Index: 12 */
1919 	sd_destroypkt_for_buf,		/* Index: 13 */
1920 	sd_destroypkt_for_buf,		/* Index: 14 */
1921 	sd_destroypkt_for_buf,		/* Index: 15 */
1922 
1923 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1924 	sd_destroypkt_for_buf,		/* Index: 16 */
1925 	sd_destroypkt_for_buf,		/* Index: 17 */
1926 	sd_destroypkt_for_buf,		/* Index: 18 */
1927 
1928 	/* Chain for USCSI commands (non-checksum targets) */
1929 	sd_destroypkt_for_uscsi,	/* Index: 19 */
1930 	sd_destroypkt_for_uscsi,	/* Index: 20 */
1931 
1932 	/* Chain for USCSI commands (checksum targets) */
1933 	sd_destroypkt_for_uscsi,	/* Index: 21 */
1934 	sd_destroypkt_for_uscsi,	/* Index: 22 */
1935 	sd_destroypkt_for_uscsi,	/* Index: 23 */
1936 
1937 	/* Chain for "direct" USCSI commands (all targets) */
1938 	sd_destroypkt_for_uscsi,	/* Index: 24 */
1939 
1940 	/* Chain for "direct priority" USCSI commands (all targets) */
1941 	sd_destroypkt_for_uscsi,	/* Index: 25 */
1942 
1943 };
1944 
1945 
1946 
1947 /*
1948  * Array to map a layering chain index to the appropriate chain "type".
1949  * The chain type indicates a specific property/usage of the chain.
1950  * The redundant entries are present so that the index used for accessing
1951  * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1952  * with this table as well.
1953  */
1954 
1955 #define	SD_CHAIN_NULL			0	/* for the special RQS cmd */
1956 #define	SD_CHAIN_BUFIO			1	/* regular buf IO */
1957 #define	SD_CHAIN_USCSI			2	/* regular USCSI commands */
1958 #define	SD_CHAIN_DIRECT			3	/* uscsi, w/ bypass power mgt */
1959 #define	SD_CHAIN_DIRECT_PRIORITY	4	/* uscsi, w/ bypass power mgt */
1960 						/* (for error recovery) */
1961 
1962 static int sd_chain_type_map[] = {
1963 
1964 	/* Chain for buf IO for disk drive targets (PM enabled) */
1965 	SD_CHAIN_BUFIO,			/* Index: 0 */
1966 	SD_CHAIN_BUFIO,			/* Index: 1 */
1967 	SD_CHAIN_BUFIO,			/* Index: 2 */
1968 
1969 	/* Chain for buf IO for disk drive targets (PM disabled) */
1970 	SD_CHAIN_BUFIO,			/* Index: 3 */
1971 	SD_CHAIN_BUFIO,			/* Index: 4 */
1972 
1973 	/* Chain for buf IO for removable-media targets (PM enabled) */
1974 	SD_CHAIN_BUFIO,			/* Index: 5 */
1975 	SD_CHAIN_BUFIO,			/* Index: 6 */
1976 	SD_CHAIN_BUFIO,			/* Index: 7 */
1977 	SD_CHAIN_BUFIO,			/* Index: 8 */
1978 
1979 	/* Chain for buf IO for removable-media targets (PM disabled) */
1980 	SD_CHAIN_BUFIO,			/* Index: 9 */
1981 	SD_CHAIN_BUFIO,			/* Index: 10 */
1982 	SD_CHAIN_BUFIO,			/* Index: 11 */
1983 
1984 	/* Chain for buf IO for disk drives with checksumming (PM enabled) */
1985 	SD_CHAIN_BUFIO,			/* Index: 12 */
1986 	SD_CHAIN_BUFIO,			/* Index: 13 */
1987 	SD_CHAIN_BUFIO,			/* Index: 14 */
1988 	SD_CHAIN_BUFIO,			/* Index: 15 */
1989 
1990 	/* Chain for buf IO for disk drives with checksumming (PM disabled) */
1991 	SD_CHAIN_BUFIO,			/* Index: 16 */
1992 	SD_CHAIN_BUFIO,			/* Index: 17 */
1993 	SD_CHAIN_BUFIO,			/* Index: 18 */
1994 
1995 	/* Chain for USCSI commands (non-checksum targets) */
1996 	SD_CHAIN_USCSI,			/* Index: 19 */
1997 	SD_CHAIN_USCSI,			/* Index: 20 */
1998 
1999 	/* Chain for USCSI commands (checksum targets) */
2000 	SD_CHAIN_USCSI,			/* Index: 21 */
2001 	SD_CHAIN_USCSI,			/* Index: 22 */
2002 	SD_CHAIN_USCSI,			/* Index: 23 */
2003 
2004 	/* Chain for "direct" USCSI commands (all targets) */
2005 	SD_CHAIN_DIRECT,		/* Index: 24 */
2006 
2007 	/* Chain for "direct priority" USCSI commands (all targets) */
2008 	SD_CHAIN_DIRECT_PRIORITY,	/* Index: 25 */
2009 };
2010 
2011 
2012 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2013 #define	SD_IS_BUFIO(xp)			\
2014 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2015 
2016 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2017 #define	SD_IS_DIRECT_PRIORITY(xp)	\
2018 	(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2019 
2020 
2021 
2022 /*
2023  * Struct, array, and macros to map a specific chain to the appropriate
2024  * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2025  *
2026  * The sd_chain_index_map[] array is used at attach time to set the various
2027  * un_xxx_chain type members of the sd_lun softstate to the specific layering
2028  * chain to be used with the instance. This allows different instances to use
2029  * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2030  * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2031  * values at sd_xbuf init time, this allows (1) layering chains to be changed
2032  * dynamically and without locking; and (2) a layer to update the
2033  * xb_chain_io[start|done] member in a given xbuf with its current index value,
2034  * to allow for deferred processing of an IO within the same chain from a
2035  * different execution context.
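 *
 * Editor's sketch of the deferred-processing case described above, using
 * the SD_NEXT_IOSTART macro defined further below (the control flow shown
 * is illustrative only):
 *
 *	xp->xb_chain_iostart = index;      (remember the current position)
 *	... hand the buf off to a taskq thread ...
 *	SD_NEXT_IOSTART(xp->xb_chain_iostart, un, bp);   (resume the chain)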
2036 */ 2037 2038 struct sd_chain_index { 2039 int sci_iostart_index; 2040 int sci_iodone_index; 2041 }; 2042 2043 static struct sd_chain_index sd_chain_index_map[] = { 2044 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2045 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2046 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2047 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2048 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2049 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2050 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2051 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2052 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2053 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2054 }; 2055 2056 2057 /* 2058 * The following are indexes into the sd_chain_index_map[] array. 2059 */ 2060 2061 /* un->un_buf_chain_type must be set to one of these */ 2062 #define SD_CHAIN_INFO_DISK 0 2063 #define SD_CHAIN_INFO_DISK_NO_PM 1 2064 #define SD_CHAIN_INFO_RMMEDIA 2 2065 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2066 #define SD_CHAIN_INFO_CHKSUM 4 2067 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2068 2069 /* un->un_uscsi_chain_type must be set to one of these */ 2070 #define SD_CHAIN_INFO_USCSI_CMD 6 2071 /* USCSI with PM disabled is the same as DIRECT */ 2072 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2073 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2074 2075 /* un->un_direct_chain_type must be set to one of these */ 2076 #define SD_CHAIN_INFO_DIRECT_CMD 8 2077 2078 /* un->un_priority_chain_type must be set to one of these */ 2079 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2080 2081 /* size for devid inquiries */ 2082 #define MAX_INQUIRY_SIZE 0xF0 2083 2084 /* 2085 * Macros used by functions to pass a given buf(9S) struct along to the 2086 * next function in the layering chain for further processing. 2087 * 2088 * In the following macros, passing more than three arguments to the called 2089 * routines causes the optimizer for the SPARC compiler to stop doing tail 2090 * call elimination which results in significant performance degradation. 2091 */ 2092 #define SD_BEGIN_IOSTART(index, un, bp) \ 2093 ((*(sd_iostart_chain[index]))(index, un, bp)) 2094 2095 #define SD_BEGIN_IODONE(index, un, bp) \ 2096 ((*(sd_iodone_chain[index]))(index, un, bp)) 2097 2098 #define SD_NEXT_IOSTART(index, un, bp) \ 2099 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2100 2101 #define SD_NEXT_IODONE(index, un, bp) \ 2102 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2103 2104 2105 /* 2106 * Function: _init 2107 * 2108 * Description: This is the driver _init(9E) entry point. 2109 * 2110 * Return Code: Returns the value from mod_install(9F) or 2111 * ddi_soft_state_init(9F) as appropriate. 2112 * 2113 * Context: Called when driver module loaded. 
2114  */
2115 
2116 int
2117 _init(void)
2118 {
2119 	int err;
2120 
2121 	/* establish driver name from module name */
2122 	sd_label = mod_modname(&modlinkage);
2123 
2124 	err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
2125 	    SD_MAXUNIT);
2126 
2127 	if (err != 0) {
2128 		return (err);
2129 	}
2130 
2131 	mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
2132 	mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
2133 	mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);
2134 
2135 	mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
2136 	cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
2137 	cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);
2138 
2139 	/*
2140 	 * it's ok to init here even for fibre devices
2141 	 */
2142 	sd_scsi_probe_cache_init();
2143 
2144 	/*
2145 	 * Creating taskq before mod_install ensures that all callers (threads)
2146 	 * that enter the module after a successful mod_install encounter
2147 	 * a valid taskq.
2148 	 */
2149 	sd_taskq_create();
2150 
2151 	err = mod_install(&modlinkage);
2152 	if (err != 0) {
2153 		/* delete taskq if install fails */
2154 		sd_taskq_delete();
2155 
2156 		mutex_destroy(&sd_detach_mutex);
2157 		mutex_destroy(&sd_log_mutex);
2158 		mutex_destroy(&sd_label_mutex);
2159 
2160 		mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2161 		cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2162 		cv_destroy(&sd_tr.srq_inprocess_cv);
2163 
2164 		sd_scsi_probe_cache_fini();
2165 
2166 		ddi_soft_state_fini(&sd_state);
2167 		return (err);
2168 	}
2169 
2170 	return (err);
2171 }
2172 
2173 
2174 /*
2175  * Function: _fini
2176  *
2177  * Description: This is the driver _fini(9E) entry point.
2178  *
2179  * Return Code: Returns the value from mod_remove(9F).
2180  *
2181  * Context: Called when driver module is unloaded.
2182  */
2183 
2184 int
2185 _fini(void)
2186 {
2187 	int err;
2188 
2189 	if ((err = mod_remove(&modlinkage)) != 0) {
2190 		return (err);
2191 	}
2192 
2193 	sd_taskq_delete();
2194 
2195 	mutex_destroy(&sd_detach_mutex);
2196 	mutex_destroy(&sd_log_mutex);
2197 	mutex_destroy(&sd_label_mutex);
2198 	mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2199 
2200 	sd_scsi_probe_cache_fini();
2201 
2202 	cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2203 	cv_destroy(&sd_tr.srq_inprocess_cv);
2204 
2205 	ddi_soft_state_fini(&sd_state);
2206 
2207 	return (err);
2208 }
2209 
2210 
2211 /*
2212  * Function: _info
2213  *
2214  * Description: This is the driver _info(9E) entry point.
2215  *
2216  * Arguments: modinfop - pointer to the driver modinfo structure
2217  *
2218  * Return Code: Returns the value from mod_info(9F).
2219  *
2220  * Context: Kernel thread context
2221  */
2222 
2223 int
2224 _info(struct modinfo *modinfop)
2225 {
2226 	return (mod_info(&modlinkage, modinfop));
2227 }
2228 
2229 
2230 /*
2231  * The following routines implement the driver message logging facility.
2232  * They provide component- and level-based debug output filtering.
2233  * Output may also be restricted to messages for a single instance by
2234  * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2235  * to NULL, then messages for all instances are printed.
2236  *
2237  * These routines have been cloned from each other due to the language
2238  * constraints of macros and variable argument list processing.
2239  */
2240 
2241 
2242 /*
2243  * Function: sd_log_err
2244  *
2245  * Description: This routine is called by the SD_ERROR macro for debug
2246  *		logging of error conditions.
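 *
 *		A representative call site (editor's illustration; the
 *		component and format values are of the style used elsewhere
 *		in this file):
 *
 *			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
 *			    "sdattach: unexpected state %d\n", state);
 *
 *		which expands to a sd_log_err() call that is emitted only
 *		when the component and level masks match.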
2247  *
2248  * Arguments: comp - driver component being logged
2249  *	      un - pointer to driver soft state (unit) structure
2250  *	      fmt - error string and format to be logged
2251  */
2252 
2253 static void
2254 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2255 {
2256 	va_list		ap;
2257 	dev_info_t	*dev;
2258 
2259 	ASSERT(un != NULL);
2260 	dev = SD_DEVINFO(un);
2261 	ASSERT(dev != NULL);
2262 
2263 	/*
2264 	 * Filter messages based on the global component and level masks.
2265 	 * Also print if un matches the value of sd_debug_un, or if
2266 	 * sd_debug_un is set to NULL.
2267 	 */
2268 	if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2269 	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2270 		mutex_enter(&sd_log_mutex);
2271 		va_start(ap, fmt);
2272 		(void) vsprintf(sd_log_buf, fmt, ap);
2273 		va_end(ap);
2274 		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2275 		mutex_exit(&sd_log_mutex);
2276 	}
2277 #ifdef SD_FAULT_INJECTION
2278 	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2279 	if (un->sd_injection_mask & comp) {
2280 		mutex_enter(&sd_log_mutex);
2281 		va_start(ap, fmt);
2282 		(void) vsprintf(sd_log_buf, fmt, ap);
2283 		va_end(ap);
2284 		sd_injection_log(sd_log_buf, un);
2285 		mutex_exit(&sd_log_mutex);
2286 	}
2287 #endif
2288 }
2289 
2290 
2291 /*
2292  * Function: sd_log_info
2293  *
2294  * Description: This routine is called by the SD_INFO macro for debug
2295  *		logging of general purpose informational conditions.
2296  *
2297  * Arguments: component - driver component being logged
2298  *	      un - pointer to driver soft state (unit) structure
2299  *	      fmt - info string and format to be logged
2300  */
2301 
2302 static void
2303 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2304 {
2305 	va_list		ap;
2306 	dev_info_t	*dev;
2307 
2308 	ASSERT(un != NULL);
2309 	dev = SD_DEVINFO(un);
2310 	ASSERT(dev != NULL);
2311 
2312 	/*
2313 	 * Filter messages based on the global component and level masks.
2314 	 * Also print if un matches the value of sd_debug_un, or if
2315 	 * sd_debug_un is set to NULL.
2316 	 */
2317 	if ((sd_component_mask & component) &&
2318 	    (sd_level_mask & SD_LOGMASK_INFO) &&
2319 	    ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2320 		mutex_enter(&sd_log_mutex);
2321 		va_start(ap, fmt);
2322 		(void) vsprintf(sd_log_buf, fmt, ap);
2323 		va_end(ap);
2324 		scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2325 		mutex_exit(&sd_log_mutex);
2326 	}
2327 #ifdef SD_FAULT_INJECTION
2328 	_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2329 	if (un->sd_injection_mask & component) {
2330 		mutex_enter(&sd_log_mutex);
2331 		va_start(ap, fmt);
2332 		(void) vsprintf(sd_log_buf, fmt, ap);
2333 		va_end(ap);
2334 		sd_injection_log(sd_log_buf, un);
2335 		mutex_exit(&sd_log_mutex);
2336 	}
2337 #endif
2338 }
2339 
2340 
2341 /*
2342  * Function: sd_log_trace
2343  *
2344  * Description: This routine is called by the SD_TRACE macro for debug
2345  *		logging of trace conditions (i.e. function entry/exit).
2346  *
2347  * Arguments: component - driver component being logged
2348  *	      un - pointer to driver soft state (unit) structure
2349  *	      fmt - trace string and format to be logged
2350  */
2351 
2352 static void
2353 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2354 {
2355 	va_list		ap;
2356 	dev_info_t	*dev;
2357 
2358 	ASSERT(un != NULL);
2359 	dev = SD_DEVINFO(un);
2360 	ASSERT(dev != NULL);
2361 
2362 	/*
2363 	 * Filter messages based on the global component and level masks.
2364 	 * Also print if un matches the value of sd_debug_un, or if
2365 	 * sd_debug_un is set to NULL.
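	 *
	 * Concrete example (editor's note): with sd_component_mask set to
	 * SD_LOG_COMMON, sd_level_mask = SD_LOGMASK_TRACE and
	 * sd_debug_un = NULL, only SD_TRACE messages tagged with the
	 * SD_LOG_COMMON component are printed, for every instance.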
2366 */ 2367 if ((sd_component_mask & component) && 2368 (sd_level_mask & SD_LOGMASK_TRACE) && 2369 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2370 mutex_enter(&sd_log_mutex); 2371 va_start(ap, fmt); 2372 (void) vsprintf(sd_log_buf, fmt, ap); 2373 va_end(ap); 2374 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2375 mutex_exit(&sd_log_mutex); 2376 } 2377 #ifdef SD_FAULT_INJECTION 2378 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2379 if (un->sd_injection_mask & component) { 2380 mutex_enter(&sd_log_mutex); 2381 va_start(ap, fmt); 2382 (void) vsprintf(sd_log_buf, fmt, ap); 2383 va_end(ap); 2384 sd_injection_log(sd_log_buf, un); 2385 mutex_exit(&sd_log_mutex); 2386 } 2387 #endif 2388 } 2389 2390 2391 /* 2392 * Function: sdprobe 2393 * 2394 * Description: This is the driver probe(9e) entry point function. 2395 * 2396 * Arguments: devi - opaque device info handle 2397 * 2398 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2399 * DDI_PROBE_FAILURE: If the probe failed. 2400 * DDI_PROBE_PARTIAL: If the instance is not present now, 2401 * but may be present in the future. 2402 */ 2403 2404 static int 2405 sdprobe(dev_info_t *devi) 2406 { 2407 struct scsi_device *devp; 2408 int rval; 2409 int instance; 2410 2411 /* 2412 * if it wasn't for pln, sdprobe could actually be nulldev 2413 * in the "__fibre" case. 2414 */ 2415 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2416 return (DDI_PROBE_DONTCARE); 2417 } 2418 2419 devp = ddi_get_driver_private(devi); 2420 2421 if (devp == NULL) { 2422 /* Ooops... nexus driver is mis-configured... */ 2423 return (DDI_PROBE_FAILURE); 2424 } 2425 2426 instance = ddi_get_instance(devi); 2427 2428 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2429 return (DDI_PROBE_PARTIAL); 2430 } 2431 2432 /* 2433 * Call the SCSA utility probe routine to see if we actually 2434 * have a target at this SCSI nexus. 2435 */ 2436 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2437 case SCSIPROBE_EXISTS: 2438 switch (devp->sd_inq->inq_dtype) { 2439 case DTYPE_DIRECT: 2440 rval = DDI_PROBE_SUCCESS; 2441 break; 2442 case DTYPE_RODIRECT: 2443 /* CDs etc. Can be removable media */ 2444 rval = DDI_PROBE_SUCCESS; 2445 break; 2446 case DTYPE_OPTICAL: 2447 /* 2448 * Rewritable optical driver HP115AA 2449 * Can also be removable media 2450 */ 2451 2452 /* 2453 * Do not attempt to bind to DTYPE_OPTICAL if 2454 * pre solaris 9 sparc sd behavior is required 2455 * 2456 * If first time through and sd_dtype_optical_bind 2457 * has not been set in /etc/system check properties 2458 */ 2459 2460 if (sd_dtype_optical_bind < 0) { 2461 sd_dtype_optical_bind = ddi_prop_get_int 2462 (DDI_DEV_T_ANY, devi, 0, 2463 "optical-device-bind", 1); 2464 } 2465 2466 if (sd_dtype_optical_bind == 0) { 2467 rval = DDI_PROBE_FAILURE; 2468 } else { 2469 rval = DDI_PROBE_SUCCESS; 2470 } 2471 break; 2472 2473 case DTYPE_NOTPRESENT: 2474 default: 2475 rval = DDI_PROBE_FAILURE; 2476 break; 2477 } 2478 break; 2479 default: 2480 rval = DDI_PROBE_PARTIAL; 2481 break; 2482 } 2483 2484 /* 2485 * This routine checks for resource allocation prior to freeing, 2486 * so it will take care of the "smart probing" case where a 2487 * scsi_probe() may or may not have been issued and will *not* 2488 * free previously-freed resources. 2489 */ 2490 scsi_unprobe(devp); 2491 return (rval); 2492 } 2493 2494 2495 /* 2496 * Function: sdinfo 2497 * 2498 * Description: This is the driver getinfo(9e) entry point function. 
2499 * Given the device number, return the devinfo pointer from 2500 * the scsi_device structure or the instance number 2501 * associated with the dev_t. 2502 * 2503 * Arguments: dip - pointer to device info structure 2504 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2505 * DDI_INFO_DEVT2INSTANCE) 2506 * arg - driver dev_t 2507 * resultp - user buffer for request response 2508 * 2509 * Return Code: DDI_SUCCESS 2510 * DDI_FAILURE 2511 */ 2512 /* ARGSUSED */ 2513 static int 2514 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2515 { 2516 struct sd_lun *un; 2517 dev_t dev; 2518 int instance; 2519 int error; 2520 2521 switch (infocmd) { 2522 case DDI_INFO_DEVT2DEVINFO: 2523 dev = (dev_t)arg; 2524 instance = SDUNIT(dev); 2525 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2526 return (DDI_FAILURE); 2527 } 2528 *result = (void *) SD_DEVINFO(un); 2529 error = DDI_SUCCESS; 2530 break; 2531 case DDI_INFO_DEVT2INSTANCE: 2532 dev = (dev_t)arg; 2533 instance = SDUNIT(dev); 2534 *result = (void *)(uintptr_t)instance; 2535 error = DDI_SUCCESS; 2536 break; 2537 default: 2538 error = DDI_FAILURE; 2539 } 2540 return (error); 2541 } 2542 2543 /* 2544 * Function: sd_prop_op 2545 * 2546 * Description: This is the driver prop_op(9e) entry point function. 2547 * Return the number of blocks for the partition in question 2548 * or forward the request to the property facilities. 2549 * 2550 * Arguments: dev - device number 2551 * dip - pointer to device info structure 2552 * prop_op - property operator 2553 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2554 * name - pointer to property name 2555 * valuep - pointer or address of the user buffer 2556 * lengthp - property length 2557 * 2558 * Return Code: DDI_PROP_SUCCESS 2559 * DDI_PROP_NOT_FOUND 2560 * DDI_PROP_UNDEFINED 2561 * DDI_PROP_NO_MEMORY 2562 * DDI_PROP_BUF_TOO_SMALL 2563 */ 2564 2565 static int 2566 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2567 char *name, caddr_t valuep, int *lengthp) 2568 { 2569 int instance = ddi_get_instance(dip); 2570 struct sd_lun *un; 2571 uint64_t nblocks64; 2572 2573 /* 2574 * Our dynamic properties are all device specific and size oriented. 2575 * Requests issued under conditions where size is valid are passed 2576 * to ddi_prop_op_nblocks with the size information, otherwise the 2577 * request is passed to ddi_prop_op. Size depends on valid geometry. 2578 */ 2579 un = ddi_get_soft_state(sd_state, instance); 2580 if ((dev == DDI_DEV_T_ANY) || (un == NULL) || 2581 (un->un_f_geometry_is_valid == FALSE)) { 2582 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2583 name, valuep, lengthp)); 2584 } else { 2585 /* get nblocks value */ 2586 ASSERT(!mutex_owned(SD_MUTEX(un))); 2587 mutex_enter(SD_MUTEX(un)); 2588 nblocks64 = (ulong_t)un->un_map[SDPART(dev)].dkl_nblk; 2589 mutex_exit(SD_MUTEX(un)); 2590 2591 return (ddi_prop_op_nblocks(dev, dip, prop_op, mod_flags, 2592 name, valuep, lengthp, nblocks64)); 2593 } 2594 } 2595 2596 /* 2597 * The following functions are for smart probing: 2598 * sd_scsi_probe_cache_init() 2599 * sd_scsi_probe_cache_fini() 2600 * sd_scsi_clear_probe_cache() 2601 * sd_scsi_probe_with_cache() 2602 */ 2603 2604 /* 2605 * Function: sd_scsi_probe_cache_init 2606 * 2607 * Description: Initializes the probe response cache mutex and head pointer. 
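 *
 *		Editor's sketch of the cache shape implied by the usage in
 *		the routines below (one node per parent HBA node, holding
 *		the last probe result per target; the authoritative
 *		definition lives in the driver headers):
 *
 *			struct sd_scsi_probe_cache {
 *				struct sd_scsi_probe_cache	*next;
 *				dev_info_t			*pdip;
 *				int	cache[NTARGETS_WIDE];
 *			};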
2608  *
2609  * Context: Kernel thread context
2610  */
2611 
2612 static void
2613 sd_scsi_probe_cache_init(void)
2614 {
2615 	mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL);
2616 	sd_scsi_probe_cache_head = NULL;
2617 }
2618 
2619 
2620 /*
2621  * Function: sd_scsi_probe_cache_fini
2622  *
2623  * Description: Frees all resources associated with the probe response cache.
2624  *
2625  * Context: Kernel thread context
2626  */
2627 
2628 static void
2629 sd_scsi_probe_cache_fini(void)
2630 {
2631 	struct sd_scsi_probe_cache *cp;
2632 	struct sd_scsi_probe_cache *ncp;
2633 
2634 	/* Clean up our smart probing linked list */
2635 	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) {
2636 		ncp = cp->next;
2637 		kmem_free(cp, sizeof (struct sd_scsi_probe_cache));
2638 	}
2639 	sd_scsi_probe_cache_head = NULL;
2640 	mutex_destroy(&sd_scsi_probe_cache_mutex);
2641 }
2642 
2643 
2644 /*
2645  * Function: sd_scsi_clear_probe_cache
2646  *
2647  * Description: This routine clears the probe response cache. This is
2648  *		done when open() returns ENXIO so that when deferred
2649  *		attach is attempted (possibly after a device has been
2650  *		turned on) we will retry the probe. Since we don't know
2651  *		which target we failed to open, we just clear the
2652  *		entire cache.
2653  *
2654  * Context: Kernel thread context
2655  */
2656 
2657 static void
2658 sd_scsi_clear_probe_cache(void)
2659 {
2660 	struct sd_scsi_probe_cache	*cp;
2661 	int				i;
2662 
2663 	mutex_enter(&sd_scsi_probe_cache_mutex);
2664 	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2665 		/*
2666 		 * Reset all entries to SCSIPROBE_EXISTS. This will
2667 		 * force probing to be performed the next time
2668 		 * sd_scsi_probe_with_cache is called.
2669 		 */
2670 		for (i = 0; i < NTARGETS_WIDE; i++) {
2671 			cp->cache[i] = SCSIPROBE_EXISTS;
2672 		}
2673 	}
2674 	mutex_exit(&sd_scsi_probe_cache_mutex);
2675 }
2676 
2677 
2678 /*
2679  * Function: sd_scsi_probe_with_cache
2680  *
2681  * Description: This routine implements support for a scsi device probe
2682  *		with cache. The driver maintains a cache of the target
2683  *		responses to scsi probes. If we get no response from a
2684  *		target during a probe inquiry, we remember that, and we
2685  *		avoid additional calls to scsi_probe on non-zero LUNs
2686  *		on the same target until the cache is cleared. By doing
2687  *		so we avoid the 1/4 sec selection timeout for nonzero
2688  *		LUNs. lun0 of a target is always probed.
2689  *
2690  * Arguments: devp - Pointer to a scsi_device(9S) structure
2691  *	      waitfn - indicates what the allocator routines should
2692  *		       do when resources are not available. This value
2693  *		       is passed on to scsi_probe() when that routine
2694  *		       is called.
2695  *
2696  * Return Code: SCSIPROBE_NORESP if a NORESP is present in the probe
2697  *		response cache; otherwise the value returned by scsi_probe(9F).
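 *
 * Example sequence (editor's illustration): if probing target 3, LUN 0
 * returns SCSIPROBE_NORESP, that result is cached for target 3; later
 * probes of target 3, LUN 1..N on the same HBA then return SCSIPROBE_NORESP
 * immediately, skipping the ~250 ms selection timeout each would otherwise
 * incur, until sd_scsi_clear_probe_cache() resets the entry.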
2698  *
2699  * Context: Kernel thread context
2700  */
2701 
2702 static int
2703 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
2704 {
2705 	struct sd_scsi_probe_cache	*cp;
2706 	dev_info_t	*pdip = ddi_get_parent(devp->sd_dev);
2707 	int		lun, tgt;
2708 
2709 	lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2710 	    SCSI_ADDR_PROP_LUN, 0);
2711 	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2712 	    SCSI_ADDR_PROP_TARGET, -1);
2713 
2714 	/* Make sure caching enabled and target in range */
2715 	if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
2716 		/* do it the old way (no cache) */
2717 		return (scsi_probe(devp, waitfn));
2718 	}
2719 
2720 	mutex_enter(&sd_scsi_probe_cache_mutex);
2721 
2722 	/* Find the cache for this scsi bus instance */
2723 	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2724 		if (cp->pdip == pdip) {
2725 			break;
2726 		}
2727 	}
2728 
2729 	/* If we can't find a cache for this pdip, create one */
2730 	if (cp == NULL) {
2731 		int i;
2732 
2733 		cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
2734 		    KM_SLEEP);
2735 		cp->pdip = pdip;
2736 		cp->next = sd_scsi_probe_cache_head;
2737 		sd_scsi_probe_cache_head = cp;
2738 		for (i = 0; i < NTARGETS_WIDE; i++) {
2739 			cp->cache[i] = SCSIPROBE_EXISTS;
2740 		}
2741 	}
2742 
2743 	mutex_exit(&sd_scsi_probe_cache_mutex);
2744 
2745 	/* Recompute the cache for this target if LUN zero */
2746 	if (lun == 0) {
2747 		cp->cache[tgt] = SCSIPROBE_EXISTS;
2748 	}
2749 
2750 	/* Don't probe if cache remembers a NORESP from a previous LUN. */
2751 	if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
2752 		return (SCSIPROBE_NORESP);
2753 	}
2754 
2755 	/* Do the actual probe; save & return the result */
2756 	return (cp->cache[tgt] = scsi_probe(devp, waitfn));
2757 }
2758 
2759 
2760 /*
2761  * Function: sd_spin_up_unit
2762  *
2763  * Description: Issues the following commands to spin up the device:
2764  *		START STOP UNIT, and INQUIRY.
2765  *
2766  * Arguments: un - driver soft state (unit) structure
2767  *
2768  * Return Code: 0 - success
2769  *		EIO - failure
2770  *		EACCES - reservation conflict
2771  *
2772  * Context: Kernel thread context
2773  */
2774 
2775 static int
2776 sd_spin_up_unit(struct sd_lun *un)
2777 {
2778 	size_t	resid		= 0;
2779 	int	has_conflict	= FALSE;
2780 	uchar_t *bufaddr;
2781 
2782 	ASSERT(un != NULL);
2783 
2784 	/*
2785 	 * Send a throwaway START UNIT command.
2786 	 *
2787 	 * If we fail on this, we don't care presently what precisely
2788 	 * is wrong. EMC's arrays will also fail this with a check
2789 	 * condition (0x2/0x4/0x3) if the device is "inactive," but
2790 	 * we don't want to fail the attach because it may become
2791 	 * "active" later.
2792 	 */
2793 	if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT)
2794 	    == EACCES)
2795 		has_conflict = TRUE;
2796 
2797 	/*
2798 	 * Send another INQUIRY command to the target. This is necessary for
2799 	 * non-removable media direct access devices because their INQUIRY data
2800 	 * may not be fully qualified until they are spun up (perhaps via the
2801 	 * START command above). Note: This seems to be needed for some
2802 	 * legacy devices only. The INQUIRY command should succeed even if a
2803 	 * Reservation Conflict is present.
2804 	 */
2805 	bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
2806 	if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) {
2807 		kmem_free(bufaddr, SUN_INQSIZE);
2808 		return (EIO);
2809 	}
2810 
2811 	/*
2812 	 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
2813 * Note that this routine does not return a failure here even if the 2814 * INQUIRY command did not return any data. This is a legacy behavior. 2815 */ 2816 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 2817 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 2818 } 2819 2820 kmem_free(bufaddr, SUN_INQSIZE); 2821 2822 /* If we hit a reservation conflict above, tell the caller. */ 2823 if (has_conflict == TRUE) { 2824 return (EACCES); 2825 } 2826 2827 return (0); 2828 } 2829 2830 #ifdef _LP64 2831 /* 2832 * Function: sd_enable_descr_sense 2833 * 2834 * Description: This routine attempts to select descriptor sense format 2835 * using the Control mode page. Devices that support 64 bit 2836 * LBAs (for >2TB luns) should also implement descriptor 2837 * sense data so we will call this function whenever we see 2838 * a lun larger than 2TB. If for some reason the device 2839 * supports 64 bit LBAs but doesn't support descriptor sense 2840 * presumably the mode select will fail. Everything will 2841 * continue to work normally except that we will not get 2842 * complete sense data for commands that fail with an LBA 2843 * larger than 32 bits. 2844 * 2845 * Arguments: un - driver soft state (unit) structure 2846 * 2847 * Context: Kernel thread context only 2848 */ 2849 2850 static void 2851 sd_enable_descr_sense(struct sd_lun *un) 2852 { 2853 uchar_t *header; 2854 struct mode_control_scsi3 *ctrl_bufp; 2855 size_t buflen; 2856 size_t bd_len; 2857 2858 /* 2859 * Read MODE SENSE page 0xA, Control Mode Page 2860 */ 2861 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 2862 sizeof (struct mode_control_scsi3); 2863 header = kmem_zalloc(buflen, KM_SLEEP); 2864 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 2865 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) { 2866 SD_ERROR(SD_LOG_COMMON, un, 2867 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 2868 goto eds_exit; 2869 } 2870 2871 /* 2872 * Determine size of Block Descriptors in order to locate 2873 * the mode page data. ATAPI devices return 0, SCSI devices 2874 * should return MODE_BLK_DESC_LENGTH. 2875 */ 2876 bd_len = ((struct mode_header *)header)->bdesc_length; 2877 2878 ctrl_bufp = (struct mode_control_scsi3 *) 2879 (header + MODE_HEADER_LENGTH + bd_len); 2880 2881 /* 2882 * Clear PS bit for MODE SELECT 2883 */ 2884 ctrl_bufp->mode_page.ps = 0; 2885 2886 /* 2887 * Set D_SENSE to enable descriptor sense format. 2888 */ 2889 ctrl_bufp->d_sense = 1; 2890 2891 /* 2892 * Use MODE SELECT to commit the change to the D_SENSE bit 2893 */ 2894 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 2895 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) { 2896 SD_INFO(SD_LOG_COMMON, un, 2897 "sd_enable_descr_sense: mode select ctrl page failed\n"); 2898 goto eds_exit; 2899 } 2900 2901 eds_exit: 2902 kmem_free(header, buflen); 2903 } 2904 #endif /* _LP64 */ 2905 2906 2907 /* 2908 * Function: sd_set_mmc_caps 2909 * 2910 * Description: This routine determines if the device is MMC compliant and if 2911 * the device supports CDDA via a mode sense of the CDVD 2912 * capabilities mode page. Also checks if the device is a 2913 * dvdram writable device. 
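 *
 *		Editor's note on the buffer layout parsed below: a group-2
 *		MODE SENSE reply is laid out as
 *
 *			[mode_header_grp2][block descriptor(s), bd_len bytes]
 *			[page 0x2A data]
 *
 *		so the capabilities page lives at
 *		buf + MODE_HEADER_LENGTH_GRP2 + bd_len, which is exactly how
 *		sense_page is computed in the function body.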
2914 * 2915 * Arguments: un - driver soft state (unit) structure 2916 * 2917 * Context: Kernel thread context only 2918 */ 2919 2920 static void 2921 sd_set_mmc_caps(struct sd_lun *un) 2922 { 2923 struct mode_header_grp2 *sense_mhp; 2924 uchar_t *sense_page; 2925 caddr_t buf; 2926 int bd_len; 2927 int status; 2928 struct uscsi_cmd com; 2929 int rtn; 2930 uchar_t *out_data_rw, *out_data_hd; 2931 uchar_t *rqbuf_rw, *rqbuf_hd; 2932 2933 ASSERT(un != NULL); 2934 2935 /* 2936 * The flags which will be set in this function are - mmc compliant, 2937 * dvdram writable device, cdda support. Initialize them to FALSE 2938 * and if a capability is detected - it will be set to TRUE. 2939 */ 2940 un->un_f_mmc_cap = FALSE; 2941 un->un_f_dvdram_writable_device = FALSE; 2942 un->un_f_cfg_cdda = FALSE; 2943 2944 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 2945 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 2946 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 2947 2948 if (status != 0) { 2949 /* command failed; just return */ 2950 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 2951 return; 2952 } 2953 /* 2954 * If the mode sense request for the CDROM CAPABILITIES 2955 * page (0x2A) succeeds the device is assumed to be MMC. 2956 */ 2957 un->un_f_mmc_cap = TRUE; 2958 2959 /* Get to the page data */ 2960 sense_mhp = (struct mode_header_grp2 *)buf; 2961 bd_len = (sense_mhp->bdesc_length_hi << 8) | 2962 sense_mhp->bdesc_length_lo; 2963 if (bd_len > MODE_BLK_DESC_LENGTH) { 2964 /* 2965 * We did not get back the expected block descriptor 2966 * length so we cannot determine if the device supports 2967 * CDDA. However, we still indicate the device is MMC 2968 * according to the successful response to the page 2969 * 0x2A mode sense request. 2970 */ 2971 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 2972 "sd_set_mmc_caps: Mode Sense returned " 2973 "invalid block descriptor length\n"); 2974 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 2975 return; 2976 } 2977 2978 /* See if read CDDA is supported */ 2979 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 2980 bd_len); 2981 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 2982 2983 /* See if writing DVD RAM is supported. */ 2984 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 2985 if (un->un_f_dvdram_writable_device == TRUE) { 2986 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 2987 return; 2988 } 2989 2990 /* 2991 * If the device presents DVD or CD capabilities in the mode 2992 * page, we can return here since a RRD will not have 2993 * these capabilities. 2994 */ 2995 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 2996 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 2997 return; 2998 } 2999 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3000 3001 /* 3002 * If un->un_f_dvdram_writable_device is still FALSE, 3003 * check for a Removable Rigid Disk (RRD). A RRD 3004 * device is identified by the features RANDOM_WRITABLE and 3005 * HARDWARE_DEFECT_MANAGEMENT. 
3006 	 */
3007 	out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3008 	rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3009 
3010 	rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw,
3011 	    SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3012 	    RANDOM_WRITABLE);
3013 	if (rtn != 0) {
3014 		kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3015 		kmem_free(rqbuf_rw, SENSE_LENGTH);
3016 		return;
3017 	}
3018 
3019 	out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3020 	rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3021 
3022 	rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd,
3023 	    SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3024 	    HARDWARE_DEFECT_MANAGEMENT);
3025 	if (rtn == 0) {
3026 		/*
3027 		 * We have good information, check for random writable
3028 		 * and hardware defect features.
3029 		 */
3030 		if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3031 		    (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
3032 			un->un_f_dvdram_writable_device = TRUE;
3033 		}
3034 	}
3035 
3036 	kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3037 	kmem_free(rqbuf_rw, SENSE_LENGTH);
3038 	kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3039 	kmem_free(rqbuf_hd, SENSE_LENGTH);
3040 }
3041 
3042 /*
3043  * Function: sd_check_for_writable_cd
3044  *
3045  * Description: This routine determines if the media in the device is
3046  *		writable or not. It uses the GET CONFIGURATION command (0x46)
3047  *		to determine if the media is writable.
3048  *
3049  * Arguments: un - driver soft state (unit) structure
3050  *
3051  * Context: Never called at interrupt context.
3052  */
3053 
3054 static void
3055 sd_check_for_writable_cd(struct sd_lun *un)
3056 {
3057 	struct uscsi_cmd		com;
3058 	uchar_t				*out_data;
3059 	uchar_t				*rqbuf;
3060 	int				rtn;
3061 	uchar_t				*out_data_rw, *out_data_hd;
3062 	uchar_t				*rqbuf_rw, *rqbuf_hd;
3063 	struct mode_header_grp2		*sense_mhp;
3064 	uchar_t				*sense_page;
3065 	caddr_t				buf;
3066 	int				bd_len;
3067 	int				status;
3068 
3069 	ASSERT(un != NULL);
3070 	ASSERT(mutex_owned(SD_MUTEX(un)));
3071 
3072 	/*
3073 	 * Initialize writable media to FALSE; it is set to TRUE only if
3074 	 * the configuration information tells us the media is writable.
3075 	 */
3076 	un->un_f_mmc_writable_media = FALSE;
3077 	mutex_exit(SD_MUTEX(un));
3078 
3079 	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
3080 	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3081 
3082 	rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH,
3083 	    out_data, SD_PROFILE_HEADER_LEN);
3084 
3085 	mutex_enter(SD_MUTEX(un));
3086 	if (rtn == 0) {
3087 		/*
3088 		 * We have good information, check for writable DVD.
3089 		 */
3090 		if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
3091 			un->un_f_mmc_writable_media = TRUE;
3092 			kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3093 			kmem_free(rqbuf, SENSE_LENGTH);
3094 			return;
3095 		}
3096 	}
3097 
3098 	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3099 	kmem_free(rqbuf, SENSE_LENGTH);
3100 
3101 	/*
3102 	 * Determine if this is an RRD type device.
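	 *
	 * Editor's note on the feature checks that follow (per MMC GET
	 * CONFIGURATION; descriptor offsets inferred from the code): the
	 * first feature descriptor follows the 8-byte feature header, so
	 * bytes 8-9 carry the feature code and bit 0 of byte 10 the
	 * "current" flag; the tests check the low byte of the code and,
	 * where required, that the feature is currently active.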
3103 	 */
3104 	mutex_exit(SD_MUTEX(un));
3105 	buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3106 	status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf,
3107 	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);
3108 	mutex_enter(SD_MUTEX(un));
3109 	if (status != 0) {
3110 		/* command failed; just return */
3111 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3112 		return;
3113 	}
3114 
3115 	/* Get to the page data */
3116 	sense_mhp = (struct mode_header_grp2 *)buf;
3117 	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
3118 	if (bd_len > MODE_BLK_DESC_LENGTH) {
3119 		/*
3120 		 * We did not get back the expected block descriptor length so
3121 		 * we cannot check the mode page.
3122 		 */
3123 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3124 		    "sd_check_for_writable_cd: Mode Sense returned "
3125 		    "invalid block descriptor length\n");
3126 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3127 		return;
3128 	}
3129 
3130 	/*
3131 	 * If the device presents DVD or CD capabilities in the mode
3132 	 * page, we can return here since an RRD device will not have
3133 	 * these capabilities.
3134 	 */
3135 	sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
3136 	if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3137 		kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3138 		return;
3139 	}
3140 	kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3141 
3142 	/*
3143 	 * If un->un_f_mmc_writable_media is still FALSE,
3144 	 * check for RRD type media. An RRD device is identified
3145 	 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
3146 	 */
3147 	mutex_exit(SD_MUTEX(un));
3148 	out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3149 	rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3150 
3151 	rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw,
3152 	    SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3153 	    RANDOM_WRITABLE);
3154 	if (rtn != 0) {
3155 		kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3156 		kmem_free(rqbuf_rw, SENSE_LENGTH);
3157 		mutex_enter(SD_MUTEX(un));
3158 		return;
3159 	}
3160 
3161 	out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3162 	rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3163 
3164 	rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd,
3165 	    SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3166 	    HARDWARE_DEFECT_MANAGEMENT);
3167 	mutex_enter(SD_MUTEX(un));
3168 	if (rtn == 0) {
3169 		/*
3170 		 * We have good information, check for random writable
3171 		 * and hardware defect features as current.
3172 		 */
3173 		if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3174 		    (out_data_rw[10] & 0x1) &&
3175 		    (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
3176 		    (out_data_hd[10] & 0x1)) {
3177 			un->un_f_mmc_writable_media = TRUE;
3178 		}
3179 	}
3180 
3181 	kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3182 	kmem_free(rqbuf_rw, SENSE_LENGTH);
3183 	kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3184 	kmem_free(rqbuf_hd, SENSE_LENGTH);
3185 }
3186 
3187 /*
3188  * Function: sd_read_unit_properties
3189  *
3190  * Description: The following implements a property lookup mechanism.
3191  *		Properties for particular disks (keyed on vendor, model
3192  *		and rev numbers) are sought in the sd.conf file via
3193  *		sd_process_sdconf_file(), and if not found there, are
3194  *		looked for in a list hardcoded in this driver via
3195  *		sd_process_sdconf_table(). Once located, the properties
3196  *		are used to update the driver unit structure.
3197 * 3198 * Arguments: un - driver soft state (unit) structure 3199 */ 3200 3201 static void 3202 sd_read_unit_properties(struct sd_lun *un) 3203 { 3204 /* 3205 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3206 * the "sd-config-list" property (from the sd.conf file) or if 3207 * there was not a match for the inquiry vid/pid. If this event 3208 * occurs the static driver configuration table is searched for 3209 * a match. 3210 */ 3211 ASSERT(un != NULL); 3212 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3213 sd_process_sdconf_table(un); 3214 } 3215 3216 /* check for LSI device */ 3217 sd_is_lsi(un); 3218 3219 /* 3220 * Set this in sd.conf to 0 in order to disable kstats. The default 3221 * is 1, so they are enabled by default. 3222 */ 3223 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 3224 SD_DEVINFO(un), DDI_PROP_DONTPASS, "enable-partition-kstats", 1)); 3225 } 3226 3227 3228 /* 3229 * Function: sd_process_sdconf_file 3230 * 3231 * Description: Use ddi_getlongprop to obtain the properties from the 3232 * driver's config file (ie, sd.conf) and update the driver 3233 * soft state structure accordingly. 3234 * 3235 * Arguments: un - driver soft state (unit) structure 3236 * 3237 * Return Code: SD_SUCCESS - The properties were successfully set according 3238 * to the driver configuration file. 3239 * SD_FAILURE - The driver config list was not obtained or 3240 * there was no vid/pid match. This indicates that 3241 * the static config table should be used. 3242 * 3243 * The config file has a property, "sd-config-list", which consists of 3244 * one or more duplets as follows: 3245 * 3246 * sd-config-list= 3247 * <duplet>, 3248 * [<duplet>,] 3249 * [<duplet>]; 3250 * 3251 * The structure of each duplet is as follows: 3252 * 3253 * <duplet>:= <vid+pid>,<data-property-name_list> 3254 * 3255 * The first entry of the duplet is the device ID string (the concatenated 3256 * vid & pid; not to be confused with a device_id). This is defined in 3257 * the same way as in the sd_disk_table. 3258 * 3259 * The second part of the duplet is a string that identifies a 3260 * data-property-name-list. The data-property-name-list is defined as 3261 * follows: 3262 * 3263 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3264 * 3265 * The syntax of <data-property-name> depends on the <version> field. 3266 * 3267 * If version = SD_CONF_VERSION_1 we have the following syntax: 3268 * 3269 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3270 * 3271 * where the prop0 value will be used to set prop0 if bit0 set in the 3272 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3273 * 3274 */ 3275 3276 static int 3277 sd_process_sdconf_file(struct sd_lun *un) 3278 { 3279 char *config_list = NULL; 3280 int config_list_len; 3281 int len; 3282 int dupletlen = 0; 3283 char *vidptr; 3284 int vidlen; 3285 char *dnlist_ptr; 3286 char *dataname_ptr; 3287 int dnlist_len; 3288 int dataname_len; 3289 int *data_list; 3290 int data_list_len; 3291 int rval = SD_FAILURE; 3292 int i; 3293 3294 ASSERT(un != NULL); 3295 3296 /* Obtain the configuration list associated with the .conf file */ 3297 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3298 sd_config_list, (caddr_t)&config_list, &config_list_len) 3299 != DDI_PROP_SUCCESS) { 3300 return (SD_FAILURE); 3301 } 3302 3303 /* 3304 * Compare vids in each duplet to the inquiry vid - if a match is 3305 * made, get the data value and update the soft state structure 3306 * accordingly. 
3307 * 3308 * Note: This algorithm is complex and difficult to maintain. It should 3309 * be replaced with a more robust implementation. 3310 */ 3311 for (len = config_list_len, vidptr = config_list; len > 0; 3312 vidptr += dupletlen, len -= dupletlen) { 3313 /* 3314 * Note: The assumption here is that each vid entry is on 3315 * a unique line from its associated duplet. 3316 */ 3317 vidlen = dupletlen = (int)strlen(vidptr); 3318 if ((vidlen == 0) || 3319 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3320 dupletlen++; 3321 continue; 3322 } 3323 3324 /* 3325 * dnlist contains 1 or more blank separated 3326 * data-property-name entries 3327 */ 3328 dnlist_ptr = vidptr + vidlen + 1; 3329 dnlist_len = (int)strlen(dnlist_ptr); 3330 dupletlen += dnlist_len + 2; 3331 3332 /* 3333 * Set a pointer for the first data-property-name 3334 * entry in the list 3335 */ 3336 dataname_ptr = dnlist_ptr; 3337 dataname_len = 0; 3338 3339 /* 3340 * Loop through all data-property-name entries in the 3341 * data-property-name-list setting the properties for each. 3342 */ 3343 while (dataname_len < dnlist_len) { 3344 int version; 3345 3346 /* 3347 * Determine the length of the current 3348 * data-property-name entry by indexing until a 3349 * blank or NULL is encountered. When the space is 3350 * encountered reset it to a NULL for compliance 3351 * with ddi_getlongprop(). 3352 */ 3353 for (i = 0; ((dataname_ptr[i] != ' ') && 3354 (dataname_ptr[i] != '\0')); i++) { 3355 ; 3356 } 3357 3358 dataname_len += i; 3359 /* If not null terminated, Make it so */ 3360 if (dataname_ptr[i] == ' ') { 3361 dataname_ptr[i] = '\0'; 3362 } 3363 dataname_len++; 3364 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3365 "sd_process_sdconf_file: disk:%s, data:%s\n", 3366 vidptr, dataname_ptr); 3367 3368 /* Get the data list */ 3369 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0, 3370 dataname_ptr, (caddr_t)&data_list, &data_list_len) 3371 != DDI_PROP_SUCCESS) { 3372 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3373 "sd_process_sdconf_file: data property (%s)" 3374 " has no value\n", dataname_ptr); 3375 dataname_ptr = dnlist_ptr + dataname_len; 3376 continue; 3377 } 3378 3379 version = data_list[0]; 3380 3381 if (version == SD_CONF_VERSION_1) { 3382 sd_tunables values; 3383 3384 /* Set the properties */ 3385 if (sd_chk_vers1_data(un, data_list[1], 3386 &data_list[2], data_list_len, dataname_ptr) 3387 == SD_SUCCESS) { 3388 sd_get_tunables_from_conf(un, 3389 data_list[1], &data_list[2], 3390 &values); 3391 sd_set_vers1_properties(un, 3392 data_list[1], &values); 3393 rval = SD_SUCCESS; 3394 } else { 3395 rval = SD_FAILURE; 3396 } 3397 } else { 3398 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3399 "data property %s version 0x%x is invalid.", 3400 dataname_ptr, version); 3401 rval = SD_FAILURE; 3402 } 3403 kmem_free(data_list, data_list_len); 3404 dataname_ptr = dnlist_ptr + dataname_len; 3405 } 3406 } 3407 3408 /* free up the memory allocated by ddi_getlongprop */ 3409 if (config_list) { 3410 kmem_free(config_list, config_list_len); 3411 } 3412 3413 return (rval); 3414 } 3415 3416 /* 3417 * Function: sd_get_tunables_from_conf() 3418 * 3419 * 3420 * This function reads the data list from the sd.conf file and pulls 3421 * the values that can have numeric values as arguments and places 3422 * the values in the apropriate sd_tunables member. 
3423  * Since the order of the data list members varies across platforms,
3424  * this function reads them from the data list in a platform-specific
3425  * order and places them into the correct sd_tunables member, which
3426  * is consistent across all platforms.
3427  */
3428 static void
3429 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
3430     sd_tunables *values)
3431 {
3432 	int i;
3433 	int mask;
3434
3435 	bzero(values, sizeof (sd_tunables));
3436
3437 	for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
3438
3439 		mask = 1 << i;
3440 		if (mask > flags) {
3441 			break;
3442 		}
3443
3444 		switch (mask & flags) {
3445 		case 0:	/* This mask bit not set in flags */
3446 			continue;
3447 		case SD_CONF_BSET_THROTTLE:
3448 			values->sdt_throttle = data_list[i];
3449 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
3450 			    "sd_get_tunables_from_conf: throttle = %d\n",
3451 			    values->sdt_throttle);
3452 			break;
3453 		case SD_CONF_BSET_CTYPE:
3454 			values->sdt_ctype = data_list[i];
3455 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
3456 			    "sd_get_tunables_from_conf: ctype = %d\n",
3457 			    values->sdt_ctype);
3458 			break;
3459 		case SD_CONF_BSET_NRR_COUNT:
3460 			values->sdt_not_rdy_retries = data_list[i];
3461 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
3462 			    "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
3463 			    values->sdt_not_rdy_retries);
3464 			break;
3465 		case SD_CONF_BSET_BSY_RETRY_COUNT:
3466 			values->sdt_busy_retries = data_list[i];
3467 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
3468 			    "sd_get_tunables_from_conf: busy_retries = %d\n",
3469 			    values->sdt_busy_retries);
3470 			break;
3471 		case SD_CONF_BSET_RST_RETRIES:
3472 			values->sdt_reset_retries = data_list[i];
3473 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
3474 			    "sd_get_tunables_from_conf: reset_retries = %d\n",
3475 			    values->sdt_reset_retries);
3476 			break;
3477 		case SD_CONF_BSET_RSV_REL_TIME:
3478 			values->sdt_reserv_rel_time = data_list[i];
3479 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
3480 			    "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
3481 			    values->sdt_reserv_rel_time);
3482 			break;
3483 		case SD_CONF_BSET_MIN_THROTTLE:
3484 			values->sdt_min_throttle = data_list[i];
3485 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
3486 			    "sd_get_tunables_from_conf: min_throttle = %d\n",
3487 			    values->sdt_min_throttle);
3488 			break;
3489 		case SD_CONF_BSET_DISKSORT_DISABLED:
3490 			values->sdt_disk_sort_dis = data_list[i];
3491 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
3492 			    "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
3493 			    values->sdt_disk_sort_dis);
3494 			break;
3495 		case SD_CONF_BSET_LUN_RESET_ENABLED:
3496 			values->sdt_lun_reset_enable = data_list[i];
3497 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
3498 			    "sd_get_tunables_from_conf: lun_reset_enable = %d"
3499 			    "\n", values->sdt_lun_reset_enable);
3500 			break;
3501 		}
3502 	}
3503 }
3504
3505 /*
3506  * Function: sd_process_sdconf_table
3507  *
3508  * Description: Search the static configuration table for a match on the
3509  *		inquiry vid/pid and update the driver soft state structure
3510  *		according to the table property values for the device.
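 *
 * Entries are matched in table order against the inquiry vid/pid
 * and the first match wins; the <flags> word then selects
 * properties exactly as it does for version-1 sd.conf entries.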
3511 * 3512 * The form of a configuration table entry is: 3513 * <vid+pid>,<flags>,<property-data> 3514 * "SEAGATE ST42400N",1,63,0,0 (Fibre) 3515 * "SEAGATE ST42400N",1,63,0,0,0,0 (Sparc) 3516 * "SEAGATE ST42400N",1,63,0,0,0,0,0,0,0,0,0,0 (Intel) 3517 * 3518 * Arguments: un - driver soft state (unit) structure 3519 */ 3520 3521 static void 3522 sd_process_sdconf_table(struct sd_lun *un) 3523 { 3524 char *id = NULL; 3525 int table_index; 3526 int idlen; 3527 3528 ASSERT(un != NULL); 3529 for (table_index = 0; table_index < sd_disk_table_size; 3530 table_index++) { 3531 id = sd_disk_table[table_index].device_id; 3532 idlen = strlen(id); 3533 if (idlen == 0) { 3534 continue; 3535 } 3536 3537 /* 3538 * The static configuration table currently does not 3539 * implement version 10 properties. Additionally, 3540 * multiple data-property-name entries are not 3541 * implemented in the static configuration table. 3542 */ 3543 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 3544 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3545 "sd_process_sdconf_table: disk %s\n", id); 3546 sd_set_vers1_properties(un, 3547 sd_disk_table[table_index].flags, 3548 sd_disk_table[table_index].properties); 3549 break; 3550 } 3551 } 3552 } 3553 3554 3555 /* 3556 * Function: sd_sdconf_id_match 3557 * 3558 * Description: This local function implements a case sensitive vid/pid 3559 * comparison as well as the boundary cases of wild card and 3560 * multiple blanks. 3561 * 3562 * Note: An implicit assumption made here is that the scsi 3563 * inquiry structure will always keep the vid, pid and 3564 * revision strings in consecutive sequence, so they can be 3565 * read as a single string. If this assumption is not the 3566 * case, a separate string, to be used for the check, needs 3567 * to be built with these strings concatenated. 3568 * 3569 * Arguments: un - driver soft state (unit) structure 3570 * id - table or config file vid/pid 3571 * idlen - length of the vid/pid (bytes) 3572 * 3573 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3574 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3575 */ 3576 3577 static int 3578 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 3579 { 3580 struct scsi_inquiry *sd_inq; 3581 int rval = SD_SUCCESS; 3582 3583 ASSERT(un != NULL); 3584 sd_inq = un->un_sd->sd_inq; 3585 ASSERT(id != NULL); 3586 3587 /* 3588 * We use the inq_vid as a pointer to a buffer containing the 3589 * vid and pid and use the entire vid/pid length of the table 3590 * entry for the comparison. This works because the inq_pid 3591 * data member follows inq_vid in the scsi_inquiry structure. 3592 */ 3593 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 3594 /* 3595 * The user id string is compared to the inquiry vid/pid 3596 * using a case insensitive comparison and ignoring 3597 * multiple spaces. 3598 */ 3599 rval = sd_blank_cmp(un, id, idlen); 3600 if (rval != SD_SUCCESS) { 3601 /* 3602 * User id strings that start and end with a "*" 3603 * are a special case. These do not have a 3604 * specific vendor, and the product string can 3605 * appear anywhere in the 16 byte PID portion of 3606 * the inquiry data. This is a simple strstr() 3607 * type search for the user id in the inquiry data. 
3608 */ 3609 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 3610 char *pidptr = &id[1]; 3611 int i; 3612 int j; 3613 int pidstrlen = idlen - 2; 3614 j = sizeof (SD_INQUIRY(un)->inq_pid) - 3615 pidstrlen; 3616 3617 if (j < 0) { 3618 return (SD_FAILURE); 3619 } 3620 for (i = 0; i < j; i++) { 3621 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 3622 pidptr, pidstrlen) == 0) { 3623 rval = SD_SUCCESS; 3624 break; 3625 } 3626 } 3627 } 3628 } 3629 } 3630 return (rval); 3631 } 3632 3633 3634 /* 3635 * Function: sd_blank_cmp 3636 * 3637 * Description: If the id string starts and ends with a space, treat 3638 * multiple consecutive spaces as equivalent to a single 3639 * space. For example, this causes a sd_disk_table entry 3640 * of " NEC CDROM " to match a device's id string of 3641 * "NEC CDROM". 3642 * 3643 * Note: The success exit condition for this routine is if 3644 * the pointer to the table entry is '\0' and the cnt of 3645 * the inquiry length is zero. This will happen if the inquiry 3646 * string returned by the device is padded with spaces to be 3647 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3648 * SCSI spec states that the inquiry string is to be padded with 3649 * spaces. 3650 * 3651 * Arguments: un - driver soft state (unit) structure 3652 * id - table or config file vid/pid 3653 * idlen - length of the vid/pid (bytes) 3654 * 3655 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3656 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3657 */ 3658 3659 static int 3660 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3661 { 3662 char *p1; 3663 char *p2; 3664 int cnt; 3665 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3666 sizeof (SD_INQUIRY(un)->inq_pid); 3667 3668 ASSERT(un != NULL); 3669 p2 = un->un_sd->sd_inq->inq_vid; 3670 ASSERT(id != NULL); 3671 p1 = id; 3672 3673 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3674 /* 3675 * Note: string p1 is terminated by a NUL but string p2 3676 * isn't. The end of p2 is determined by cnt. 3677 */ 3678 for (;;) { 3679 /* skip over any extra blanks in both strings */ 3680 while ((*p1 != '\0') && (*p1 == ' ')) { 3681 p1++; 3682 } 3683 while ((cnt != 0) && (*p2 == ' ')) { 3684 p2++; 3685 cnt--; 3686 } 3687 3688 /* compare the two strings */ 3689 if ((cnt == 0) || 3690 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3691 break; 3692 } 3693 while ((cnt > 0) && 3694 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3695 p1++; 3696 p2++; 3697 cnt--; 3698 } 3699 } 3700 } 3701 3702 /* return SD_SUCCESS if both strings match */ 3703 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE);
3704 }
3705
3706
3707 /*
3708  * Function: sd_chk_vers1_data
3709  *
3710  * Description: Verify the version 1 device properties provided by the
3711  *		user via the configuration file.
3712  *
3713  * Arguments: un - driver soft state (unit) structure
3714  *	      flags - integer mask indicating properties to be set
3715  *	      prop_list - integer list of property values
3716  *	      list_len - length of user provided data
3717  *
3718  * Return Code: SD_SUCCESS - Indicates the user provided data is valid
3719  *		SD_FAILURE - Indicates the user provided data is invalid
3720  */
3721
3722 static int
3723 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
3724     int list_len, char *dataname_ptr)
3725 {
3726 	int i;
3727 	int mask;
3728 	int index = 0;
3729
3730 	ASSERT(un != NULL);
3731
3732 	/* Check for a NULL property name and list */
3733 	if (dataname_ptr == NULL) {
3734 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3735 		    "sd_chk_vers1_data: NULL data property name.");
3736 		return (SD_FAILURE);
3737 	}
3738 	if (prop_list == NULL) {
3739 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3740 		    "sd_chk_vers1_data: %s NULL data property list.",
3741 		    dataname_ptr);
3742 		return (SD_FAILURE);
3743 	}
3744
3745 	/* Display a warning if undefined bits are set in the flags */
3746 	if (flags & ~SD_CONF_BIT_MASK) {
3747 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3748 		    "sd_chk_vers1_data: invalid bits 0x%x in data list %s. "
3749 		    "Properties not set.",
3750 		    (flags & ~SD_CONF_BIT_MASK), dataname_ptr);
3751 		return (SD_FAILURE);
3752 	}
3753
3754 	/*
3755 	 * Verify the length of the list by counting the bits set in the
3756 	 * flags and validating that the property list supplies a value
3757 	 * for each of them (plus the version and flag words). Note that
3758 	 * the mask must be computed before the test; testing the stale
3759 	 * mask from the previous iteration would examine bit 0 twice and
3760 	 * never examine the highest bit.
	 */
	for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
		mask = 1 << i;
		if (flags & mask) {
 			index++;
 		}
3764 	}
3765 	if ((list_len / sizeof (int)) < (index + 2)) {
3766 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3767 		    "sd_chk_vers1_data: "
3768 		    "Data property list %s size is incorrect. "
3769 		    "Properties not set.", dataname_ptr);
3770 		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: "
3771 		    "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS);
3772 		return (SD_FAILURE);
3773 	}
3774 	return (SD_SUCCESS);
3775 }
3776
3777
3778 /*
3779  * Function: sd_set_vers1_properties
3780  *
3781  * Description: Set version 1 device properties based on a property list
3782  *		retrieved from the driver configuration file or static
3783  *		configuration table. Version 1 properties have the format:
3784  *
3785  *	<data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
3786  *
3787  *		where the prop0 value will be used to set prop0 if bit 0
3788  *		is set in the flags
3789  *
3790  * Arguments: un - driver soft state (unit) structure
3791  *	      flags - integer mask indicating properties to be set
3792  *	      prop_list - integer list of property values
3793  */
3794
3795 static void
3796 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list)
3797 {
3798 	ASSERT(un != NULL);
3799
3800 	/*
3801 	 * Set the flag to indicate cache is to be disabled. An attempt
3802 	 * to disable the cache via sd_disable_caching() will be made
3803 	 * later during attach once the basic initialization is complete.
3804 */ 3805 if (flags & SD_CONF_BSET_NOCACHE) { 3806 un->un_f_opt_disable_cache = TRUE; 3807 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3808 "sd_set_vers1_properties: caching disabled flag set\n"); 3809 } 3810 3811 /* CD-specific configuration parameters */ 3812 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 3813 un->un_f_cfg_playmsf_bcd = TRUE; 3814 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3815 "sd_set_vers1_properties: playmsf_bcd set\n"); 3816 } 3817 if (flags & SD_CONF_BSET_READSUB_BCD) { 3818 un->un_f_cfg_readsub_bcd = TRUE; 3819 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3820 "sd_set_vers1_properties: readsub_bcd set\n"); 3821 } 3822 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 3823 un->un_f_cfg_read_toc_trk_bcd = TRUE; 3824 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3825 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 3826 } 3827 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 3828 un->un_f_cfg_read_toc_addr_bcd = TRUE; 3829 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3830 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 3831 } 3832 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 3833 un->un_f_cfg_no_read_header = TRUE; 3834 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3835 "sd_set_vers1_properties: no_read_header set\n"); 3836 } 3837 if (flags & SD_CONF_BSET_READ_CD_XD4) { 3838 un->un_f_cfg_read_cd_xd4 = TRUE; 3839 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3840 "sd_set_vers1_properties: read_cd_xd4 set\n"); 3841 } 3842 3843 /* Support for devices which do not have valid/unique serial numbers */ 3844 if (flags & SD_CONF_BSET_FAB_DEVID) { 3845 un->un_f_opt_fab_devid = TRUE; 3846 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3847 "sd_set_vers1_properties: fab_devid bit set\n"); 3848 } 3849 3850 /* Support for user throttle configuration */ 3851 if (flags & SD_CONF_BSET_THROTTLE) { 3852 ASSERT(prop_list != NULL); 3853 un->un_saved_throttle = un->un_throttle = 3854 prop_list->sdt_throttle; 3855 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3856 "sd_set_vers1_properties: throttle set to %d\n", 3857 prop_list->sdt_throttle); 3858 } 3859 3860 /* Set the per disk retry count according to the conf file or table. 
*/ 3861 if (flags & SD_CONF_BSET_NRR_COUNT) { 3862 ASSERT(prop_list != NULL); 3863 if (prop_list->sdt_not_rdy_retries) { 3864 un->un_notready_retry_count = 3865 prop_list->sdt_not_rdy_retries; 3866 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3867 "sd_set_vers1_properties: not ready retry count" 3868 " set to %d\n", un->un_notready_retry_count); 3869 } 3870 } 3871 3872 /* The controller type is reported for generic disk driver ioctls */ 3873 if (flags & SD_CONF_BSET_CTYPE) { 3874 ASSERT(prop_list != NULL); 3875 switch (prop_list->sdt_ctype) { 3876 case CTYPE_CDROM: 3877 un->un_ctype = prop_list->sdt_ctype; 3878 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3879 "sd_set_vers1_properties: ctype set to " 3880 "CTYPE_CDROM\n"); 3881 break; 3882 case CTYPE_CCS: 3883 un->un_ctype = prop_list->sdt_ctype; 3884 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3885 "sd_set_vers1_properties: ctype set to " 3886 "CTYPE_CCS\n"); 3887 break; 3888 case CTYPE_ROD: /* RW optical */ 3889 un->un_ctype = prop_list->sdt_ctype; 3890 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3891 "sd_set_vers1_properties: ctype set to " 3892 "CTYPE_ROD\n"); 3893 break; 3894 default: 3895 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3896 "sd_set_vers1_properties: Could not set " 3897 "invalid ctype value (%d)", 3898 prop_list->sdt_ctype); 3899 } 3900 } 3901 3902 /* Purple failover timeout */ 3903 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 3904 ASSERT(prop_list != NULL); 3905 un->un_busy_retry_count = 3906 prop_list->sdt_busy_retries; 3907 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3908 "sd_set_vers1_properties: " 3909 "busy retry count set to %d\n", 3910 un->un_busy_retry_count); 3911 } 3912 3913 /* Purple reset retry count */ 3914 if (flags & SD_CONF_BSET_RST_RETRIES) { 3915 ASSERT(prop_list != NULL); 3916 un->un_reset_retry_count = 3917 prop_list->sdt_reset_retries; 3918 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3919 "sd_set_vers1_properties: " 3920 "reset retry count set to %d\n", 3921 un->un_reset_retry_count); 3922 } 3923 3924 /* Purple reservation release timeout */ 3925 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 3926 ASSERT(prop_list != NULL); 3927 un->un_reserve_release_time = 3928 prop_list->sdt_reserv_rel_time; 3929 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3930 "sd_set_vers1_properties: " 3931 "reservation release timeout set to %d\n", 3932 un->un_reserve_release_time); 3933 } 3934 3935 /* 3936 * Driver flag telling the driver to verify that no commands are pending 3937 * for a device before issuing a Test Unit Ready. This is a workaround 3938 * for a firmware bug in some Seagate eliteI drives. 3939 */ 3940 if (flags & SD_CONF_BSET_TUR_CHECK) { 3941 un->un_f_cfg_tur_check = TRUE; 3942 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3943 "sd_set_vers1_properties: tur queue check set\n"); 3944 } 3945 3946 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 3947 un->un_min_throttle = prop_list->sdt_min_throttle; 3948 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3949 "sd_set_vers1_properties: min throttle set to %d\n", 3950 un->un_min_throttle); 3951 } 3952 3953 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 3954 un->un_f_disksort_disabled = 3955 (prop_list->sdt_disk_sort_dis != 0) ? 3956 TRUE : FALSE; 3957 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3958 "sd_set_vers1_properties: disksort disabled " 3959 "flag set to %d\n", 3960 prop_list->sdt_disk_sort_dis); 3961 } 3962 3963 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 3964 un->un_f_lun_reset_enabled = 3965 (prop_list->sdt_lun_reset_enable != 0) ? 
3966 		    TRUE : FALSE;
3967 		SD_INFO(SD_LOG_ATTACH_DETACH, un,
3968 		    "sd_set_vers1_properties: lun reset enabled "
3969 		    "flag set to %d\n",
3970 		    prop_list->sdt_lun_reset_enable);
3971 	}
3972
3973 	/*
3974 	 * Validate the throttle values.
3975 	 * If any of the numbers are invalid, set everything to defaults.
3976 	 */
3977 	if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
3978 	    (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
3979 	    (un->un_min_throttle > un->un_throttle)) {
3980 		un->un_saved_throttle = un->un_throttle = sd_max_throttle;
3981 		un->un_min_throttle = sd_min_throttle;
3982 	}
3983 }
3984
3985 /*
3986  * Function: sd_is_lsi()
3987  *
3988  * Description: Check for LSI devices by stepping through the static
3989  *		device table to match the vid/pid.
3990  *
3991  * Args: un - ptr to sd_lun
3992  *
3993  * Notes: When adding a new LSI property, the new property must also
3994  *	  be added to this function.
3995  */
3996 static void
3997 sd_is_lsi(struct sd_lun *un)
3998 {
3999 	char	*id = NULL;
4000 	int	table_index;
4001 	int	idlen;
4002 	void	*prop;
4003
4004 	ASSERT(un != NULL);
4005 	for (table_index = 0; table_index < sd_disk_table_size;
4006 	    table_index++) {
4007 		id = sd_disk_table[table_index].device_id;
4008 		idlen = strlen(id);
4009 		if (idlen == 0) {
4010 			continue;
4011 		}
4012
4013 		if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
4014 			prop = sd_disk_table[table_index].properties;
4015 			if (prop == &lsi_properties ||
4016 			    prop == &lsi_oem_properties ||
4017 			    prop == &lsi_properties_scsi ||
4018 			    prop == &symbios_properties) {
4019 				un->un_f_cfg_is_lsi = TRUE;
4020 			}
4021 			break;
4022 		}
4023 	}
4024 }
4025
4026
4027 /*
4028  * The following routines support reading and interpretation of disk labels,
4029  * including Solaris BE (8-slice) vtoc's, Solaris LE (16-slice) vtoc's, and
4030  * fdisk tables.
4031  */
4032
4033 /*
4034  * Function: sd_validate_geometry
4035  *
4036  * Description: Read the label from the disk (if present). Update the unit's
4037  *		geometry and vtoc information from the data in the label.
4038  *		Verify that the label is valid.
4039  *
4040  * Arguments: un - driver soft state (unit) structure
4041  *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
4042  *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
4043  *		to use the USCSI "direct" chain and bypass the normal
4044  *		command waitq.
4045  *
4046  * Return Code: 0 - Successful completion
4047  *		EINVAL - Invalid value in un->un_tgt_blocksize or
4048  *		un->un_blockcount; or label on disk is corrupted
4049  *		or unreadable.
4050  *		EACCES - Reservation conflict at the device.
4051  *		ENOMEM - Resource allocation error
4052  *		ENOTSUP - geometry not applicable
4053  *
4054  * Context: Kernel thread only (can sleep).
4055  */
4056
4057 static int
4058 sd_validate_geometry(struct sd_lun *un, int path_flag)
4059 {
4060 	static char labelstring[128];
4061 	static char buf[256];
4062 	char	*label = NULL;
4063 	int	label_error = 0;
4064 	int	gvalid = un->un_f_geometry_is_valid;
4065 	int	lbasize;
4066 	uint_t	capacity;
4067 	int	count;
4068
4069 	ASSERT(un != NULL);
4070 	ASSERT(mutex_owned(SD_MUTEX(un)));
4071
4072 	/*
4073 	 * If the required values are not valid, then try getting them
4074 	 * once via read capacity. If that fails, then fail this call.
4075 	 * This is necessary with the new mpxio failover behavior in
4076 	 * the T300, where we can get an attach for the inactive path
4077 	 * before the active path. The inactive path fails commands with
4078 	 * sense data of 02,04,88, which happens on the read capacity
4079 	 * before mpxio has learned enough to know whether it should
4080 	 * force a failover or not (which it won't do at attach anyhow).
4081 	 * If the read capacity at attach time fails, un_tgt_blocksize and
4082 	 * un_blockcount won't be valid.
4083 	 */
4084 	if ((un->un_f_tgt_blocksize_is_valid != TRUE) ||
4085 	    (un->un_f_blockcount_is_valid != TRUE)) {
4086 		uint64_t	cap;
4087 		uint32_t	lbasz;
4088 		int		rval;
4089
4090 		mutex_exit(SD_MUTEX(un));
4091 		rval = sd_send_scsi_READ_CAPACITY(un, &cap,
4092 		    &lbasz, SD_PATH_DIRECT);
4093 		mutex_enter(SD_MUTEX(un));
4094 		if (rval == 0) {
4095 			/*
4096 			 * The following relies on
4097 			 * sd_send_scsi_READ_CAPACITY never
4098 			 * returning 0 for capacity and/or lbasize.
4099 			 */
4100 			sd_update_block_info(un, lbasz, cap);
4101 		}
4102
4103 		if ((un->un_f_tgt_blocksize_is_valid != TRUE) ||
4104 		    (un->un_f_blockcount_is_valid != TRUE)) {
4105 			return (EINVAL);
4106 		}
4107 	}
4108
4109 	/*
4110 	 * Copy the lbasize and capacity so that if they're reset while we're
4111 	 * not holding the SD_MUTEX, we will continue to use valid values
4112 	 * after the SD_MUTEX is reacquired. (4119659)
4113 	 */
4114 	lbasize = un->un_tgt_blocksize;
4115 	capacity = un->un_blockcount;
4116
4117 #if defined(_SUNOS_VTOC_16)
4118 	/*
4119 	 * Set up the "whole disk" fdisk partition; this should always
4120 	 * exist, regardless of whether the disk contains an fdisk table
4121 	 * or vtoc.
4122 	 */
4123 	un->un_map[P0_RAW_DISK].dkl_cylno = 0;
4124 	un->un_map[P0_RAW_DISK].dkl_nblk = capacity;
4125 #endif
4126
4127 	/*
4128 	 * Refresh the logical and physical geometry caches
4129 	 * (data from MODE SENSE format/rigid disk geometry pages,
4130 	 * and scsi_ifgetcap("geometry")).
4131 	 */
4132 	sd_resync_geom_caches(un, capacity, lbasize, path_flag);
4133
4134 	label_error = sd_use_efi(un, path_flag);
4135 	if (label_error == 0) {
4136 		/* found a valid EFI label */
4137 		SD_TRACE(SD_LOG_IO_PARTITION, un,
4138 		    "sd_validate_geometry: found EFI label\n");
4139 		un->un_solaris_offset = 0;
4140 		un->un_solaris_size = capacity;
4141 		return (ENOTSUP);
4142 	}
4143 	if (un->un_blockcount > DK_MAX_BLOCKS) {
4144 		if (label_error == ESRCH) {
4145 			/*
4146 			 * they've configured a LUN over 1TB, but used
4147 			 * format.dat to restrict format's view of the
4148 			 * capacity to be under 1TB
4149 			 */
4150 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4151 "is >1TB and has a VTOC label: use format(1M) to either decrease the");
4152 			scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
4153 "size to be < 1TB or relabel the disk with an EFI label");
4154 		} else {
4155 			/* unlabeled disk over 1TB */
4156 			return (ENOTSUP);
4157 		}
4158 	}
4159 	label_error = 0;
4160
4161 	/*
4162 	 * At this point it is either labeled with a VTOC or it is
4163 	 * under 1TB.
4164 	 */
4165
4166 	/*
4167 	 * Only DIRECT ACCESS devices will have Sun labels.
4168 	 * CDs supposedly have a Sun label, too.
4169 	 */
4170 	if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT || ISREMOVABLE(un)) {
4171 		struct	dk_label *dkl;
4172 		offset_t dkl1;
4173 		offset_t label_addr, real_addr;
4174 		int	rval;
4175 		size_t	buffer_size;
4176
4177 		/*
4178 		 * Note: This will set up un->un_solaris_size and
4179 		 * un->un_solaris_offset.
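		 * (On platforms built with _NO_FDISK_PRESENT,
		 * sd_read_fdisk() simply reports the whole disk as the
		 * Solaris partition.)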
4180 */ 4181 switch (sd_read_fdisk(un, capacity, lbasize, path_flag)) { 4182 case SD_CMD_RESERVATION_CONFLICT: 4183 ASSERT(mutex_owned(SD_MUTEX(un))); 4184 return (EACCES); 4185 case SD_CMD_FAILURE: 4186 ASSERT(mutex_owned(SD_MUTEX(un))); 4187 return (ENOMEM); 4188 } 4189 4190 if (un->un_solaris_size <= DK_LABEL_LOC) { 4191 /* 4192 * Found fdisk table but no Solaris partition entry, 4193 * so don't call sd_uselabel() and don't create 4194 * a default label. 4195 */ 4196 label_error = 0; 4197 un->un_f_geometry_is_valid = TRUE; 4198 goto no_solaris_partition; 4199 } 4200 label_addr = (daddr_t)(un->un_solaris_offset + DK_LABEL_LOC); 4201 4202 /* 4203 * sys_blocksize != tgt_blocksize, need to re-adjust 4204 * blkno and save the index to beginning of dk_label 4205 */ 4206 real_addr = SD_SYS2TGTBLOCK(un, label_addr); 4207 buffer_size = SD_REQBYTES2TGTBYTES(un, 4208 sizeof (struct dk_label)); 4209 4210 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_validate_geometry: " 4211 "label_addr: 0x%x allocation size: 0x%x\n", 4212 label_addr, buffer_size); 4213 dkl = kmem_zalloc(buffer_size, KM_NOSLEEP); 4214 if (dkl == NULL) { 4215 return (ENOMEM); 4216 } 4217 4218 mutex_exit(SD_MUTEX(un)); 4219 rval = sd_send_scsi_READ(un, dkl, buffer_size, real_addr, 4220 path_flag); 4221 mutex_enter(SD_MUTEX(un)); 4222 4223 switch (rval) { 4224 case 0: 4225 /* 4226 * sd_uselabel will establish that the geometry 4227 * is valid. 4228 * For sys_blocksize != tgt_blocksize, need 4229 * to index into the beginning of dk_label 4230 */ 4231 dkl1 = (daddr_t)dkl 4232 + SD_TGTBYTEOFFSET(un, label_addr, real_addr); 4233 if (sd_uselabel(un, (struct dk_label *)(uintptr_t)dkl1, 4234 path_flag) != SD_LABEL_IS_VALID) { 4235 label_error = EINVAL; 4236 } 4237 break; 4238 case EACCES: 4239 label_error = EACCES; 4240 break; 4241 default: 4242 label_error = EINVAL; 4243 break; 4244 } 4245 4246 kmem_free(dkl, buffer_size); 4247 4248 #if defined(_SUNOS_VTOC_8) 4249 label = (char *)un->un_asciilabel; 4250 #elif defined(_SUNOS_VTOC_16) 4251 label = (char *)un->un_vtoc.v_asciilabel; 4252 #else 4253 #error "No VTOC format defined." 4254 #endif 4255 } 4256 4257 /* 4258 * If a valid label was not found, AND if no reservation conflict 4259 * was detected, then go ahead and create a default label (4069506). 4260 * 4261 * Note: currently, for VTOC_8 devices, the default label is created 4262 * for removables only. For VTOC_16 devices, the default label will 4263 * be created for both removables and non-removables alike. 4264 * (see sd_build_default_label) 4265 */ 4266 #if defined(_SUNOS_VTOC_8) 4267 if (ISREMOVABLE(un) && (label_error != EACCES)) { 4268 #elif defined(_SUNOS_VTOC_16) 4269 if (label_error != EACCES) { 4270 #endif 4271 if (un->un_f_geometry_is_valid == FALSE) { 4272 sd_build_default_label(un); 4273 } 4274 label_error = 0; 4275 } 4276 4277 no_solaris_partition: 4278 if ((!ISREMOVABLE(un) || 4279 (ISREMOVABLE(un) && un->un_mediastate == DKIO_EJECTED)) && 4280 (un->un_state == SD_STATE_NORMAL && gvalid == FALSE)) { 4281 /* 4282 * Print out a message indicating who and what we are. 4283 * We do this only when we happen to really validate the 4284 * geometry. We may call sd_validate_geometry() at other 4285 * times, e.g., ioctl()'s like Get VTOC in which case we 4286 * don't want to print the label. 
4287 * If the geometry is valid, print the label string, 4288 * else print vendor and product info, if available 4289 */ 4290 if ((un->un_f_geometry_is_valid == TRUE) && (label != NULL)) { 4291 SD_INFO(SD_LOG_ATTACH_DETACH, un, "?<%s>\n", label); 4292 } else { 4293 mutex_enter(&sd_label_mutex); 4294 sd_inq_fill(SD_INQUIRY(un)->inq_vid, VIDMAX, 4295 labelstring); 4296 sd_inq_fill(SD_INQUIRY(un)->inq_pid, PIDMAX, 4297 &labelstring[64]); 4298 (void) sprintf(buf, "?Vendor '%s', product '%s'", 4299 labelstring, &labelstring[64]); 4300 if (un->un_f_blockcount_is_valid == TRUE) { 4301 (void) sprintf(&buf[strlen(buf)], 4302 ", %llu %u byte blocks\n", 4303 (longlong_t)un->un_blockcount, 4304 un->un_tgt_blocksize); 4305 } else { 4306 (void) sprintf(&buf[strlen(buf)], 4307 ", (unknown capacity)\n"); 4308 } 4309 SD_INFO(SD_LOG_ATTACH_DETACH, un, buf); 4310 mutex_exit(&sd_label_mutex); 4311 } 4312 } 4313 4314 #if defined(_SUNOS_VTOC_16) 4315 /* 4316 * If we have valid geometry, set up the remaining fdisk partitions. 4317 * Note that dkl_cylno is not used for the fdisk map entries, so 4318 * we set it to an entirely bogus value. 4319 */ 4320 for (count = 0; count < FD_NUMPART; count++) { 4321 un->un_map[FDISK_P1 + count].dkl_cylno = -1; 4322 un->un_map[FDISK_P1 + count].dkl_nblk = 4323 un->un_fmap[count].fmap_nblk; 4324 4325 un->un_offset[FDISK_P1 + count] = 4326 un->un_fmap[count].fmap_start; 4327 } 4328 #endif 4329 4330 for (count = 0; count < NDKMAP; count++) { 4331 #if defined(_SUNOS_VTOC_8) 4332 struct dk_map *lp = &un->un_map[count]; 4333 un->un_offset[count] = 4334 un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno; 4335 #elif defined(_SUNOS_VTOC_16) 4336 struct dkl_partition *vp = &un->un_vtoc.v_part[count]; 4337 4338 un->un_offset[count] = vp->p_start + un->un_solaris_offset; 4339 #else 4340 #error "No VTOC format defined." 4341 #endif 4342 } 4343 4344 return (label_error); 4345 } 4346 4347 4348 #if defined(_SUNOS_VTOC_16) 4349 /* 4350 * Macro: MAX_BLKS 4351 * 4352 * This macro is used for table entries where we need to have the largest 4353 * possible sector value for that head & SPT (sectors per track) 4354 * combination. Other entries for some smaller disk sizes are set by 4355 * convention to match those used by X86 BIOS usage. 4356 */ 4357 #define MAX_BLKS(heads, spt) UINT16_MAX * heads * spt, heads, spt 4358 4359 /* 4360 * Function: sd_convert_geometry 4361 * 4362 * Description: Convert physical geometry into a dk_geom structure. In 4363 * other words, make sure we don't wrap 16-bit values. 4364 * e.g. converting from geom_cache to dk_geom 4365 * 4366 * Context: Kernel thread only 4367 */ 4368 static void 4369 sd_convert_geometry(uint64_t capacity, struct dk_geom *un_g) 4370 { 4371 int i; 4372 static const struct chs_values { 4373 uint_t max_cap; /* Max Capacity for this HS. */ 4374 uint_t nhead; /* Heads to use. */ 4375 uint_t nsect; /* SPT to use. */ 4376 } CHS_values[] = { 4377 {0x00200000, 64, 32}, /* 1GB or smaller disk. */ 4378 {0x01000000, 128, 32}, /* 8GB or smaller disk. */ 4379 {MAX_BLKS(255, 63)}, /* 502.02GB or smaller disk. */ 4380 {MAX_BLKS(255, 126)}, /* .98TB or smaller disk. 
 */
4381 	{DK_MAX_BLOCKS, 255, 189}	/* Max size is just under 1TB */
4382 	};
4383
4384 	/* Unlabeled SCSI floppy device */
4385 	if (capacity <= 0x1000) {
4386 		un_g->dkg_nhead = 2;
4387 		un_g->dkg_ncyl = 80;
4388 		un_g->dkg_nsect = capacity / (un_g->dkg_nhead * un_g->dkg_ncyl);
4389 		return;
4390 	}
4391
4392 	/*
4393 	 * For all devices we calculate cylinders using the heads and
4394 	 * sectors we assign based on the capacity of the device. The
4395 	 * table is designed to be compatible with the way other
4396 	 * operating systems lay out fdisk tables for X86, and to
4397 	 * ensure that the cylinders never exceed 65535 to prevent
4398 	 * problems with X86 ioctls that report geometry.
4399 	 * We use SPT values that are multiples of 63, since other OSes
4400 	 * that are not limited to 16-bit cylinder counts stop at
4401 	 * 63 SPT; we make do by using multiples of 63 SPT.
4402 	 *
4403 	 * Note that capacities greater than or equal to 1TB will simply
4404 	 * get the largest geometry from the table. This should be okay,
4405 	 * since disks this large shouldn't be using CHS values anyway.
4406 	 */
4407 	for (i = 0; CHS_values[i].max_cap < capacity &&
4408 	    CHS_values[i].max_cap != DK_MAX_BLOCKS; i++)
4409 		;
4410
4411 	un_g->dkg_nhead = CHS_values[i].nhead;
4412 	un_g->dkg_nsect = CHS_values[i].nsect;
4413 }
4414 #endif
4415
4416
4417 /*
4418  * Function: sd_resync_geom_caches
4419  *
4420  * Description: (Re)initialize both geometry caches: the virtual geometry
4421  *		information is extracted from the HBA (the "geometry"
4422  *		capability), and the physical geometry cache data is
4423  *		generated by issuing MODE SENSE commands.
4424  *
4425  * Arguments: un - driver soft state (unit) structure
4426  *	      capacity - disk capacity in #blocks
4427  *	      lbasize - disk block size in bytes
4428  *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
4429  *		the normal command waitq, or SD_PATH_DIRECT_PRIORITY
4430  *		to use the USCSI "direct" chain and bypass the normal
4431  *		command waitq.
4432  *
4433  * Context: Kernel thread only (can sleep).
4434  */
4435
4436 static void
4437 sd_resync_geom_caches(struct sd_lun *un, int capacity, int lbasize,
4438     int path_flag)
4439 {
4440 	struct	geom_cache	pgeom;
4441 	struct	geom_cache	*pgeom_p = &pgeom;
4442 	int	spc;
4443 	unsigned short nhead;
4444 	unsigned short nsect;
4445
4446 	ASSERT(un != NULL);
4447 	ASSERT(mutex_owned(SD_MUTEX(un)));
4448
4449 	/*
4450 	 * Ask the controller for its logical geometry.
4451 	 * Note: if the HBA does not support scsi_ifgetcap("geometry"),
4452 	 * then the lgeom cache will be invalid.
4453 	 */
4454 	sd_get_virtual_geometry(un, capacity, lbasize);
4455
4456 	/*
4457 	 * Initialize the pgeom cache from lgeom, so that if MODE SENSE
4458 	 * doesn't work, DKIOCG_PHYSGEOM can return reasonable values.
4459 	 */
4460 	if (un->un_lgeom.g_nsect == 0 || un->un_lgeom.g_nhead == 0) {
4461 		/*
4462 		 * Note: Perhaps this needs to be more adaptive? The rationale
4463 		 * is that, if there's no HBA geometry from the HBA driver, any
4464 		 * guess is good, since this is the physical geometry.
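		 * (With that fallback, one cylinder is 255 * 63 = 16065
		 * blocks; 65535 such cylinders cover roughly 502 GB, which
		 * is also the MAX_BLKS(255, 63) limit in
		 * sd_convert_geometry()'s CHS table.)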
If MODE 4465 * SENSE fails this gives a max cylinder size for non-LBA access 4466 */ 4467 nhead = 255; 4468 nsect = 63; 4469 } else { 4470 nhead = un->un_lgeom.g_nhead; 4471 nsect = un->un_lgeom.g_nsect; 4472 } 4473 4474 if (ISCD(un)) { 4475 pgeom_p->g_nhead = 1; 4476 pgeom_p->g_nsect = nsect * nhead; 4477 } else { 4478 pgeom_p->g_nhead = nhead; 4479 pgeom_p->g_nsect = nsect; 4480 } 4481 4482 spc = pgeom_p->g_nhead * pgeom_p->g_nsect; 4483 pgeom_p->g_capacity = capacity; 4484 pgeom_p->g_ncyl = pgeom_p->g_capacity / spc; 4485 pgeom_p->g_acyl = 0; 4486 4487 /* 4488 * Retrieve fresh geometry data from the hardware, stash it 4489 * here temporarily before we rebuild the incore label. 4490 * 4491 * We want to use the MODE SENSE commands to derive the 4492 * physical geometry of the device, but if either command 4493 * fails, the logical geometry is used as the fallback for 4494 * disk label geometry. 4495 */ 4496 mutex_exit(SD_MUTEX(un)); 4497 sd_get_physical_geometry(un, pgeom_p, capacity, lbasize, path_flag); 4498 mutex_enter(SD_MUTEX(un)); 4499 4500 /* 4501 * Now update the real copy while holding the mutex. This 4502 * way the global copy is never in an inconsistent state. 4503 */ 4504 bcopy(pgeom_p, &un->un_pgeom, sizeof (un->un_pgeom)); 4505 4506 SD_INFO(SD_LOG_COMMON, un, "sd_resync_geom_caches: " 4507 "(cached from lgeom)\n"); 4508 SD_INFO(SD_LOG_COMMON, un, 4509 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4510 un->un_pgeom.g_ncyl, un->un_pgeom.g_acyl, 4511 un->un_pgeom.g_nhead, un->un_pgeom.g_nsect); 4512 SD_INFO(SD_LOG_COMMON, un, " lbasize: %d; capacity: %ld; " 4513 "intrlv: %d; rpm: %d\n", un->un_pgeom.g_secsize, 4514 un->un_pgeom.g_capacity, un->un_pgeom.g_intrlv, 4515 un->un_pgeom.g_rpm); 4516 } 4517 4518 4519 /* 4520 * Function: sd_read_fdisk 4521 * 4522 * Description: utility routine to read the fdisk table. 4523 * 4524 * Arguments: un - driver soft state (unit) structure 4525 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4526 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4527 * to use the USCSI "direct" chain and bypass the normal 4528 * command waitq. 4529 * 4530 * Return Code: SD_CMD_SUCCESS 4531 * SD_CMD_FAILURE 4532 * 4533 * Context: Kernel thread only (can sleep). 4534 */ 4535 /* ARGSUSED */ 4536 static int 4537 sd_read_fdisk(struct sd_lun *un, uint_t capacity, int lbasize, int path_flag) 4538 { 4539 #if defined(_NO_FDISK_PRESENT) 4540 4541 un->un_solaris_offset = 0; 4542 un->un_solaris_size = capacity; 4543 bzero(un->un_fmap, sizeof (struct fmap) * FD_NUMPART); 4544 return (SD_CMD_SUCCESS); 4545 4546 #elif defined(_FIRMWARE_NEEDS_FDISK) 4547 4548 struct ipart *fdp; 4549 struct mboot *mbp; 4550 struct ipart fdisk[FD_NUMPART]; 4551 int i; 4552 char sigbuf[2]; 4553 caddr_t bufp; 4554 int uidx; 4555 int rval; 4556 int lba = 0; 4557 uint_t solaris_offset; /* offset to solaris part. 
*/ 4558 daddr_t solaris_size; /* size of solaris partition */ 4559 uint32_t blocksize; 4560 4561 ASSERT(un != NULL); 4562 ASSERT(mutex_owned(SD_MUTEX(un))); 4563 ASSERT(un->un_f_tgt_blocksize_is_valid == TRUE); 4564 4565 blocksize = un->un_tgt_blocksize; 4566 4567 /* 4568 * Start off assuming no fdisk table 4569 */ 4570 solaris_offset = 0; 4571 solaris_size = capacity; 4572 4573 mutex_exit(SD_MUTEX(un)); 4574 bufp = kmem_zalloc(blocksize, KM_SLEEP); 4575 rval = sd_send_scsi_READ(un, bufp, blocksize, 0, path_flag); 4576 mutex_enter(SD_MUTEX(un)); 4577 4578 if (rval != 0) { 4579 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 4580 "sd_read_fdisk: fdisk read err\n"); 4581 kmem_free(bufp, blocksize); 4582 return (SD_CMD_FAILURE); 4583 } 4584 4585 mbp = (struct mboot *)bufp; 4586 4587 /* 4588 * The fdisk table does not begin on a 4-byte boundary within the 4589 * master boot record, so we copy it to an aligned structure to avoid 4590 * alignment exceptions on some processors. 4591 */ 4592 bcopy(&mbp->parts[0], fdisk, sizeof (fdisk)); 4593 4594 /* 4595 * Check for lba support before verifying sig; sig might not be 4596 * there, say on a blank disk, but the max_chs mark may still 4597 * be present. 4598 * 4599 * Note: LBA support and BEFs are an x86-only concept but this 4600 * code should work OK on SPARC as well. 4601 */ 4602 4603 /* 4604 * First, check for lba-access-ok on root node (or prom root node) 4605 * if present there, don't need to search fdisk table. 4606 */ 4607 if (ddi_getprop(DDI_DEV_T_ANY, ddi_root_node(), 0, 4608 "lba-access-ok", 0) != 0) { 4609 /* All drives do LBA; don't search fdisk table */ 4610 lba = 1; 4611 } else { 4612 /* Okay, look for mark in fdisk table */ 4613 for (fdp = fdisk, i = 0; i < FD_NUMPART; i++, fdp++) { 4614 /* accumulate "lba" value from all partitions */ 4615 lba = (lba || sd_has_max_chs_vals(fdp)); 4616 } 4617 } 4618 4619 /* 4620 * Next, look for 'no-bef-lba-access' prop on parent. 4621 * Its presence means the realmode driver doesn't support 4622 * LBA, so the target driver shouldn't advertise it as ok. 4623 * This should be a temporary condition; one day all 4624 * BEFs should support the LBA access functions. 
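 * (A parent nexus would typically assert this as an integer property
 * in its driver.conf, e.g. the hypothetical line: no-bef-lba-access=1;)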
4625 */ 4626 if ((lba != 0) && (ddi_getprop(DDI_DEV_T_ANY, 4627 ddi_get_parent(SD_DEVINFO(un)), DDI_PROP_DONTPASS, 4628 "no-bef-lba-access", 0) != 0)) { 4629 /* BEF doesn't support LBA; don't advertise it as ok */ 4630 lba = 0; 4631 } 4632 4633 if (lba != 0) { 4634 dev_t dev = sd_make_device(SD_DEVINFO(un)); 4635 4636 if (ddi_getprop(dev, SD_DEVINFO(un), DDI_PROP_DONTPASS, 4637 "lba-access-ok", 0) == 0) { 4638 /* not found; create it */ 4639 if (ddi_prop_create(dev, SD_DEVINFO(un), 0, 4640 "lba-access-ok", (caddr_t)NULL, 0) != 4641 DDI_PROP_SUCCESS) { 4642 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 4643 "sd_read_fdisk: Can't create lba property " 4644 "for instance %d\n", 4645 ddi_get_instance(SD_DEVINFO(un))); 4646 } 4647 } 4648 } 4649 4650 bcopy(&mbp->signature, sigbuf, sizeof (sigbuf)); 4651 4652 /* 4653 * Endian-independent signature check 4654 */ 4655 if (((sigbuf[1] & 0xFF) != ((MBB_MAGIC >> 8) & 0xFF)) || 4656 (sigbuf[0] != (MBB_MAGIC & 0xFF))) { 4657 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 4658 "sd_read_fdisk: no fdisk\n"); 4659 bzero(un->un_fmap, sizeof (struct fmap) * FD_NUMPART); 4660 rval = SD_CMD_SUCCESS; 4661 goto done; 4662 } 4663 4664 #ifdef SDDEBUG 4665 if (sd_level_mask & SD_LOGMASK_INFO) { 4666 fdp = fdisk; 4667 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_read_fdisk:\n"); 4668 SD_INFO(SD_LOG_ATTACH_DETACH, un, " relsect " 4669 "numsect sysid bootid\n"); 4670 for (i = 0; i < FD_NUMPART; i++, fdp++) { 4671 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4672 " %d: %8d %8d 0x%08x 0x%08x\n", 4673 i, fdp->relsect, fdp->numsect, 4674 fdp->systid, fdp->bootid); 4675 } 4676 } 4677 #endif 4678 4679 /* 4680 * Try to find the unix partition 4681 */ 4682 uidx = -1; 4683 solaris_offset = 0; 4684 solaris_size = 0; 4685 4686 for (fdp = fdisk, i = 0; i < FD_NUMPART; i++, fdp++) { 4687 int relsect; 4688 int numsect; 4689 4690 if (fdp->numsect == 0) { 4691 un->un_fmap[i].fmap_start = 0; 4692 un->un_fmap[i].fmap_nblk = 0; 4693 continue; 4694 } 4695 4696 /* 4697 * Data in the fdisk table is little-endian. 4698 */ 4699 relsect = LE_32(fdp->relsect); 4700 numsect = LE_32(fdp->numsect); 4701 4702 un->un_fmap[i].fmap_start = relsect; 4703 un->un_fmap[i].fmap_nblk = numsect; 4704 4705 if (fdp->systid != SUNIXOS && 4706 fdp->systid != SUNIXOS2 && 4707 fdp->systid != EFI_PMBR) { 4708 continue; 4709 } 4710 4711 /* 4712 * use the last active solaris partition id found 4713 * (there should only be 1 active partition id) 4714 * 4715 * if there are no active solaris partition id 4716 * then use the first inactive solaris partition id 4717 */ 4718 if ((uidx == -1) || (fdp->bootid == ACTIVE)) { 4719 uidx = i; 4720 solaris_offset = relsect; 4721 solaris_size = numsect; 4722 } 4723 } 4724 4725 SD_INFO(SD_LOG_ATTACH_DETACH, un, "fdisk 0x%x 0x%lx", 4726 un->un_solaris_offset, un->un_solaris_size); 4727 4728 rval = SD_CMD_SUCCESS; 4729 4730 done: 4731 4732 /* 4733 * Clear the VTOC info, only if the Solaris partition entry 4734 * has moved, changed size, been deleted, or if the size of 4735 * the partition is too small to even fit the label sector. 
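	 * (DK_LABEL_LOC is the block offset of the label within the
	 * Solaris partition, so a partition of DK_LABEL_LOC blocks or
	 * fewer cannot hold one.)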
4736 */ 4737 if ((un->un_solaris_offset != solaris_offset) || 4738 (un->un_solaris_size != solaris_size) || 4739 solaris_size <= DK_LABEL_LOC) { 4740 SD_INFO(SD_LOG_ATTACH_DETACH, un, "fdisk moved 0x%x 0x%lx", 4741 solaris_offset, solaris_size); 4742 bzero(&un->un_g, sizeof (struct dk_geom)); 4743 bzero(&un->un_vtoc, sizeof (struct dk_vtoc)); 4744 bzero(&un->un_map, NDKMAP * (sizeof (struct dk_map))); 4745 un->un_f_geometry_is_valid = FALSE; 4746 } 4747 un->un_solaris_offset = solaris_offset; 4748 un->un_solaris_size = solaris_size; 4749 kmem_free(bufp, blocksize); 4750 return (rval); 4751 4752 #else /* #elif defined(_FIRMWARE_NEEDS_FDISK) */ 4753 #error "fdisk table presence undetermined for this platform." 4754 #endif /* #if defined(_NO_FDISK_PRESENT) */ 4755 } 4756 4757 4758 /* 4759 * Function: sd_get_physical_geometry 4760 * 4761 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4762 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4763 * target, and use this information to initialize the physical 4764 * geometry cache specified by pgeom_p. 4765 * 4766 * MODE SENSE is an optional command, so failure in this case 4767 * does not necessarily denote an error. We want to use the 4768 * MODE SENSE commands to derive the physical geometry of the 4769 * device, but if either command fails, the logical geometry is 4770 * used as the fallback for disk label geometry. 4771 * 4772 * This requires that un->un_blockcount and un->un_tgt_blocksize 4773 * have already been initialized for the current target and 4774 * that the current values be passed as args so that we don't 4775 * end up ever trying to use -1 as a valid value. This could 4776 * happen if either value is reset while we're not holding 4777 * the mutex. 4778 * 4779 * Arguments: un - driver soft state (unit) structure 4780 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4781 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4782 * to use the USCSI "direct" chain and bypass the normal 4783 * command waitq. 4784 * 4785 * Context: Kernel thread only (can sleep). 4786 */ 4787 4788 static void 4789 sd_get_physical_geometry(struct sd_lun *un, struct geom_cache *pgeom_p, 4790 int capacity, int lbasize, int path_flag) 4791 { 4792 struct mode_format *page3p; 4793 struct mode_geometry *page4p; 4794 struct mode_header *headerp; 4795 int sector_size; 4796 int nsect; 4797 int nhead; 4798 int ncyl; 4799 int intrlv; 4800 int spc; 4801 int modesense_capacity; 4802 int rpm; 4803 int bd_len; 4804 int mode_header_length; 4805 uchar_t *p3bufp; 4806 uchar_t *p4bufp; 4807 int cdbsize; 4808 4809 ASSERT(un != NULL); 4810 ASSERT(!(mutex_owned(SD_MUTEX(un)))); 4811 4812 if (un->un_f_blockcount_is_valid != TRUE) { 4813 return; 4814 } 4815 4816 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 4817 return; 4818 } 4819 4820 if (lbasize == 0) { 4821 if (ISCD(un)) { 4822 lbasize = 2048; 4823 } else { 4824 lbasize = un->un_sys_blocksize; 4825 } 4826 } 4827 pgeom_p->g_secsize = (unsigned short)lbasize; 4828 4829 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? 
CDB_GROUP2 : CDB_GROUP0; 4830 4831 /* 4832 * Retrieve MODE SENSE page 3 - Format Device Page 4833 */ 4834 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4835 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4836 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4837 != 0) { 4838 SD_ERROR(SD_LOG_COMMON, un, 4839 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4840 goto page3_exit; 4841 } 4842 4843 /* 4844 * Determine size of Block Descriptors in order to locate the mode 4845 * page data. ATAPI devices return 0, SCSI devices should return 4846 * MODE_BLK_DESC_LENGTH. 4847 */ 4848 headerp = (struct mode_header *)p3bufp; 4849 if (un->un_f_cfg_is_atapi == TRUE) { 4850 struct mode_header_grp2 *mhp = 4851 (struct mode_header_grp2 *)headerp; 4852 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4853 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4854 } else { 4855 mode_header_length = MODE_HEADER_LENGTH; 4856 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4857 } 4858 4859 if (bd_len > MODE_BLK_DESC_LENGTH) { 4860 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4861 "received unexpected bd_len of %d, page3\n", bd_len); 4862 goto page3_exit; 4863 } 4864 4865 page3p = (struct mode_format *) 4866 ((caddr_t)headerp + mode_header_length + bd_len); 4867 4868 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4869 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4870 "mode sense pg3 code mismatch %d\n", 4871 page3p->mode_page.code); 4872 goto page3_exit; 4873 } 4874 4875 /* 4876 * Use this physical geometry data only if BOTH MODE SENSE commands 4877 * complete successfully; otherwise, revert to the logical geometry. 4878 * So, we need to save everything in temporary variables. 4879 */ 4880 sector_size = BE_16(page3p->data_bytes_sect); 4881 4882 /* 4883 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4884 */ 4885 if (sector_size == 0) { 4886 sector_size = (ISCD(un)) ? 2048 : un->un_sys_blocksize; 4887 } else { 4888 sector_size &= ~(un->un_sys_blocksize - 1); 4889 } 4890 4891 nsect = BE_16(page3p->sect_track); 4892 intrlv = BE_16(page3p->interleave); 4893 4894 SD_INFO(SD_LOG_COMMON, un, 4895 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4896 SD_INFO(SD_LOG_COMMON, un, 4897 " mode page: %d; nsect: %d; sector size: %d;\n", 4898 page3p->mode_page.code, nsect, sector_size); 4899 SD_INFO(SD_LOG_COMMON, un, 4900 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4901 BE_16(page3p->track_skew), 4902 BE_16(page3p->cylinder_skew)); 4903 4904 4905 /* 4906 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4907 */ 4908 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4909 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4910 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4911 != 0) { 4912 SD_ERROR(SD_LOG_COMMON, un, 4913 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4914 goto page4_exit; 4915 } 4916 4917 /* 4918 * Determine size of Block Descriptors in order to locate the mode 4919 * page data. ATAPI devices return 0, SCSI devices should return 4920 * MODE_BLK_DESC_LENGTH. 
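	 * For example, a (hypothetical) non-ATAPI response with a block
	 * descriptor length of 8 would place the geometry page at
	 * MODE_HEADER_LENGTH + 8 bytes from the start of the buffer.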
4921 */ 4922 headerp = (struct mode_header *)p4bufp; 4923 if (un->un_f_cfg_is_atapi == TRUE) { 4924 struct mode_header_grp2 *mhp = 4925 (struct mode_header_grp2 *)headerp; 4926 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4927 } else { 4928 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4929 } 4930 4931 if (bd_len > MODE_BLK_DESC_LENGTH) { 4932 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4933 "received unexpected bd_len of %d, page4\n", bd_len); 4934 goto page4_exit; 4935 } 4936 4937 page4p = (struct mode_geometry *) 4938 ((caddr_t)headerp + mode_header_length + bd_len); 4939 4940 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4941 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4942 "mode sense pg4 code mismatch %d\n", 4943 page4p->mode_page.code); 4944 goto page4_exit; 4945 } 4946 4947 /* 4948 * Stash the data now, after we know that both commands completed. 4949 */ 4950 4951 mutex_enter(SD_MUTEX(un)); 4952 4953 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4954 spc = nhead * nsect; 4955 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4956 rpm = BE_16(page4p->rpm); 4957 4958 modesense_capacity = spc * ncyl; 4959 4960 SD_INFO(SD_LOG_COMMON, un, 4961 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4962 SD_INFO(SD_LOG_COMMON, un, 4963 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4964 SD_INFO(SD_LOG_COMMON, un, 4965 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4966 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4967 (void *)pgeom_p, capacity); 4968 4969 /* 4970 * Compensate if the drive's geometry is not rectangular, i.e., 4971 * the product of C * H * S returned by MODE SENSE >= that returned 4972 * by read capacity. This is an idiosyncrasy of the original x86 4973 * disk subsystem. 
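	 * For example, assume a hypothetical drive whose MODE SENSE pages
	 * report nhead = 16, nsect = 128, ncyl = 10000 (spc = 2048, for a
	 * computed capacity of 20,480,000 blocks) while READ CAPACITY
	 * returns 20,450,000 blocks: g_acyl = (20480000 - 20450000 +
	 * 2047) / 2048 = 15, and g_ncyl becomes 10000 - 15 = 9985.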
4974 */ 4975 if (modesense_capacity >= capacity) { 4976 SD_INFO(SD_LOG_COMMON, un, 4977 "sd_get_physical_geometry: adjusting acyl; " 4978 "old: %d; new: %d\n", pgeom_p->g_acyl, 4979 (modesense_capacity - capacity + spc - 1) / spc); 4980 if (sector_size != 0) { 4981 /* 1243403: NEC D38x7 drives don't support sec size */ 4982 pgeom_p->g_secsize = (unsigned short)sector_size; 4983 } 4984 pgeom_p->g_nsect = (unsigned short)nsect; 4985 pgeom_p->g_nhead = (unsigned short)nhead; 4986 pgeom_p->g_capacity = capacity; 4987 pgeom_p->g_acyl = 4988 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4989 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4990 } 4991 4992 pgeom_p->g_rpm = (unsigned short)rpm; 4993 pgeom_p->g_intrlv = (unsigned short)intrlv; 4994 4995 SD_INFO(SD_LOG_COMMON, un, 4996 "sd_get_physical_geometry: mode sense geometry:\n"); 4997 SD_INFO(SD_LOG_COMMON, un, 4998 " nsect: %d; sector size: %d; interlv: %d\n", 4999 nsect, sector_size, intrlv); 5000 SD_INFO(SD_LOG_COMMON, un, 5001 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 5002 nhead, ncyl, rpm, modesense_capacity); 5003 SD_INFO(SD_LOG_COMMON, un, 5004 "sd_get_physical_geometry: (cached)\n"); 5005 SD_INFO(SD_LOG_COMMON, un, 5006 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 5007 un->un_pgeom.g_ncyl, un->un_pgeom.g_acyl, 5008 un->un_pgeom.g_nhead, un->un_pgeom.g_nsect); 5009 SD_INFO(SD_LOG_COMMON, un, 5010 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 5011 un->un_pgeom.g_secsize, un->un_pgeom.g_capacity, 5012 un->un_pgeom.g_intrlv, un->un_pgeom.g_rpm); 5013 5014 mutex_exit(SD_MUTEX(un)); 5015 5016 page4_exit: 5017 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 5018 page3_exit: 5019 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 5020 } 5021 5022 5023 /* 5024 * Function: sd_get_virtual_geometry 5025 * 5026 * Description: Ask the controller to tell us about the target device. 5027 * 5028 * Arguments: un - pointer to softstate 5029 * capacity - disk capacity in #blocks 5030 * lbasize - disk block size in bytes 5031 * 5032 * Context: Kernel thread only 5033 */ 5034 5035 static void 5036 sd_get_virtual_geometry(struct sd_lun *un, int capacity, int lbasize) 5037 { 5038 struct geom_cache *lgeom_p = &un->un_lgeom; 5039 uint_t geombuf; 5040 int spc; 5041 5042 ASSERT(un != NULL); 5043 ASSERT(mutex_owned(SD_MUTEX(un))); 5044 5045 mutex_exit(SD_MUTEX(un)); 5046 5047 /* Set sector size, and total number of sectors */ 5048 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 5049 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 5050 5051 /* Let the HBA tell us its geometry */ 5052 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 5053 5054 mutex_enter(SD_MUTEX(un)); 5055 5056 /* A value of -1 indicates an undefined "geometry" property */ 5057 if (geombuf == (-1)) { 5058 return; 5059 } 5060 5061 /* Initialize the logical geometry cache. */ 5062 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 5063 lgeom_p->g_nsect = geombuf & 0xffff; 5064 lgeom_p->g_secsize = un->un_sys_blocksize; 5065 5066 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 5067 5068 /* 5069 * Note: The driver originally converted the capacity value from 5070 * target blocks to system blocks. However, the capacity value passed 5071 * to this routine is already in terms of system blocks (this scaling 5072 * is done when the READ CAPACITY command is issued and processed). 
* This 'error' may have gone undetected because the usage of g_ncyl 5074 * (which is based upon g_capacity) is very limited within the driver. 5075 */ 5076 lgeom_p->g_capacity = capacity; 5077 5078 /* 5079 * Set ncyl to zero if the HBA returned a zero nhead or nsect value. The 5080 * HBA may return zero values if the device has been removed. 5081 */ 5082 if (spc == 0) { 5083 lgeom_p->g_ncyl = 0; 5084 } else { 5085 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 5086 } 5087 lgeom_p->g_acyl = 0; 5088 5089 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 5090 SD_INFO(SD_LOG_COMMON, un, 5091 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 5092 un->un_lgeom.g_ncyl, un->un_lgeom.g_acyl, 5093 un->un_lgeom.g_nhead, un->un_lgeom.g_nsect); 5094 SD_INFO(SD_LOG_COMMON, un, " lbasize: %d; capacity: %ld; " 5095 "intrlv: %d; rpm: %d\n", un->un_lgeom.g_secsize, 5096 un->un_lgeom.g_capacity, un->un_lgeom.g_intrlv, un->un_lgeom.g_rpm); 5097 } 5098 5099 5100 /* 5101 * Function: sd_update_block_info 5102 * 5103 * Description: Store the new target sector size and capacity in the 5104 * soft state, marking each as valid when non-zero. 5105 * 5106 * Arguments: un: unit struct. 5107 * lbasize: new target sector size 5108 * capacity: new target capacity, i.e., block count 5109 * 5110 * Context: Kernel thread context 5111 */ 5112 5113 static void 5114 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 5115 { 5116 if (lbasize != 0) { 5117 un->un_tgt_blocksize = lbasize; 5118 un->un_f_tgt_blocksize_is_valid = TRUE; 5119 } 5120 5121 if (capacity != 0) { 5122 un->un_blockcount = capacity; 5123 un->un_f_blockcount_is_valid = TRUE; 5124 } 5125 } 5126 5127 5128 static void 5129 sd_swap_efi_gpt(efi_gpt_t *e) 5130 { 5131 _NOTE(ASSUMING_PROTECTED(*e)) 5132 e->efi_gpt_Signature = LE_64(e->efi_gpt_Signature); 5133 e->efi_gpt_Revision = LE_32(e->efi_gpt_Revision); 5134 e->efi_gpt_HeaderSize = LE_32(e->efi_gpt_HeaderSize); 5135 e->efi_gpt_HeaderCRC32 = LE_32(e->efi_gpt_HeaderCRC32); 5136 e->efi_gpt_MyLBA = LE_64(e->efi_gpt_MyLBA); 5137 e->efi_gpt_AlternateLBA = LE_64(e->efi_gpt_AlternateLBA); 5138 e->efi_gpt_FirstUsableLBA = LE_64(e->efi_gpt_FirstUsableLBA); 5139 e->efi_gpt_LastUsableLBA = LE_64(e->efi_gpt_LastUsableLBA); 5140 UUID_LE_CONVERT(e->efi_gpt_DiskGUID, e->efi_gpt_DiskGUID); 5141 e->efi_gpt_PartitionEntryLBA = LE_64(e->efi_gpt_PartitionEntryLBA); 5142 e->efi_gpt_NumberOfPartitionEntries = 5143 LE_32(e->efi_gpt_NumberOfPartitionEntries); 5144 e->efi_gpt_SizeOfPartitionEntry = 5145 LE_32(e->efi_gpt_SizeOfPartitionEntry); 5146 e->efi_gpt_PartitionEntryArrayCRC32 = 5147 LE_32(e->efi_gpt_PartitionEntryArrayCRC32); 5148 } 5149 5150 static void 5151 sd_swap_efi_gpe(int nparts, efi_gpe_t *p) 5152 { 5153 int i; 5154 5155 _NOTE(ASSUMING_PROTECTED(*p)) 5156 for (i = 0; i < nparts; i++) { 5157 UUID_LE_CONVERT(p[i].efi_gpe_PartitionTypeGUID, 5158 p[i].efi_gpe_PartitionTypeGUID); 5159 p[i].efi_gpe_StartingLBA = LE_64(p[i].efi_gpe_StartingLBA); 5160 p[i].efi_gpe_EndingLBA = LE_64(p[i].efi_gpe_EndingLBA); 5161 /* PartitionAttrs */ 5162 } 5163 } 5164 5165 static int 5166 sd_validate_efi(efi_gpt_t *labp) 5167 { 5168 if (labp->efi_gpt_Signature != EFI_SIGNATURE) 5169 return (EINVAL); 5170 /* at least 96 bytes in this version of the spec.
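 * Schematically, a header is accepted only if (sketch):
 *
 *	Signature == EFI_SIGNATURE	(the ASCII "EFI PART" tag)
 *	HeaderSize >= sizeof (efi_gpt_t) - sizeof (Reserved2)
 *	SizeOfPartitionEntry == sizeof (efi_gpe_t)	(128 bytes)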
*/ 5171 if (sizeof (efi_gpt_t) - sizeof (labp->efi_gpt_Reserved2) > 5172 labp->efi_gpt_HeaderSize) 5173 return (EINVAL); 5174 /* this should be 128 bytes */ 5175 if (labp->efi_gpt_SizeOfPartitionEntry != sizeof (efi_gpe_t)) 5176 return (EINVAL); 5177 return (0); 5178 } 5179 5180 static int 5181 sd_use_efi(struct sd_lun *un, int path_flag) 5182 { 5183 int i; 5184 int rval = 0; 5185 efi_gpe_t *partitions; 5186 uchar_t *buf; 5187 uint_t lbasize; 5188 uint64_t cap; 5189 uint_t nparts; 5190 diskaddr_t gpe_lba; 5191 5192 ASSERT(mutex_owned(SD_MUTEX(un))); 5193 lbasize = un->un_tgt_blocksize; 5194 5195 mutex_exit(SD_MUTEX(un)); 5196 5197 buf = kmem_zalloc(EFI_MIN_ARRAY_SIZE, KM_SLEEP); 5198 5199 if (un->un_tgt_blocksize != un->un_sys_blocksize) { 5200 rval = EINVAL; 5201 goto done_err; 5202 } 5203 5204 rval = sd_send_scsi_READ(un, buf, lbasize, 0, path_flag); 5205 if (rval) { 5206 goto done_err; 5207 } 5208 if (((struct dk_label *)buf)->dkl_magic == DKL_MAGIC) { 5209 /* not ours */ 5210 rval = ESRCH; 5211 goto done_err; 5212 } 5213 5214 rval = sd_send_scsi_READ(un, buf, lbasize, 1, path_flag); 5215 if (rval) { 5216 goto done_err; 5217 } 5218 sd_swap_efi_gpt((efi_gpt_t *)buf); 5219 5220 if ((rval = sd_validate_efi((efi_gpt_t *)buf)) != 0) { 5221 /* 5222 * Couldn't read the primary, try the backup. Our 5223 * capacity at this point could be based on CHS, so 5224 * check what the device reports. 5225 */ 5226 rval = sd_send_scsi_READ_CAPACITY(un, &cap, &lbasize, 5227 path_flag); 5228 if (rval) { 5229 goto done_err; 5230 } 5231 if ((rval = sd_send_scsi_READ(un, buf, lbasize, 5232 cap - 1, path_flag)) != 0) { 5233 goto done_err; 5234 } 5235 sd_swap_efi_gpt((efi_gpt_t *)buf); 5236 if ((rval = sd_validate_efi((efi_gpt_t *)buf)) != 0) 5237 goto done_err; 5238 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5239 "primary label corrupt; using backup\n"); 5240 } 5241 5242 nparts = ((efi_gpt_t *)buf)->efi_gpt_NumberOfPartitionEntries; 5243 gpe_lba = ((efi_gpt_t *)buf)->efi_gpt_PartitionEntryLBA; 5244 5245 rval = sd_send_scsi_READ(un, buf, EFI_MIN_ARRAY_SIZE, gpe_lba, 5246 path_flag); 5247 if (rval) { 5248 goto done_err; 5249 } 5250 partitions = (efi_gpe_t *)buf; 5251 5252 if (nparts > MAXPART) { 5253 nparts = MAXPART; 5254 } 5255 sd_swap_efi_gpe(nparts, partitions); 5256 5257 mutex_enter(SD_MUTEX(un)); 5258 5259 /* Fill in partition table. */ 5260 for (i = 0; i < nparts; i++) { 5261 if (partitions->efi_gpe_StartingLBA != 0 || 5262 partitions->efi_gpe_EndingLBA != 0) { 5263 un->un_map[i].dkl_cylno = 5264 partitions->efi_gpe_StartingLBA; 5265 un->un_map[i].dkl_nblk = 5266 partitions->efi_gpe_EndingLBA - 5267 partitions->efi_gpe_StartingLBA + 1; 5268 un->un_offset[i] = 5269 partitions->efi_gpe_StartingLBA; 5270 } 5271 if (i == WD_NODE) { 5272 /* 5273 * minor number 7 corresponds to the whole disk 5274 */ 5275 un->un_map[i].dkl_cylno = 0; 5276 un->un_map[i].dkl_nblk = un->un_blockcount; 5277 un->un_offset[i] = 0; 5278 } 5279 partitions++; 5280 } 5281 un->un_solaris_offset = 0; 5282 un->un_solaris_size = cap; 5283 un->un_f_geometry_is_valid = TRUE; 5284 kmem_free(buf, EFI_MIN_ARRAY_SIZE); 5285 return (0); 5286 5287 done_err: 5288 kmem_free(buf, EFI_MIN_ARRAY_SIZE); 5289 mutex_enter(SD_MUTEX(un)); 5290 /* 5291 * if we didn't find something that could look like a VTOC 5292 * and the disk is over 1TB, we know there isn't a valid label. 5293 * Otherwise let sd_uselabel decide what to do. 
We only 5294 * want to invalidate this if we're certain the label isn't 5295 * valid because sd_prop_op will now fail, which in turn 5296 * causes things like opens and stats on the partition to fail. 5297 */ 5298 if ((un->un_blockcount > DK_MAX_BLOCKS) && (rval != ESRCH)) { 5299 un->un_f_geometry_is_valid = FALSE; 5300 } 5301 return (rval); 5302 } 5303 5304 5305 /* 5306 * Function: sd_uselabel 5307 * 5308 * Description: Validate the disk label and update the relevant data (geometry, 5309 * partition, vtoc, and capacity data) in the sd_lun struct. 5310 * Marks the geometry of the unit as being valid. 5311 * 5312 * Arguments: un: unit struct. 5313 * labp: disk label 5314 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 5315 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 5316 * to use the USCSI "direct" chain and bypass the normal 5317 * command waitq. 5318 * 5319 * Return Code: SD_LABEL_IS_VALID: Label read from disk is OK; geometry, 5320 * partition, vtoc, and capacity data are good. 5321 * 5322 * SD_LABEL_IS_INVALID: Magic number or checksum error in the 5323 * label; or computed capacity does not jibe with capacity 5324 * reported from the READ CAPACITY command. 5325 * 5326 * Context: Kernel thread only (can sleep). 5327 */ 5328 5329 static int 5330 sd_uselabel(struct sd_lun *un, struct dk_label *labp, int path_flag) 5331 { 5332 short *sp; 5333 short sum; 5334 short count; 5335 int label_error = SD_LABEL_IS_VALID; 5336 int i; 5337 int capacity; 5338 int part_end; 5339 int track_capacity; 5340 int err; 5341 #if defined(_SUNOS_VTOC_16) 5342 struct dkl_partition *vpartp; 5343 #endif 5344 ASSERT(un != NULL); 5345 ASSERT(mutex_owned(SD_MUTEX(un))); 5346 5347 /* Validate the magic number of the label. */ 5348 if (labp->dkl_magic != DKL_MAGIC) { 5349 #if defined(__sparc) 5350 if ((un->un_state == SD_STATE_NORMAL) && 5351 !ISREMOVABLE(un)) { 5352 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5353 "Corrupt label; wrong magic number\n"); 5354 } 5355 #endif 5356 return (SD_LABEL_IS_INVALID); 5357 } 5358 5359 /* Validate the checksum of the label. */ 5360 sp = (short *)labp; 5361 sum = 0; 5362 count = sizeof (struct dk_label) / sizeof (short); 5363 while (count--) { 5364 sum ^= *sp++; 5365 } 5366 5367 if (sum != 0) { 5368 #if defined(_SUNOS_VTOC_16) 5369 if (un->un_state == SD_STATE_NORMAL && !ISCD(un)) { 5370 #elif defined(_SUNOS_VTOC_8) 5371 if (un->un_state == SD_STATE_NORMAL && !ISREMOVABLE(un)) { 5372 #endif 5373 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5374 "Corrupt label - label checksum failed\n"); 5375 } 5376 return (SD_LABEL_IS_INVALID); 5377 } 5378 5379 5380 /* 5381 * Fill in geometry structure with data from label. 5382 */ 5383 bzero(&un->un_g, sizeof (struct dk_geom)); 5384 un->un_g.dkg_ncyl = labp->dkl_ncyl; 5385 un->un_g.dkg_acyl = labp->dkl_acyl; 5386 un->un_g.dkg_bcyl = 0; 5387 un->un_g.dkg_nhead = labp->dkl_nhead; 5388 un->un_g.dkg_nsect = labp->dkl_nsect; 5389 un->un_g.dkg_intrlv = labp->dkl_intrlv; 5390 5391 #if defined(_SUNOS_VTOC_8) 5392 un->un_g.dkg_gap1 = labp->dkl_gap1; 5393 un->un_g.dkg_gap2 = labp->dkl_gap2; 5394 un->un_g.dkg_bhead = labp->dkl_bhead; 5395 #endif 5396 #if defined(_SUNOS_VTOC_16) 5397 un->un_dkg_skew = labp->dkl_skew; 5398 #endif 5399 5400 #if defined(__i386) || defined(__amd64) 5401 un->un_g.dkg_apc = labp->dkl_apc; 5402 #endif 5403 5404 /* 5405 * Currently we rely on the values in the label being accurate. If 5406 * dkl_rpm or dkl_pcyl are zero in the label, use a default value.
5407 * 5408 * Note: In the future a MODE SENSE may be used to retrieve this data, 5409 * although this command is optional in SCSI-2. 5410 */ 5411 un->un_g.dkg_rpm = (labp->dkl_rpm != 0) ? labp->dkl_rpm : 3600; 5412 un->un_g.dkg_pcyl = (labp->dkl_pcyl != 0) ? labp->dkl_pcyl : 5413 (un->un_g.dkg_ncyl + un->un_g.dkg_acyl); 5414 5415 /* 5416 * The Read and Write reinstruct values may not be valid 5417 * for older disks. 5418 */ 5419 un->un_g.dkg_read_reinstruct = labp->dkl_read_reinstruct; 5420 un->un_g.dkg_write_reinstruct = labp->dkl_write_reinstruct; 5421 5422 /* Fill in partition table. */ 5423 #if defined(_SUNOS_VTOC_8) 5424 for (i = 0; i < NDKMAP; i++) { 5425 un->un_map[i].dkl_cylno = labp->dkl_map[i].dkl_cylno; 5426 un->un_map[i].dkl_nblk = labp->dkl_map[i].dkl_nblk; 5427 } 5428 #endif 5429 #if defined(_SUNOS_VTOC_16) 5430 vpartp = labp->dkl_vtoc.v_part; 5431 track_capacity = labp->dkl_nhead * labp->dkl_nsect; 5432 5433 for (i = 0; i < NDKMAP; i++, vpartp++) { 5434 un->un_map[i].dkl_cylno = vpartp->p_start / track_capacity; 5435 un->un_map[i].dkl_nblk = vpartp->p_size; 5436 } 5437 #endif 5438 5439 /* Fill in VTOC Structure. */ 5440 bcopy(&labp->dkl_vtoc, &un->un_vtoc, sizeof (struct dk_vtoc)); 5441 #if defined(_SUNOS_VTOC_8) 5442 /* 5443 * The 8-slice vtoc does not include the ascii label; save it into 5444 * the device's soft state structure here. 5445 */ 5446 bcopy(labp->dkl_asciilabel, un->un_asciilabel, LEN_DKL_ASCII); 5447 #endif 5448 5449 /* Mark the geometry as valid. */ 5450 un->un_f_geometry_is_valid = TRUE; 5451 5452 /* Now look for a valid capacity. */ 5453 track_capacity = (un->un_g.dkg_nhead * un->un_g.dkg_nsect); 5454 capacity = (un->un_g.dkg_ncyl * track_capacity); 5455 5456 if (un->un_g.dkg_acyl) { 5457 #if defined(__i386) || defined(__amd64) 5458 /* we may have > 1 alts cylinder */ 5459 capacity += (track_capacity * un->un_g.dkg_acyl); 5460 #else 5461 capacity += track_capacity; 5462 #endif 5463 } 5464 5465 /* 5466 * At this point, un->un_blockcount should contain valid data from 5467 * the READ CAPACITY command. 5468 */ 5469 if (un->un_f_blockcount_is_valid != TRUE) { 5470 /* 5471 * We have a situation where the target didn't give us a good 5472 * READ CAPACITY value, yet there appears to be a valid label. 5473 * In this case, we'll fake the capacity. 5474 */ 5475 un->un_blockcount = capacity; 5476 un->un_f_blockcount_is_valid = TRUE; 5477 goto done; 5478 } 5479 5480 5481 if ((capacity <= un->un_blockcount) || 5482 (un->un_state != SD_STATE_NORMAL)) { 5483 #if defined(_SUNOS_VTOC_8) 5484 /* 5485 * We can't let this happen on drives that are subdivided 5486 * into logical disks (i.e., that have an fdisk table). 5487 * The un_blockcount field should always hold the full media 5488 * size in sectors, period. This code would overwrite 5489 * un_blockcount with the size of the Solaris fdisk partition. 5490 */ 5491 SD_ERROR(SD_LOG_COMMON, un, 5492 "sd_uselabel: Label %d blocks; Drive %d blocks\n", 5493 capacity, un->un_blockcount); 5494 un->un_blockcount = capacity; 5495 un->un_f_blockcount_is_valid = TRUE; 5496 #endif /* defined(_SUNOS_VTOC_8) */ 5497 goto done; 5498 } 5499 5500 if (ISCD(un)) { 5501 /* For CDROMs, we trust that the data in the label is OK. 
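 * The per-slice sanity check that follows reduces to this sketch
 * (the _SUNOS_VTOC_16 variant uses p_start + p_size directly):
 *
 *	part_end = nhead * nsect * start_cyl + nblk - 1;
 *	slice is acceptable iff nblk == 0 || part_end <= un_blockcount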
*/ 5502 #if defined(_SUNOS_VTOC_8) 5503 for (i = 0; i < NDKMAP; i++) { 5504 part_end = labp->dkl_nhead * labp->dkl_nsect * 5505 labp->dkl_map[i].dkl_cylno + 5506 labp->dkl_map[i].dkl_nblk - 1; 5507 5508 if ((labp->dkl_map[i].dkl_nblk) && 5509 (part_end > un->un_blockcount)) { 5510 un->un_f_geometry_is_valid = FALSE; 5511 break; 5512 } 5513 } 5514 #endif 5515 #if defined(_SUNOS_VTOC_16) 5516 vpartp = &(labp->dkl_vtoc.v_part[0]); 5517 for (i = 0; i < NDKMAP; i++, vpartp++) { 5518 part_end = vpartp->p_start + vpartp->p_size; 5519 if ((vpartp->p_size > 0) && 5520 (part_end > un->un_blockcount)) { 5521 un->un_f_geometry_is_valid = FALSE; 5522 break; 5523 } 5524 } 5525 #endif 5526 } else { 5527 uint64_t t_capacity; 5528 uint32_t t_lbasize; 5529 5530 mutex_exit(SD_MUTEX(un)); 5531 err = sd_send_scsi_READ_CAPACITY(un, &t_capacity, &t_lbasize, 5532 path_flag); 5533 ASSERT(t_capacity <= DK_MAX_BLOCKS); 5534 mutex_enter(SD_MUTEX(un)); 5535 5536 if (err == 0) { 5537 sd_update_block_info(un, t_lbasize, t_capacity); 5538 } 5539 5540 if (capacity > un->un_blockcount) { 5541 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5542 "Corrupt label - bad geometry\n"); 5543 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 5544 "Label says %u blocks; Drive says %llu blocks\n", 5545 capacity, (unsigned long long)un->un_blockcount); 5546 un->un_f_geometry_is_valid = FALSE; 5547 label_error = SD_LABEL_IS_INVALID; 5548 } 5549 } 5550 5551 done: 5552 5553 SD_INFO(SD_LOG_COMMON, un, "sd_uselabel: (label geometry)\n"); 5554 SD_INFO(SD_LOG_COMMON, un, 5555 " ncyl: %d; acyl: %d; nhead: %d; nsect: %d\n", 5556 un->un_g.dkg_ncyl, un->un_g.dkg_acyl, 5557 un->un_g.dkg_nhead, un->un_g.dkg_nsect); 5558 SD_INFO(SD_LOG_COMMON, un, 5559 " lbasize: %d; capacity: %d; intrlv: %d; rpm: %d\n", 5560 un->un_tgt_blocksize, un->un_blockcount, 5561 un->un_g.dkg_intrlv, un->un_g.dkg_rpm); 5562 SD_INFO(SD_LOG_COMMON, un, " wrt_reinstr: %d; rd_reinstr: %d\n", 5563 un->un_g.dkg_write_reinstruct, un->un_g.dkg_read_reinstruct); 5564 5565 ASSERT(mutex_owned(SD_MUTEX(un))); 5566 5567 return (label_error); 5568 } 5569 5570 5571 /* 5572 * Function: sd_build_default_label 5573 * 5574 * Description: Generate a default label for those devices that do not have 5575 * one, e.g., new media, removable cartridges, etc. 5576 * 5577 * Context: Kernel thread only 5578 */ 5579 5580 static void 5581 sd_build_default_label(struct sd_lun *un) 5582 { 5583 #if defined(_SUNOS_VTOC_16) 5584 uint_t phys_spc; 5585 uint_t disksize; 5586 struct dk_geom un_g; 5587 #endif 5588 5589 ASSERT(un != NULL); 5590 ASSERT(mutex_owned(SD_MUTEX(un))); 5591 5592 #if defined(_SUNOS_VTOC_8) 5593 /* 5594 * Note: This is a legacy check for non-removable devices on VTOC_8 5595 * only. This may be a valid check for VTOC_16 as well. 5596 */ 5597 if (!ISREMOVABLE(un)) { 5598 return; 5599 } 5600 #endif 5601 5602 bzero(&un->un_g, sizeof (struct dk_geom)); 5603 bzero(&un->un_vtoc, sizeof (struct dk_vtoc)); 5604 bzero(&un->un_map, NDKMAP * (sizeof (struct dk_map))); 5605 5606 #if defined(_SUNOS_VTOC_8) 5607 5608 /* 5609 * It's REMOVABLE media, therefore no label (on sparc, anyway). 5610 * But it is still necessary to set up various geometry information, 5611 * and we are doing this here. 5612 */ 5613 5614 /* 5615 * For the rpm, we use the minimum for the disk. For the heads and the 5616 * number of sectors per track: if the capacity is <= 1GB, use 5617 * head = 64 and sect = 32; else use head = 255 and sect = 63. 5618 * Note: the capacity should be equal to the C*H*S value.
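 * (Worked example with a hypothetical count: un_blockcount = 2097151
 * falls in the 64-head/32-sector tier, so ncyl = 2097151 / (64 * 32)
 * truncates to 1023 and the recomputed capacity is
 * 1023 * 64 * 32 = 2095104, i.e. 2047 blocks are lost to round-off.)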
The recalculation will cause some truncation of size due 5619 * to round-off errors. For CD-ROMs, this truncation can have adverse 5620 * side effects, so we return ncyl and nhead as 1. The nsect would 5621 * overflow for most CD-ROMs, as nsect is of type ushort. (4190569) 5622 */ 5623 if (ISCD(un)) { 5624 /* 5625 * Preserve the old behavior for non-writable 5626 * media. Since dkg_nsect is a ushort, it 5627 * would lose bits, as CD-ROMs have more than 5628 * 65536 sectors. So if we recalculated the 5629 * capacity, it would become much shorter. 5630 * But the dkg_* information is not 5631 * used for CD-ROMs, so that is OK. For 5632 * writable CDs, however, we need this information 5633 * to be valid (for newfs, say). So we 5634 * make nsect and nhead > 1; that way 5635 * nsect can still stay within the ushort limit 5636 * without losing any bits. 5637 */ 5638 if (un->un_f_mmc_writable_media == TRUE) { 5639 un->un_g.dkg_nhead = 64; 5640 un->un_g.dkg_nsect = 32; 5641 un->un_g.dkg_ncyl = un->un_blockcount / (64 * 32); 5642 un->un_blockcount = un->un_g.dkg_ncyl * 5643 un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5644 } else { 5645 un->un_g.dkg_ncyl = 1; 5646 un->un_g.dkg_nhead = 1; 5647 un->un_g.dkg_nsect = un->un_blockcount; 5648 } 5649 } else { 5650 if (un->un_blockcount <= 0x1000) { 5651 /* unlabeled SCSI floppy device */ 5652 un->un_g.dkg_nhead = 2; 5653 un->un_g.dkg_ncyl = 80; 5654 un->un_g.dkg_nsect = un->un_blockcount / (2 * 80); 5655 } else if (un->un_blockcount <= 0x200000) { 5656 un->un_g.dkg_nhead = 64; 5657 un->un_g.dkg_nsect = 32; 5658 un->un_g.dkg_ncyl = un->un_blockcount / (64 * 32); 5659 } else { 5660 un->un_g.dkg_nhead = 255; 5661 un->un_g.dkg_nsect = 63; 5662 un->un_g.dkg_ncyl = un->un_blockcount / (255 * 63); 5663 } 5664 un->un_blockcount = 5665 un->un_g.dkg_ncyl * un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5666 } 5667 5668 un->un_g.dkg_acyl = 0; 5669 un->un_g.dkg_bcyl = 0; 5670 un->un_g.dkg_rpm = 200; 5671 un->un_asciilabel[0] = '\0'; 5672 un->un_g.dkg_pcyl = un->un_g.dkg_ncyl; 5673 5674 un->un_map[0].dkl_cylno = 0; 5675 un->un_map[0].dkl_nblk = un->un_blockcount; 5676 un->un_map[2].dkl_cylno = 0; 5677 un->un_map[2].dkl_nblk = un->un_blockcount; 5678 5679 #elif defined(_SUNOS_VTOC_16) 5680 5681 if (un->un_solaris_size == 0) { 5682 /* 5683 * We have an fdisk table but no Solaris entry, so 5684 * don't create a default label. 5685 */ 5686 un->un_f_geometry_is_valid = TRUE; 5687 return; 5688 } 5689 5690 /* 5691 * For CDs we continue to use the physical geometry to calculate 5692 * number of cylinders. All other devices must convert the 5693 * physical geometry (geom_cache) to values that will fit 5694 * in a dk_geom structure. 5695 */ 5696 if (ISCD(un)) { 5697 phys_spc = un->un_pgeom.g_nhead * un->un_pgeom.g_nsect; 5698 } else { 5699 /* Convert physical geometry to disk geometry */ 5700 bzero(&un_g, sizeof (struct dk_geom)); 5701 sd_convert_geometry(un->un_blockcount, &un_g); 5702 bcopy(&un_g, &un->un_g, sizeof (un->un_g)); 5703 phys_spc = un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5704 } 5705 5706 un->un_g.dkg_pcyl = un->un_solaris_size / phys_spc; 5707 un->un_g.dkg_acyl = DK_ACYL; 5708 un->un_g.dkg_ncyl = un->un_g.dkg_pcyl - DK_ACYL; 5709 disksize = un->un_g.dkg_ncyl * phys_spc; 5710 5711 if (ISCD(un)) { 5712 /* 5713 * CDs don't use the "heads * sectors * cyls"-type of 5714 * geometry, but instead use the entire capacity of the media. 5715 */ 5716 disksize = un->un_solaris_size; 5717 un->un_g.dkg_nhead = 1; 5718 un->un_g.dkg_nsect = 1; 5719 un->un_g.dkg_rpm = 5720 (un->un_pgeom.g_rpm == 0) ?
200 : un->un_pgeom.g_rpm; 5721 5722 un->un_vtoc.v_part[0].p_start = 0; 5723 un->un_vtoc.v_part[0].p_size = disksize; 5724 un->un_vtoc.v_part[0].p_tag = V_BACKUP; 5725 un->un_vtoc.v_part[0].p_flag = V_UNMNT; 5726 5727 un->un_map[0].dkl_cylno = 0; 5728 un->un_map[0].dkl_nblk = disksize; 5729 un->un_offset[0] = 0; 5730 5731 } else { 5732 /* 5733 * Hard disks and removable media cartridges 5734 */ 5735 un->un_g.dkg_rpm = 5736 (un->un_pgeom.g_rpm == 0) ? 3600: un->un_pgeom.g_rpm; 5737 un->un_vtoc.v_sectorsz = un->un_sys_blocksize; 5738 5739 /* Add boot slice */ 5740 un->un_vtoc.v_part[8].p_start = 0; 5741 un->un_vtoc.v_part[8].p_size = phys_spc; 5742 un->un_vtoc.v_part[8].p_tag = V_BOOT; 5743 un->un_vtoc.v_part[8].p_flag = V_UNMNT; 5744 5745 un->un_map[8].dkl_cylno = 0; 5746 un->un_map[8].dkl_nblk = phys_spc; 5747 un->un_offset[8] = 0; 5748 } 5749 5750 un->un_g.dkg_apc = 0; 5751 un->un_vtoc.v_nparts = V_NUMPAR; 5752 un->un_vtoc.v_version = V_VERSION; 5753 5754 /* Add backup slice */ 5755 un->un_vtoc.v_part[2].p_start = 0; 5756 un->un_vtoc.v_part[2].p_size = disksize; 5757 un->un_vtoc.v_part[2].p_tag = V_BACKUP; 5758 un->un_vtoc.v_part[2].p_flag = V_UNMNT; 5759 5760 un->un_map[2].dkl_cylno = 0; 5761 un->un_map[2].dkl_nblk = disksize; 5762 un->un_offset[2] = 0; 5763 5764 (void) sprintf(un->un_vtoc.v_asciilabel, "DEFAULT cyl %d alt %d" 5765 " hd %d sec %d", un->un_g.dkg_ncyl, un->un_g.dkg_acyl, 5766 un->un_g.dkg_nhead, un->un_g.dkg_nsect); 5767 5768 #else 5769 #error "No VTOC format defined." 5770 #endif 5771 5772 un->un_g.dkg_read_reinstruct = 0; 5773 un->un_g.dkg_write_reinstruct = 0; 5774 5775 un->un_g.dkg_intrlv = 1; 5776 5777 un->un_vtoc.v_sanity = VTOC_SANE; 5778 5779 un->un_f_geometry_is_valid = TRUE; 5780 5781 SD_INFO(SD_LOG_COMMON, un, 5782 "sd_build_default_label: Default label created: " 5783 "cyl: %d\tacyl: %d\tnhead: %d\tnsect: %d\tcap: %d\n", 5784 un->un_g.dkg_ncyl, un->un_g.dkg_acyl, un->un_g.dkg_nhead, 5785 un->un_g.dkg_nsect, un->un_blockcount); 5786 } 5787 5788 5789 #if defined(_FIRMWARE_NEEDS_FDISK) 5790 /* 5791 * Max CHS values, as they are encoded into bytes, for 1022/254/63 5792 */ 5793 #define LBA_MAX_SECT (63 | ((1022 & 0x300) >> 2)) 5794 #define LBA_MAX_CYL (1022 & 0xFF) 5795 #define LBA_MAX_HEAD (254) 5796 5797 5798 /* 5799 * Function: sd_has_max_chs_vals 5800 * 5801 * Description: Return TRUE if Cylinder-Head-Sector values are all at maximum. 5802 * 5803 * Arguments: fdp - ptr to CHS info 5804 * 5805 * Return Code: True or false 5806 * 5807 * Context: Any. 5808 */ 5809 5810 static int 5811 sd_has_max_chs_vals(struct ipart *fdp) 5812 { 5813 return ((fdp->begcyl == LBA_MAX_CYL) && 5814 (fdp->beghead == LBA_MAX_HEAD) && 5815 (fdp->begsect == LBA_MAX_SECT) && 5816 (fdp->endcyl == LBA_MAX_CYL) && 5817 (fdp->endhead == LBA_MAX_HEAD) && 5818 (fdp->endsect == LBA_MAX_SECT)); 5819 } 5820 #endif 5821 5822 5823 /* 5824 * Function: sd_inq_fill 5825 * 5826 * Description: Print a piece of inquiry data, cleaned up for non-printable 5827 * characters and stopping at the first space character after 5828 * the beginning of the passed string; 5829 * 5830 * Arguments: p - source string 5831 * l - maximum length to copy 5832 * s - destination string 5833 * 5834 * Context: Any. 
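 *
 * Illustrative behavior (hypothetical input): with p = "SEAGATE ST39204LC"
 * and l = 8, the destination receives "SEAGATE" plus a terminating NUL;
 * copying stops at the space following the first character, and any
 * byte outside the printable ASCII range would be stored as '*'.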
5835 */ 5836 5837 static void 5838 sd_inq_fill(char *p, int l, char *s) 5839 { 5840 unsigned i = 0; 5841 char c; 5842 5843 while (i++ < l) { 5844 if ((c = *p++) < ' ' || c >= 0x7F) { 5845 c = '*'; 5846 } else if (i != 1 && c == ' ') { 5847 break; 5848 } 5849 *s++ = c; 5850 } 5851 *s++ = 0; 5852 } 5853 5854 5855 /* 5856 * Function: sd_register_devid 5857 * 5858 * Description: This routine will obtain the device id information from the 5859 * target, obtain the serial number, and register the device 5860 * id with the ddi framework. 5861 * 5862 * Arguments: devi - the system's dev_info_t for the device. 5863 * un - driver soft state (unit) structure 5864 * reservation_flag - indicates if a reservation conflict 5865 * occurred during attach 5866 * 5867 * Context: Kernel Thread 5868 */ 5869 static void 5870 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag) 5871 { 5872 int rval = 0; 5873 uchar_t *inq80 = NULL; 5874 size_t inq80_len = MAX_INQUIRY_SIZE; 5875 size_t inq80_resid = 0; 5876 uchar_t *inq83 = NULL; 5877 size_t inq83_len = MAX_INQUIRY_SIZE; 5878 size_t inq83_resid = 0; 5879 5880 ASSERT(un != NULL); 5881 ASSERT(mutex_owned(SD_MUTEX(un))); 5882 ASSERT((SD_DEVINFO(un)) == devi); 5883 5884 /* 5885 * This is the case of antiquated Sun disk drives that have the 5886 * FAB_DEVID property set in the disk_table. These drives 5887 * manage the devids by storing them in the last 2 available sectors 5888 * on the drive and have them fabricated by the ddi layer by calling 5889 * ddi_devid_init and passing the DEVID_FAB flag. 5890 */ 5891 if (un->un_f_opt_fab_devid == TRUE) { 5892 /* 5893 * Depending on EINVAL isn't reliable, since a reserved disk 5894 * may result in invalid geometry, so check to make sure a 5895 * reservation conflict did not occur during attach. 5896 */ 5897 if ((sd_get_devid(un) == EINVAL) && 5898 (reservation_flag != SD_TARGET_IS_RESERVED)) { 5899 /* 5900 * The devid is invalid AND there is no reservation 5901 * conflict. Fabricate a new devid. 5902 */ 5903 (void) sd_create_devid(un); 5904 } 5905 5906 /* Register the devid if it exists */ 5907 if (un->un_devid != NULL) { 5908 (void) ddi_devid_register(SD_DEVINFO(un), 5909 un->un_devid); 5910 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5911 "sd_register_devid: Devid Fabricated\n"); 5912 } 5913 return; 5914 } 5915 5916 /* 5917 * We check the availability of the World Wide Name (0x83) and Unit 5918 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 5919 * un_vpd_page_mask from them, we decide which way to get the WWN. If 5920 * 0x83 is available, that is the best choice. Our next choice is 5921 * 0x80. If neither is available, we munge the devid from the device 5922 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 5923 * to fabricate a devid for non-Sun qualified disks.
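 *
 * For reference, the two VPD fetches below are plain INQUIRY commands
 * with the EVPD bit set (sketch; byte layout per the SCSI spec):
 *
 *	CDB[0] = 0x12	(INQUIRY opcode)
 *	CDB[1] = 0x01	(EVPD bit)
 *	CDB[2] = 0x80 or 0x83	(desired page code)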
5924 */ 5925 if (sd_check_vpd_page_support(un) == 0) { 5926 /* collect page 80 data if available */ 5927 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 5928 5929 mutex_exit(SD_MUTEX(un)); 5930 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 5931 rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len, 5932 0x01, 0x80, &inq80_resid); 5933 5934 if (rval != 0) { 5935 kmem_free(inq80, inq80_len); 5936 inq80 = NULL; 5937 inq80_len = 0; 5938 } 5939 mutex_enter(SD_MUTEX(un)); 5940 } 5941 5942 /* collect page 83 data if available */ 5943 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 5944 5945 mutex_exit(SD_MUTEX(un)); 5946 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 5947 rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len, 5948 0x01, 0x83, &inq83_resid); 5949 5950 if (rval != 0) { 5951 kmem_free(inq83, inq83_len); 5952 inq83 = NULL; 5953 inq83_len = 0; 5954 } 5955 mutex_enter(SD_MUTEX(un)); 5956 } 5957 } 5958 5959 /* encode best devid possible based on data available */ 5960 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 5961 (char *)ddi_driver_name(SD_DEVINFO(un)), 5962 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 5963 inq80, inq80_len - inq80_resid, inq83, inq83_len - 5964 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 5965 5966 /* devid successfully encoded, register devid */ 5967 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 5968 5969 } else { 5970 /* 5971 * Unable to encode a devid based on data available. 5972 * This is not a Sun qualified disk. Older Sun disk 5973 * drives that have the SD_FAB_DEVID property 5974 * set in the disk_table and non-Sun qualified 5975 * disks are treated in the same manner. These 5976 * drives manage the devids by storing them in the 5977 * last 2 available sectors on the drive and 5978 * have them fabricated by the ddi layer by 5979 * calling ddi_devid_init and passing the 5980 * DEVID_FAB flag. 5981 * Create a fabricated devid only if no fabricated 5982 * devid already exists. 5983 */ 5984 if (sd_get_devid(un) == EINVAL) { 5985 (void) sd_create_devid(un); 5986 un->un_f_opt_fab_devid = TRUE; 5987 } 5988 5989 /* Register the devid if it exists */ 5990 if (un->un_devid != NULL) { 5991 (void) ddi_devid_register(SD_DEVINFO(un), 5992 un->un_devid); 5993 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5994 "sd_register_devid: devid fabricated using " 5995 "ddi framework\n"); 5996 } 5997 } 5998 5999 /* clean up resources */ 6000 if (inq80 != NULL) { 6001 kmem_free(inq80, inq80_len); 6002 } 6003 if (inq83 != NULL) { 6004 kmem_free(inq83, inq83_len); 6005 } 6006 } 6007 6008 static daddr_t 6009 sd_get_devid_block(struct sd_lun *un) 6010 { 6011 daddr_t spc, blk, head, cyl; 6012 6013 if (un->un_blockcount <= DK_MAX_BLOCKS) { 6014 /* this geometry doesn't allow us to write a devid */ 6015 if (un->un_g.dkg_acyl < 2) { 6016 return (-1); 6017 } 6018 6019 /* 6020 * Subtracting 2 guarantees that the next-to-last cylinder 6021 * is used 6022 */ 6023 cyl = un->un_g.dkg_ncyl + un->un_g.dkg_acyl - 2; 6024 spc = un->un_g.dkg_nhead * un->un_g.dkg_nsect; 6025 head = un->un_g.dkg_nhead - 1; 6026 blk = (cyl * (spc - un->un_g.dkg_apc)) + 6027 (head * un->un_g.dkg_nsect) + 1; 6028 } else { 6029 if (un->un_reserved != -1) { 6030 blk = un->un_map[un->un_reserved].dkl_cylno + 1; 6031 } else { 6032 return (-1); 6033 } 6034 } 6035 return (blk); 6036 } 6037 6038 /* 6039 * Function: sd_get_devid 6040 * 6041 * Description: This routine will return 0 if a valid device id has been 6042 * obtained from the target and stored in the soft state.
If a 6043 * valid device id has not been previously read and stored, a 6044 * read attempt will be made. 6045 * 6046 * Arguments: un - driver soft state (unit) structure 6047 * 6048 * Return Code: 0 if we successfully get the device id 6049 * 6050 * Context: Kernel Thread 6051 */ 6052 6053 static int 6054 sd_get_devid(struct sd_lun *un) 6055 { 6056 struct dk_devid *dkdevid; 6057 ddi_devid_t tmpid; 6058 uint_t *ip; 6059 size_t sz; 6060 daddr_t blk; 6061 int status; 6062 int chksum; 6063 int i; 6064 size_t buffer_size; 6065 6066 ASSERT(un != NULL); 6067 ASSERT(mutex_owned(SD_MUTEX(un))); 6068 6069 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 6070 un); 6071 6072 if (un->un_devid != NULL) { 6073 return (0); 6074 } 6075 6076 blk = sd_get_devid_block(un); 6077 if (blk < 0) 6078 return (EINVAL); 6079 6080 /* 6081 * Read and verify device id, stored in the reserved cylinders at the 6082 * end of the disk. Backup label is on the odd sectors of the last 6083 * track of the last cylinder. Device id will be on track of the next 6084 * to last cylinder. 6085 */ 6086 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 6087 mutex_exit(SD_MUTEX(un)); 6088 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 6089 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 6090 SD_PATH_DIRECT); 6091 if (status != 0) { 6092 goto error; 6093 } 6094 6095 /* Validate the revision */ 6096 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 6097 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 6098 status = EINVAL; 6099 goto error; 6100 } 6101 6102 /* Calculate the checksum */ 6103 chksum = 0; 6104 ip = (uint_t *)dkdevid; 6105 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 6106 i++) { 6107 chksum ^= ip[i]; 6108 } 6109 6110 /* Compare the checksums */ 6111 if (DKD_GETCHKSUM(dkdevid) != chksum) { 6112 status = EINVAL; 6113 goto error; 6114 } 6115 6116 /* Validate the device id */ 6117 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 6118 status = EINVAL; 6119 goto error; 6120 } 6121 6122 /* 6123 * Store the device id in the driver soft state 6124 */ 6125 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 6126 tmpid = kmem_alloc(sz, KM_SLEEP); 6127 6128 mutex_enter(SD_MUTEX(un)); 6129 6130 un->un_devid = tmpid; 6131 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 6132 6133 kmem_free(dkdevid, buffer_size); 6134 6135 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 6136 6137 return (status); 6138 error: 6139 mutex_enter(SD_MUTEX(un)); 6140 kmem_free(dkdevid, buffer_size); 6141 return (status); 6142 } 6143 6144 6145 /* 6146 * Function: sd_create_devid 6147 * 6148 * Description: This routine will fabricate the device id and write it 6149 * to the disk. 6150 * 6151 * Arguments: un - driver soft state (unit) structure 6152 * 6153 * Return Code: value of the fabricated device id 6154 * 6155 * Context: Kernel Thread 6156 */ 6157 6158 static ddi_devid_t 6159 sd_create_devid(struct sd_lun *un) 6160 { 6161 ASSERT(un != NULL); 6162 6163 /* Fabricate the devid */ 6164 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 6165 == DDI_FAILURE) { 6166 return (NULL); 6167 } 6168 6169 /* Write the devid to disk */ 6170 if (sd_write_deviceid(un) != 0) { 6171 ddi_devid_free(un->un_devid); 6172 un->un_devid = NULL; 6173 } 6174 6175 return (un->un_devid); 6176 } 6177 6178 6179 /* 6180 * Function: sd_write_deviceid 6181 * 6182 * Description: This routine will write the device id to the disk 6183 * reserved sector. 
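 *		(Sketch of the sector written below, per the code that
 *		follows: the dk_devid revision bytes, the encoded devid,
 *		and a trailing checksum that is the XOR of every
 *		preceding 32-bit word in the sector.)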
6184 * 6185 * Arguments: un - driver soft state (unit) structure 6186 * 6187 * Return Code: EINVAL 6188 * value returned by sd_send_scsi_cmd 6189 * 6190 * Context: Kernel Thread 6191 */ 6192 6193 static int 6194 sd_write_deviceid(struct sd_lun *un) 6195 { 6196 struct dk_devid *dkdevid; 6197 daddr_t blk; 6198 uint_t *ip, chksum; 6199 int status; 6200 int i; 6201 6202 ASSERT(mutex_owned(SD_MUTEX(un))); 6203 6204 blk = sd_get_devid_block(un); 6205 if (blk < 0) 6206 return (-1); 6207 mutex_exit(SD_MUTEX(un)); 6208 6209 /* Allocate the buffer */ 6210 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 6211 6212 /* Fill in the revision */ 6213 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 6214 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 6215 6216 /* Copy in the device id */ 6217 mutex_enter(SD_MUTEX(un)); 6218 bcopy(un->un_devid, &dkdevid->dkd_devid, 6219 ddi_devid_sizeof(un->un_devid)); 6220 mutex_exit(SD_MUTEX(un)); 6221 6222 /* Calculate the checksum */ 6223 chksum = 0; 6224 ip = (uint_t *)dkdevid; 6225 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 6226 i++) { 6227 chksum ^= ip[i]; 6228 } 6229 6230 /* Fill-in checksum */ 6231 DKD_FORMCHKSUM(chksum, dkdevid); 6232 6233 /* Write the reserved sector */ 6234 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk, 6235 SD_PATH_DIRECT); 6236 6237 kmem_free(dkdevid, un->un_sys_blocksize); 6238 6239 mutex_enter(SD_MUTEX(un)); 6240 return (status); 6241 } 6242 6243 6244 /* 6245 * Function: sd_check_vpd_page_support 6246 * 6247 * Description: This routine sends an inquiry command with the EVPD bit set and 6248 * a page code of 0x00 to the device. It is used to determine which 6249 * vital product pages are available to find the devid. We are 6250 * looking for pages 0x83 or 0x80. If we return -1, the 6251 * device does not support that command. 6252 * 6253 * Arguments: un - driver soft state (unit) structure 6254 * 6255 * Return Code: 0 - success 6256 * 1 - check condition; -1 - VPD pages not supported 6257 * 6258 * Context: This routine can sleep. 6259 */ 6260 6261 static int 6262 sd_check_vpd_page_support(struct sd_lun *un) 6263 { 6264 uchar_t *page_list = NULL; 6265 uchar_t page_length = 0xff; /* Use max possible length */ 6266 uchar_t evpd = 0x01; /* Set the EVPD bit */ 6267 uchar_t page_code = 0x00; /* Supported VPD Pages */ 6268 int rval = 0; 6269 int counter; 6270 6271 ASSERT(un != NULL); 6272 ASSERT(mutex_owned(SD_MUTEX(un))); 6273 6274 mutex_exit(SD_MUTEX(un)); 6275 6276 /* 6277 * We'll set the page length to the maximum to save figuring it out 6278 * with an additional call. 6279 */ 6280 page_list = kmem_zalloc(page_length, KM_SLEEP); 6281 6282 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 6283 page_code, NULL); 6284 6285 mutex_enter(SD_MUTEX(un)); 6286 6287 /* 6288 * Now we must validate that the device accepted the command, as some 6289 * drives do not support it. If the drive does support it, we will 6290 * return 0, and the supported pages will be in un_vpd_page_mask. If 6291 * not, we return -1. 6292 */ 6293 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 6294 /* Loop to find one of the 2 pages we need */ 6295 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 6296 6297 /* 6298 * Pages are returned in ascending order, and 0x83 is what we 6299 * are hoping for.
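 *
 * A page-0 response might look like (illustrative bytes only):
 *
 *	buf[0] = 0x00	(peripheral qualifier/device type)
 *	buf[1] = 0x00	(page code)
 *	buf[3] = 0x03	(page length)
 *	buf[4..6] = 0x00, 0x80, 0x83	(supported page codes)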
6300 */ 6301 while ((page_list[counter] <= 0x83) && 6302 (counter <= (page_list[VPD_PAGE_LENGTH] + 6303 VPD_HEAD_OFFSET))) { 6304 /* 6305 * Add 3 because page_list[3] is the number of 6306 * pages minus 3 6307 */ 6308 6309 switch (page_list[counter]) { 6310 case 0x00: 6311 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 6312 break; 6313 case 0x80: 6314 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 6315 break; 6316 case 0x81: 6317 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 6318 break; 6319 case 0x82: 6320 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 6321 break; 6322 case 0x83: 6323 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 6324 break; 6325 } 6326 counter++; 6327 } 6328 6329 } else { 6330 rval = -1; 6331 6332 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6333 "sd_check_vpd_page_support: This drive does not implement " 6334 "VPD pages.\n"); 6335 } 6336 6337 kmem_free(page_list, page_length); 6338 6339 return (rval); 6340 } 6341 6342 6343 /* 6344 * Function: sd_setup_pm 6345 * 6346 * Description: Initialize Power Management on the device 6347 * 6348 * Context: Kernel Thread 6349 */ 6350 6351 static void 6352 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 6353 { 6354 uint_t log_page_size; 6355 uchar_t *log_page_data; 6356 int rval; 6357 6358 /* 6359 * Since we are called from attach, holding a mutex for 6360 * un is unnecessary. Because some of the routines called 6361 * from here require SD_MUTEX to not be held, assert this 6362 * right up front. 6363 */ 6364 ASSERT(!mutex_owned(SD_MUTEX(un))); 6365 /* 6366 * Since the sd device does not have the 'reg' property, 6367 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 6368 * The following code is to tell cpr that this device 6369 * DOES need to be suspended and resumed. 6370 */ 6371 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 6372 "pm-hardware-state", "needs-suspend-resume"); 6373 6374 /* 6375 * Check if HBA has set the "pm-capable" property. 6376 * If "pm-capable" exists and is non-zero then we can 6377 * power manage the device without checking the start/stop 6378 * cycle count log sense page. 6379 * 6380 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0) 6381 * then we should not power manage the device. 6382 * 6383 * If "pm-capable" doesn't exist then un->un_pm_capable_prop will 6384 * be set to SD_PM_CAPABLE_UNDEFINED (-1). In this case, sd will 6385 * check the start/stop cycle count log sense page and power manage 6386 * the device if the cycle count limit has not been exceeded. 6387 */ 6388 un->un_pm_capable_prop = 6389 ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6390 "pm-capable", SD_PM_CAPABLE_UNDEFINED); 6391 if (un->un_pm_capable_prop != SD_PM_CAPABLE_UNDEFINED) { 6392 /* 6393 * pm-capable property exists. 6394 * 6395 * Convert "TRUE" values for un_pm_capable_prop to 6396 * SD_PM_CAPABLE_TRUE (1) to make it easier to check later. 6397 * "TRUE" values are any values except SD_PM_CAPABLE_FALSE (0) 6398 * and SD_PM_CAPABLE_UNDEFINED (-1) 6399 */ 6400 if (un->un_pm_capable_prop != SD_PM_CAPABLE_FALSE) { 6401 un->un_pm_capable_prop = SD_PM_CAPABLE_TRUE; 6402 } 6403 6404 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6405 "sd_unit_attach: un:0x%p pm-capable " 6406 "property set to %d.\n", un, un->un_pm_capable_prop); 6407 } 6408 6409 /* 6410 * This complies with the new power management framework 6411 * for certain desktop machines. Create the pm_components 6412 * property as a string array property. 
6413 * 6414 * If this is a removable device or if the pm-capable property 6415 * is SD_PM_CAPABLE_TRUE (1) then we should create the 6416 * pm_components property without checking for the existence of 6417 * the start-stop cycle counter log page. 6418 */ 6419 if (ISREMOVABLE(un) || 6420 un->un_pm_capable_prop == SD_PM_CAPABLE_TRUE) { 6421 /* 6422 * Not all devices have a motor, so try it first. 6423 * Some devices may return ILLEGAL REQUEST; some 6424 * will hang. 6425 */ 6426 un->un_f_start_stop_supported = TRUE; 6427 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 6428 SD_PATH_DIRECT) != 0) { 6429 un->un_f_start_stop_supported = FALSE; 6430 } 6431 6432 /* 6433 * Create the pm properties anyway; otherwise the parent 6434 * can't go to sleep. 6435 */ 6436 (void) sd_create_pm_components(devi, un); 6437 un->un_f_pm_is_enabled = TRUE; 6438 6439 /* 6440 * Need to create a zero length (Boolean) property 6441 * removable-media for the removable media devices. 6442 * Note that the return value of the property is not being 6443 * checked, since if we are unable to create the property 6444 * we do not want the attach to fail altogether. This is 6445 * consistent with other property creation in attach. 6446 */ 6447 if (ISREMOVABLE(un)) { 6448 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 6449 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 6450 } 6451 return; 6452 } 6453 6454 rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE); 6455 6456 #ifdef SDDEBUG 6457 if (sd_force_pm_supported) { 6458 /* Force a successful result */ 6459 rval = 1; 6460 } 6461 #endif 6462 6463 /* 6464 * If the start-stop cycle counter log page is not supported 6465 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 6466 * then we should not create the pm_components property. 6467 */ 6468 if (rval == -1 || un->un_pm_capable_prop == SD_PM_CAPABLE_FALSE) { 6469 /* 6470 * Error. 6471 * Reading log sense failed, most likely this is 6472 * an older drive that does not support log sense. 6473 * If this fails, auto-pm is not supported. 6474 */ 6475 un->un_power_level = SD_SPINDLE_ON; 6476 un->un_f_pm_is_enabled = FALSE; 6477 6478 } else if (rval == 0) { 6479 /* 6480 * Page not found. 6481 * The start stop cycle counter is implemented as page 6482 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For 6483 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 6484 */ 6485 if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) { 6486 /* 6487 * Page found, use this one. 6488 */ 6489 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 6490 un->un_f_pm_is_enabled = TRUE; 6491 } else { 6492 /* 6493 * Error or page not found. 6494 * auto-pm is not supported for this device. 6495 */ 6496 un->un_power_level = SD_SPINDLE_ON; 6497 un->un_f_pm_is_enabled = FALSE; 6498 } 6499 } else { 6500 /* 6501 * Page found, use it. 6502 */ 6503 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 6504 un->un_f_pm_is_enabled = TRUE; 6505 } 6506 6507 6508 if (un->un_f_pm_is_enabled == TRUE) { 6509 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 6510 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 6511 6512 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 6513 log_page_size, un->un_start_stop_cycle_page, 6514 0x01, 0, SD_PATH_DIRECT); 6515 #ifdef SDDEBUG 6516 if (sd_force_pm_supported) { 6517 /* Force a successful result */ 6518 rval = 0; 6519 } 6520 #endif 6521 6522 /* 6523 * If the LOG SENSE for the start/stop cycle counter page 6524 * succeeds, then power management is supported and we can 6525 * enable auto-pm.
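 *
 * (For reference: the request issued above is a standard LOG SENSE,
 * opcode 0x4d, with a page-control value of 0x01 -- current
 * cumulative values -- for whichever start/stop cycle page was
 * selected earlier.)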
6526 */ 6527 if (rval == 0) { 6528 (void) sd_create_pm_components(devi, un); 6529 } else { 6530 un->un_power_level = SD_SPINDLE_ON; 6531 un->un_f_pm_is_enabled = FALSE; 6532 } 6533 6534 kmem_free(log_page_data, log_page_size); 6535 } 6536 } 6537 6538 6539 /* 6540 * Function: sd_create_pm_components 6541 * 6542 * Description: Initialize PM property. 6543 * 6544 * Context: Kernel thread context 6545 */ 6546 6547 static void 6548 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 6549 { 6550 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 6551 6552 ASSERT(!mutex_owned(SD_MUTEX(un))); 6553 6554 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 6555 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 6556 /* 6557 * When components are initially created they are idle, 6558 * power up any non-removables. 6559 * Note: the return value of pm_raise_power can't be used 6560 * for determining if PM should be enabled for this device. 6561 * Even if you check the return values and remove this 6562 * property created above, the PM framework will not honor the 6563 * change after the first call to pm_raise_power. Hence, 6564 * removal of that property does not help if pm_raise_power 6565 * fails. In the case of removable media, the start/stop 6566 * will fail if the media is not present. 6567 */ 6568 if ((!ISREMOVABLE(un)) && (pm_raise_power(SD_DEVINFO(un), 0, 6569 SD_SPINDLE_ON) == DDI_SUCCESS)) { 6570 mutex_enter(SD_MUTEX(un)); 6571 un->un_power_level = SD_SPINDLE_ON; 6572 mutex_enter(&un->un_pm_mutex); 6573 /* Set to on and not busy. */ 6574 un->un_pm_count = 0; 6575 } else { 6576 mutex_enter(SD_MUTEX(un)); 6577 un->un_power_level = SD_SPINDLE_OFF; 6578 mutex_enter(&un->un_pm_mutex); 6579 /* Set to off. */ 6580 un->un_pm_count = -1; 6581 } 6582 mutex_exit(&un->un_pm_mutex); 6583 mutex_exit(SD_MUTEX(un)); 6584 } else { 6585 un->un_power_level = SD_SPINDLE_ON; 6586 un->un_f_pm_is_enabled = FALSE; 6587 } 6588 } 6589 6590 6591 /* 6592 * Function: sd_ddi_suspend 6593 * 6594 * Description: Performs system power-down operations. This includes 6595 * setting the drive state to indicate its suspended so 6596 * that no new commands will be accepted. Also, wait for 6597 * all commands that are in transport or queued to a timer 6598 * for retry to complete. All timeout threads are cancelled. 6599 * 6600 * Return Code: DDI_FAILURE or DDI_SUCCESS 6601 * 6602 * Context: Kernel thread context 6603 */ 6604 6605 static int 6606 sd_ddi_suspend(dev_info_t *devi) 6607 { 6608 struct sd_lun *un; 6609 clock_t wait_cmds_complete; 6610 6611 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6612 if (un == NULL) { 6613 return (DDI_FAILURE); 6614 } 6615 6616 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 6617 6618 mutex_enter(SD_MUTEX(un)); 6619 6620 /* Return success if the device is already suspended. */ 6621 if (un->un_state == SD_STATE_SUSPENDED) { 6622 mutex_exit(SD_MUTEX(un)); 6623 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6624 "device already suspended, exiting\n"); 6625 return (DDI_SUCCESS); 6626 } 6627 6628 /* Return failure if the device is being used by HA */ 6629 if (un->un_resvd_status & 6630 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 6631 mutex_exit(SD_MUTEX(un)); 6632 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6633 "device in use by HA, exiting\n"); 6634 return (DDI_FAILURE); 6635 } 6636 6637 /* 6638 * Return failure if the device is in a resource wait 6639 * or power changing state. 
6640 */ 6641 if ((un->un_state == SD_STATE_RWAIT) || 6642 (un->un_state == SD_STATE_PM_CHANGING)) { 6643 mutex_exit(SD_MUTEX(un)); 6644 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6645 "device in resource wait state, exiting\n"); 6646 return (DDI_FAILURE); 6647 } 6648 6649 6650 un->un_save_state = un->un_last_state; 6651 New_state(un, SD_STATE_SUSPENDED); 6652 6653 /* 6654 * Wait for all commands that are in transport or queued to a timer 6655 * for retry to complete. 6656 * 6657 * While waiting, no new commands will be accepted or sent because of 6658 * the new state we set above. 6659 * 6660 * Wait until the current operation has completed. If we are in the 6661 * resource wait state (with an intr outstanding) then we need to wait 6662 * until the intr completes and starts the next cmd. We want to wait for 6663 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 6664 */ 6665 wait_cmds_complete = ddi_get_lbolt() + 6666 (sd_wait_cmds_complete * drv_usectohz(1000000)); 6667 6668 while (un->un_ncmds_in_transport != 0) { 6669 /* 6670 * Fail if commands do not finish in the specified time. 6671 */ 6672 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 6673 wait_cmds_complete) == -1) { 6674 /* 6675 * Undo the state changes made above. Everything 6676 * must go back to its original value. 6677 */ 6678 Restore_state(un); 6679 un->un_last_state = un->un_save_state; 6680 /* Wake up any threads that might be waiting. */ 6681 cv_broadcast(&un->un_suspend_cv); 6682 mutex_exit(SD_MUTEX(un)); 6683 SD_ERROR(SD_LOG_IO_PM, un, 6684 "sd_ddi_suspend: failed due to outstanding cmds\n"); 6685 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 6686 return (DDI_FAILURE); 6687 } 6688 } 6689 6690 /* 6691 * Cancel SCSI watch thread and timeouts, if any are active 6692 */ 6693 6694 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 6695 opaque_t temp_token = un->un_swr_token; 6696 mutex_exit(SD_MUTEX(un)); 6697 scsi_watch_suspend(temp_token); 6698 mutex_enter(SD_MUTEX(un)); 6699 } 6700 6701 if (un->un_reset_throttle_timeid != NULL) { 6702 timeout_id_t temp_id = un->un_reset_throttle_timeid; 6703 un->un_reset_throttle_timeid = NULL; 6704 mutex_exit(SD_MUTEX(un)); 6705 (void) untimeout(temp_id); 6706 mutex_enter(SD_MUTEX(un)); 6707 } 6708 6709 if (un->un_dcvb_timeid != NULL) { 6710 timeout_id_t temp_id = un->un_dcvb_timeid; 6711 un->un_dcvb_timeid = NULL; 6712 mutex_exit(SD_MUTEX(un)); 6713 (void) untimeout(temp_id); 6714 mutex_enter(SD_MUTEX(un)); 6715 } 6716 6717 mutex_enter(&un->un_pm_mutex); 6718 if (un->un_pm_timeid != NULL) { 6719 timeout_id_t temp_id = un->un_pm_timeid; 6720 un->un_pm_timeid = NULL; 6721 mutex_exit(&un->un_pm_mutex); 6722 mutex_exit(SD_MUTEX(un)); 6723 (void) untimeout(temp_id); 6724 mutex_enter(SD_MUTEX(un)); 6725 } else { 6726 mutex_exit(&un->un_pm_mutex); 6727 } 6728 6729 if (un->un_retry_timeid != NULL) { 6730 timeout_id_t temp_id = un->un_retry_timeid; 6731 un->un_retry_timeid = NULL; 6732 mutex_exit(SD_MUTEX(un)); 6733 (void) untimeout(temp_id); 6734 mutex_enter(SD_MUTEX(un)); 6735 } 6736 6737 if (un->un_direct_priority_timeid != NULL) { 6738 timeout_id_t temp_id = un->un_direct_priority_timeid; 6739 un->un_direct_priority_timeid = NULL; 6740 mutex_exit(SD_MUTEX(un)); 6741 (void) untimeout(temp_id); 6742 mutex_enter(SD_MUTEX(un)); 6743 } 6744 6745 if (un->un_f_is_fibre == TRUE) { 6746 /* 6747 * Remove callbacks for insert and remove events 6748 */ 6749 if (un->un_insert_event != NULL) { 6750 mutex_exit(SD_MUTEX(un)); 6751 (void) ddi_remove_event_handler(un->un_insert_cb_id); 6752
mutex_enter(SD_MUTEX(un)); 6753 un->un_insert_event = NULL; 6754 } 6755 6756 if (un->un_remove_event != NULL) { 6757 mutex_exit(SD_MUTEX(un)); 6758 (void) ddi_remove_event_handler(un->un_remove_cb_id); 6759 mutex_enter(SD_MUTEX(un)); 6760 un->un_remove_event = NULL; 6761 } 6762 } 6763 6764 mutex_exit(SD_MUTEX(un)); 6765 6766 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 6767 6768 return (DDI_SUCCESS); 6769 } 6770 6771 6772 /* 6773 * Function: sd_ddi_pm_suspend 6774 * 6775 * Description: Set the drive state to low power. 6776 * Someone else is required to actually change the drive 6777 * power level. 6778 * 6779 * Arguments: un - driver soft state (unit) structure 6780 * 6781 * Return Code: DDI_FAILURE or DDI_SUCCESS 6782 * 6783 * Context: Kernel thread context 6784 */ 6785 6786 static int 6787 sd_ddi_pm_suspend(struct sd_lun *un) 6788 { 6789 ASSERT(un != NULL); 6790 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n"); 6791 6792 ASSERT(!mutex_owned(SD_MUTEX(un))); 6793 mutex_enter(SD_MUTEX(un)); 6794 6795 /* 6796 * Exit if power management is not enabled for this device, or if 6797 * the device is being used by HA. 6798 */ 6799 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 6800 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 6801 mutex_exit(SD_MUTEX(un)); 6802 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n"); 6803 return (DDI_SUCCESS); 6804 } 6805 6806 SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n", 6807 un->un_ncmds_in_driver); 6808 6809 /* 6810 * See if the device is not busy, i.e.: 6811 * - we have no commands in the driver for this device 6812 * - not waiting for resources 6813 */ 6814 if ((un->un_ncmds_in_driver == 0) && 6815 (un->un_state != SD_STATE_RWAIT)) { 6816 /* 6817 * The device is not busy, so it is OK to go to low power state. 6818 * Indicate low power, but rely on someone else to actually 6819 * change it. 6820 */ 6821 mutex_enter(&un->un_pm_mutex); 6822 un->un_pm_count = -1; 6823 mutex_exit(&un->un_pm_mutex); 6824 un->un_power_level = SD_SPINDLE_OFF; 6825 } 6826 6827 mutex_exit(SD_MUTEX(un)); 6828 6829 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n"); 6830 6831 return (DDI_SUCCESS); 6832 } 6833 6834 6835 /* 6836 * Function: sd_ddi_resume 6837 * 6838 * Description: Performs system power-up operations. 6839 * 6840 * Return Code: DDI_SUCCESS 6841 * DDI_FAILURE 6842 * 6843 * Context: Kernel thread context 6844 */ 6845 6846 static int 6847 sd_ddi_resume(dev_info_t *devi) 6848 { 6849 struct sd_lun *un; 6850 6851 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6852 if (un == NULL) { 6853 return (DDI_FAILURE); 6854 } 6855 6856 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 6857 6858 mutex_enter(SD_MUTEX(un)); 6859 Restore_state(un); 6860 6861 /* 6862 * Restore the state which was saved to give the 6863 * right state in un_last_state. 6864 */ 6865 un->un_last_state = un->un_save_state; 6866 /* 6867 * Note: throttle comes back at full. 6868 * Also note: this MUST be done before calling pm_raise_power 6869 * otherwise the system can get hung in biowait. The scenario where 6870 * this'll happen is under cpr suspend. Writing of the system 6871 * state goes through sddump, which writes 0 to un_throttle. If 6872 * writing the system state then fails, for example if the partition is 6873 * too small, then cpr attempts a resume.
If throttle isn't restored 6874 * from the saved value until after calling pm_raise_power then 6875 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 6876 * in biowait. 6877 */ 6878 un->un_throttle = un->un_saved_throttle; 6879 6880 /* 6881 * The chance of failure is very rare as the only command done in power 6882 * entry point is START command when you transition from 0->1 or 6883 * unknown->1. Put it to SPINDLE ON state irrespective of the state at 6884 * which suspend was done. Ignore the return value as the resume should 6885 * not be failed. In the case of removable media the media need not be 6886 * inserted and hence there is a chance that raise power will fail with 6887 * media not present. 6888 */ 6889 if (!ISREMOVABLE(un)) { 6890 mutex_exit(SD_MUTEX(un)); 6891 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 6892 mutex_enter(SD_MUTEX(un)); 6893 } 6894 6895 /* 6896 * Don't broadcast to the suspend cv and therefore possibly 6897 * start I/O until after power has been restored. 6898 */ 6899 cv_broadcast(&un->un_suspend_cv); 6900 cv_broadcast(&un->un_state_cv); 6901 6902 /* restart thread */ 6903 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 6904 scsi_watch_resume(un->un_swr_token); 6905 } 6906 6907 #if (defined(__fibre)) 6908 if (un->un_f_is_fibre == TRUE) { 6909 /* 6910 * Add callbacks for insert and remove events 6911 */ 6912 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 6913 sd_init_event_callbacks(un); 6914 } 6915 } 6916 #endif 6917 6918 /* 6919 * Transport any pending commands to the target. 6920 * 6921 * If this is a low-activity device commands in queue will have to wait 6922 * until new commands come in, which may take awhile. Also, we 6923 * specifically don't check un_ncmds_in_transport because we know that 6924 * there really are no commands in progress after the unit was 6925 * suspended and we could have reached the throttle level, been 6926 * suspended, and have no new commands coming in for awhile. Highly 6927 * unlikely, but so is the low-activity disk scenario. 6928 */ 6929 ddi_xbuf_dispatch(un->un_xbuf_attr); 6930 6931 sd_start_cmds(un, NULL); 6932 mutex_exit(SD_MUTEX(un)); 6933 6934 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 6935 6936 return (DDI_SUCCESS); 6937 } 6938 6939 6940 /* 6941 * Function: sd_ddi_pm_resume 6942 * 6943 * Description: Set the drive state to powered on. 6944 * Someone else is required to actually change the drive 6945 * power level. 6946 * 6947 * Arguments: un - driver soft state (unit) structure 6948 * 6949 * Return Code: DDI_SUCCESS 6950 * 6951 * Context: Kernel thread context 6952 */ 6953 6954 static int 6955 sd_ddi_pm_resume(struct sd_lun *un) 6956 { 6957 ASSERT(un != NULL); 6958 6959 ASSERT(!mutex_owned(SD_MUTEX(un))); 6960 mutex_enter(SD_MUTEX(un)); 6961 un->un_power_level = SD_SPINDLE_ON; 6962 6963 ASSERT(!mutex_owned(&un->un_pm_mutex)); 6964 mutex_enter(&un->un_pm_mutex); 6965 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 6966 un->un_pm_count++; 6967 ASSERT(un->un_pm_count == 0); 6968 /* 6969 * Note: no longer do the cv_broadcast on un_suspend_cv. The 6970 * un_suspend_cv is for a system resume, not a power management 6971 * device resume. (4297749) 6972 * cv_broadcast(&un->un_suspend_cv); 6973 */ 6974 } 6975 mutex_exit(&un->un_pm_mutex); 6976 mutex_exit(SD_MUTEX(un)); 6977 6978 return (DDI_SUCCESS); 6979 } 6980 6981 6982 /* 6983 * Function: sd_pm_idletimeout_handler 6984 * 6985 * Description: A timer routine that's active only while a device is busy. 
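 *
 *		(A sketch of the idle test the handler applies on each
 *		tick, using the fields referenced below: call
 *		pm_idle_component(9F) only when
 *
 *			(now - un_pm_idle_time) > sd_pm_idletime &&
 *			un_ncmds_in_driver == 0 && un_pm_count == 0
 *
 *		and otherwise re-arm the timeout for another 300 ms.)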
6986 * The purpose is to extend slightly the pm framework's busy 6987 * view of the device to prevent busy/idle thrashing for 6988 * back-to-back commands. Do this by comparing the current time 6989 * to the time at which the last command completed and when the 6990 * difference is greater than sd_pm_idletime, call 6991 * pm_idle_component. In addition to indicating idle to the pm 6992 * framework, update the chain type to again use the internal pm 6993 * layers of the driver. 6994 * 6995 * Arguments: arg - driver soft state (unit) structure 6996 * 6997 * Context: Executes in a timeout(9F) thread context 6998 */ 6999 7000 static void 7001 sd_pm_idletimeout_handler(void *arg) 7002 { 7003 struct sd_lun *un = arg; 7004 7005 time_t now; 7006 7007 mutex_enter(&sd_detach_mutex); 7008 if (un->un_detach_count != 0) { 7009 /* Abort if the instance is detaching */ 7010 mutex_exit(&sd_detach_mutex); 7011 return; 7012 } 7013 mutex_exit(&sd_detach_mutex); 7014 7015 now = ddi_get_time(); 7016 /* 7017 * Grab both mutexes, in the proper order, since we're accessing 7018 * both PM and softstate variables. 7019 */ 7020 mutex_enter(SD_MUTEX(un)); 7021 mutex_enter(&un->un_pm_mutex); 7022 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 7023 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 7024 /* 7025 * Update the chain types. 7026 * This takes effect on the next new command received. 7027 */ 7028 if (ISREMOVABLE(un)) { 7029 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7030 } else { 7031 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7032 } 7033 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7034 7035 SD_TRACE(SD_LOG_IO_PM, un, 7036 "sd_pm_idletimeout_handler: idling device\n"); 7037 (void) pm_idle_component(SD_DEVINFO(un), 0); 7038 un->un_pm_idle_timeid = NULL; 7039 } else { 7040 un->un_pm_idle_timeid = 7041 timeout(sd_pm_idletimeout_handler, un, 7042 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 7043 } 7044 mutex_exit(&un->un_pm_mutex); 7045 mutex_exit(SD_MUTEX(un)); 7046 } 7047 7048 7049 /* 7050 * Function: sd_pm_timeout_handler 7051 * 7052 * Description: Callback to tell the framework we are idle. 7053 * 7054 * Context: timeout(9F) thread context. 7055 */ 7056 7057 static void 7058 sd_pm_timeout_handler(void *arg) 7059 { 7060 struct sd_lun *un = arg; 7061 7062 (void) pm_idle_component(SD_DEVINFO(un), 0); 7063 mutex_enter(&un->un_pm_mutex); 7064 un->un_pm_timeid = NULL; 7065 mutex_exit(&un->un_pm_mutex); 7066 } 7067 7068 7069 /* 7070 * Function: sdpower 7071 * 7072 * Description: PM entry point. 7073 * 7074 * Return Code: DDI_SUCCESS 7075 * DDI_FAILURE 7076 * 7077 * Context: Kernel thread context 7078 */ 7079 7080 static int 7081 sdpower(dev_info_t *devi, int component, int level) 7082 { 7083 struct sd_lun *un; 7084 int instance; 7085 int rval = DDI_SUCCESS; 7086 uint_t i, log_page_size, maxcycles, ncycles; 7087 uchar_t *log_page_data; 7088 int log_sense_page; 7089 int medium_present; 7090 time_t intvlp; 7091 dev_t dev; 7092 struct pm_trans_data sd_pm_tran_data; 7093 uchar_t save_state; 7094 int sval; 7095 uchar_t state_before_pm; 7096 int got_semaphore_here; 7097 7098 instance = ddi_get_instance(devi); 7099 7100 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 7101 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 7102 component != 0) { 7103 return (DDI_FAILURE); 7104 } 7105 7106 dev = sd_make_device(SD_DEVINFO(un)); 7107 7108 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 7109 7110 /* 7111 * Must synchronize power down with close.
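 *
 * (Editorial aside: a minimal sketch, not part of the original driver;
 * the detailed explanation continues below.) The non-blocking
 * acquire/release pair used for this synchronization is built on
 * sema_tryp(9F) and sema_v(9F):
 *
 *	int got = sema_tryp(&un->un_semoclose);   1 = acquired, 0 = busy
 *	if (got == 0) {
 *		an open or close is in progress; refuse to power down
 *	}
 *	...
 *	if (got != 0)
 *		sema_v(&un->un_semoclose);        release only if acquired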
7112 * Attempt to decrement/acquire the open/close semaphore, 7113 * but do NOT wait on it. If it's not greater than zero, 7114 * i.e. it can't be decremented without waiting, then 7115 * someone else, either open or close, already has it 7116 * and the try returns 0. Use that knowledge here to determine 7117 * if it's OK to change the device power level. 7118 * Also, only increment it on exit if it was decremented, i.e. gotten, 7119 * here. 7120 */ 7121 got_semaphore_here = sema_tryp(&un->un_semoclose); 7122 7123 mutex_enter(SD_MUTEX(un)); 7124 7125 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n", 7126 un->un_ncmds_in_driver); 7127 7128 /* 7129 * If un_ncmds_in_driver is non-zero it indicates commands are 7130 * already being processed in the driver, or if the semaphore was 7131 * not gotten here it indicates an open or close is being processed. 7132 * In either case the request to go to low power cannot be granted 7133 * now, therefore we return failure. 7134 */ 7135 if ((level == SD_SPINDLE_OFF) && 7136 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) { 7137 mutex_exit(SD_MUTEX(un)); 7138 7139 if (got_semaphore_here != 0) { 7140 sema_v(&un->un_semoclose); 7141 } 7142 SD_TRACE(SD_LOG_IO_PM, un, 7143 "sdpower: exit, device has queued cmds.\n"); 7144 return (DDI_FAILURE); 7145 } 7146 7147 /* 7148 * If the device is OFFLINE the disk is completely dead; powering 7149 * it on or off would require sending it commands, which would 7150 * fail anyway, so just return here. 7151 * 7152 * Power changes to a device that's OFFLINE or SUSPENDED 7153 * are not allowed. 7154 */ 7155 if ((un->un_state == SD_STATE_OFFLINE) || 7156 (un->un_state == SD_STATE_SUSPENDED)) { 7157 mutex_exit(SD_MUTEX(un)); 7158 7159 if (got_semaphore_here != 0) { 7160 sema_v(&un->un_semoclose); 7161 } 7162 SD_TRACE(SD_LOG_IO_PM, un, 7163 "sdpower: exit, device is off-line.\n"); 7164 return (DDI_FAILURE); 7165 } 7166 7167 /* 7168 * Change the device's state to indicate its power level 7169 * is being changed. Do this to prevent a power off in the 7170 * middle of commands, which is especially bad on devices 7171 * that are really powered off instead of just spun down. 7172 */ 7173 state_before_pm = un->un_state; 7174 un->un_state = SD_STATE_PM_CHANGING; 7175 7176 mutex_exit(SD_MUTEX(un)); 7177 7178 /* 7179 * Bypass checking the log sense information for removables 7180 * and devices for which the HBA set the pm-capable property. 7181 * If un->un_pm_capable_prop is SD_PM_CAPABLE_UNDEFINED (-1) 7182 * then the HBA did not create the property. 7183 */ 7184 if ((level == SD_SPINDLE_OFF) && (!ISREMOVABLE(un)) && 7185 un->un_pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) { 7186 /* 7187 * Get the log sense information to understand whether 7188 * the power cycle counts have gone beyond the threshold.
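 *
 * (Editorial sketch, hedged: sd_get_be32() below is a hypothetical
 * helper, not part of this file.) The counters in this log page are
 * 32-bit big-endian values, so the extraction done in the code that
 * follows could equivalently be written as:
 *
 *	static uint_t
 *	sd_get_be32(uchar_t *p)
 *	{
 *		return ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]);
 *	}
 *
 *	maxcycles = sd_get_be32(&log_page_data[0x1C]);
 *	ncycles   = sd_get_be32(&log_page_data[0x24]);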
*/ 7190 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 7191 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 7192 7193 mutex_enter(SD_MUTEX(un)); 7194 log_sense_page = un->un_start_stop_cycle_page; 7195 mutex_exit(SD_MUTEX(un)); 7196 7197 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 7198 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT); 7199 #ifdef SDDEBUG 7200 if (sd_force_pm_supported) { 7201 /* Force a successful result */ 7202 rval = 0; 7203 } 7204 #endif 7205 if (rval != 0) { 7206 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 7207 "Log Sense Failed\n"); 7208 kmem_free(log_page_data, log_page_size); 7209 /* Cannot support power management on those drives */ 7210 7211 if (got_semaphore_here != 0) { 7212 sema_v(&un->un_semoclose); 7213 } 7214 /* 7215 * On exit put the state back to its original value 7216 * and broadcast to anyone waiting for the power 7217 * change completion. 7218 */ 7219 mutex_enter(SD_MUTEX(un)); 7220 un->un_state = state_before_pm; 7221 cv_broadcast(&un->un_suspend_cv); 7222 mutex_exit(SD_MUTEX(un)); 7223 SD_TRACE(SD_LOG_IO_PM, un, 7224 "sdpower: exit, Log Sense Failed.\n"); 7225 return (DDI_FAILURE); 7226 } 7227 7228 /* 7229 * From the page data, convert the essential information to 7230 * pm_trans_data. 7231 */ 7232 maxcycles = 7233 (log_page_data[0x1C] << 24) | (log_page_data[0x1D] << 16) | 7234 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 7235 7236 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 7237 7238 ncycles = 7239 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 7240 (log_page_data[0x26] << 8) | log_page_data[0x27]; 7241 7242 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 7243 7244 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 7245 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 7246 log_page_data[8+i]; 7247 } 7248 7249 kmem_free(log_page_data, log_page_size); 7250 7251 /* 7252 * Call the pm_trans_check routine to get the OK from 7253 * the global policy. 7254 */ 7255 7256 sd_pm_tran_data.format = DC_SCSI_FORMAT; 7257 sd_pm_tran_data.un.scsi_cycles.flag = 0; 7258 7259 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 7260 #ifdef SDDEBUG 7261 if (sd_force_pm_supported) { 7262 /* Force a successful result */ 7263 rval = 1; 7264 } 7265 #endif 7266 switch (rval) { 7267 case 0: 7268 /* 7269 * It is not OK to power cycle, or there was an error in 7270 * the parameters passed. Either way pm_trans_check has 7271 * returned the advised wait time in intvlp. Based on that 7272 * intvlp parameter we are supposed to pretend we are 7273 * busy so that the pm framework will never call our power 7274 * entry point. To do that, install a timeout handler and 7275 * wait for the recommended time to elapse so that power 7276 * management can be effective again. 7277 * 7278 * To effect this behavior, call pm_busy_component to 7279 * indicate to the framework this device is busy. 7280 * By not adjusting un_pm_count, the rest of PM in 7281 * the driver will function normally, independently 7282 * of this; but because the framework is told the device 7283 * is busy it won't attempt powering down until it gets 7284 * a matching idle. The timeout handler sends this. 7285 * Note: sd_pm_entry can't be called here to do this 7286 * because sdpower may have been called as a result 7287 * of a call to pm_raise_power from within sd_pm_entry. 7288 * 7289 * If a timeout handler is already active then 7290 * don't install another.
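 *
 * (Editorial aside; a sketch of the pairing, assuming the surrounding
 * code.) Every pm_busy_component() must eventually be matched by
 * exactly one pm_idle_component() before the framework will consider
 * powering the device down again:
 *
 *	(void) pm_busy_component(SD_DEVINFO(un), 0);      mark busy now
 *	un->un_pm_timeid = timeout(sd_pm_timeout_handler,
 *	    un, intvlp * drv_usectohz(1000000));          fire in intvlp s
 *
 * and sd_pm_timeout_handler() later supplies the matching
 * pm_idle_component() call.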
*/ 7292 mutex_enter(&un->un_pm_mutex); 7293 if (un->un_pm_timeid == NULL) { 7294 un->un_pm_timeid = 7295 timeout(sd_pm_timeout_handler, 7296 un, intvlp * drv_usectohz(1000000)); 7297 mutex_exit(&un->un_pm_mutex); 7298 (void) pm_busy_component(SD_DEVINFO(un), 0); 7299 } else { 7300 mutex_exit(&un->un_pm_mutex); 7301 } 7302 if (got_semaphore_here != 0) { 7303 sema_v(&un->un_semoclose); 7304 } 7305 /* 7306 * On exit put the state back to its original value 7307 * and broadcast to anyone waiting for the power 7308 * change completion. 7309 */ 7310 mutex_enter(SD_MUTEX(un)); 7311 un->un_state = state_before_pm; 7312 cv_broadcast(&un->un_suspend_cv); 7313 mutex_exit(SD_MUTEX(un)); 7314 7315 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, " 7316 "trans check Failed, not ok to power cycle.\n"); 7317 return (DDI_FAILURE); 7318 7319 case -1: 7320 if (got_semaphore_here != 0) { 7321 sema_v(&un->un_semoclose); 7322 } 7323 /* 7324 * On exit put the state back to its original value 7325 * and broadcast to anyone waiting for the power 7326 * change completion. 7327 */ 7328 mutex_enter(SD_MUTEX(un)); 7329 un->un_state = state_before_pm; 7330 cv_broadcast(&un->un_suspend_cv); 7331 mutex_exit(SD_MUTEX(un)); 7332 SD_TRACE(SD_LOG_IO_PM, un, 7333 "sdpower: exit, trans check command Failed.\n"); 7334 return (DDI_FAILURE); 7335 } 7336 } 7337 7338 if (level == SD_SPINDLE_OFF) { 7339 /* 7340 * Save the last state... if the STOP fails we need it 7341 * for restoring. 7342 */ 7343 mutex_enter(SD_MUTEX(un)); 7344 save_state = un->un_last_state; 7345 /* 7346 * There must not be any commands being processed 7347 * in the driver when we get here. Power to the 7348 * device is potentially going off. 7349 */ 7350 ASSERT(un->un_ncmds_in_driver == 0); 7351 mutex_exit(SD_MUTEX(un)); 7352 7353 /* 7354 * For now suspend the device completely before the spindle is 7355 * turned off. 7356 */ 7357 if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) { 7358 if (got_semaphore_here != 0) { 7359 sema_v(&un->un_semoclose); 7360 } 7361 /* 7362 * On exit put the state back to its original value 7363 * and broadcast to anyone waiting for the power 7364 * change completion. 7365 */ 7366 mutex_enter(SD_MUTEX(un)); 7367 un->un_state = state_before_pm; 7368 cv_broadcast(&un->un_suspend_cv); 7369 mutex_exit(SD_MUTEX(un)); 7370 SD_TRACE(SD_LOG_IO_PM, un, 7371 "sdpower: exit, PM suspend Failed.\n"); 7372 return (DDI_FAILURE); 7373 } 7374 } 7375 7376 /* 7377 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open, 7378 * close, or strategy. Dump no longer uses this routine; it uses its 7379 * own code so it can be done in polled mode. 7380 */ 7381 7382 medium_present = TRUE; 7383 7384 /* 7385 * When powering up, issue a TUR in case the device is at unit 7386 * attention. Don't do retries. Bypass the PM layer, otherwise 7387 * a deadlock on un_pm_busy_cv will occur. 7388 */ 7389 if (level == SD_SPINDLE_ON) { 7390 (void) sd_send_scsi_TEST_UNIT_READY(un, 7391 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 7392 } 7393 7394 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 7395 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 7396 7397 sval = sd_send_scsi_START_STOP_UNIT(un, 7398 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP), 7399 SD_PATH_DIRECT); 7400 /* The command failed; check for media present.
*/ 7401 if ((sval == ENXIO) && ISREMOVABLE(un)) { 7402 medium_present = FALSE; 7403 } 7404 7405 /* 7406 * The conditions of interest here are: 7407 * if a spindle off with media present fails, 7408 * then restore the state and return an error. 7409 * else if a spindle on fails, 7410 * then return an error (there's no state to restore). 7411 * In all other cases we setup for the new state 7412 * and return success. 7413 */ 7414 switch (level) { 7415 case SD_SPINDLE_OFF: 7416 if ((medium_present == TRUE) && (sval != 0)) { 7417 /* The stop command from above failed */ 7418 rval = DDI_FAILURE; 7419 /* 7420 * The stop command failed, and we have media 7421 * present. Put the level back by calling the 7422 * sd_pm_resume() and set the state back to 7423 * it's previous value. 7424 */ 7425 (void) sd_ddi_pm_resume(un); 7426 mutex_enter(SD_MUTEX(un)); 7427 un->un_last_state = save_state; 7428 mutex_exit(SD_MUTEX(un)); 7429 break; 7430 } 7431 /* 7432 * The stop command from above succeeded. 7433 */ 7434 if (ISREMOVABLE(un)) { 7435 /* 7436 * Terminate watch thread in case of removable media 7437 * devices going into low power state. This is as per 7438 * the requirements of pm framework, otherwise commands 7439 * will be generated for the device (through watch 7440 * thread), even when the device is in low power state. 7441 */ 7442 mutex_enter(SD_MUTEX(un)); 7443 un->un_f_watcht_stopped = FALSE; 7444 if (un->un_swr_token != NULL) { 7445 opaque_t temp_token = un->un_swr_token; 7446 un->un_f_watcht_stopped = TRUE; 7447 un->un_swr_token = NULL; 7448 mutex_exit(SD_MUTEX(un)); 7449 (void) scsi_watch_request_terminate(temp_token, 7450 SCSI_WATCH_TERMINATE_WAIT); 7451 } else { 7452 mutex_exit(SD_MUTEX(un)); 7453 } 7454 } 7455 break; 7456 7457 default: /* The level requested is spindle on... */ 7458 /* 7459 * Legacy behavior: return success on a failed spinup 7460 * if there is no media in the drive. 7461 * Do this by looking at medium_present here. 7462 */ 7463 if ((sval != 0) && medium_present) { 7464 /* The start command from above failed */ 7465 rval = DDI_FAILURE; 7466 break; 7467 } 7468 /* 7469 * The start command from above succeeded 7470 * Resume the devices now that we have 7471 * started the disks 7472 */ 7473 (void) sd_ddi_pm_resume(un); 7474 7475 /* 7476 * Resume the watch thread since it was suspended 7477 * when the device went into low power mode. 7478 */ 7479 if (ISREMOVABLE(un)) { 7480 mutex_enter(SD_MUTEX(un)); 7481 if (un->un_f_watcht_stopped == TRUE) { 7482 opaque_t temp_token; 7483 7484 un->un_f_watcht_stopped = FALSE; 7485 mutex_exit(SD_MUTEX(un)); 7486 temp_token = scsi_watch_request_submit( 7487 SD_SCSI_DEVP(un), 7488 sd_check_media_time, 7489 SENSE_LENGTH, sd_media_watch_cb, 7490 (caddr_t)dev); 7491 mutex_enter(SD_MUTEX(un)); 7492 un->un_swr_token = temp_token; 7493 } 7494 mutex_exit(SD_MUTEX(un)); 7495 } 7496 } 7497 if (got_semaphore_here != 0) { 7498 sema_v(&un->un_semoclose); 7499 } 7500 /* 7501 * On exit put the state back to it's original value 7502 * and broadcast to anyone waiting for the power 7503 * change completion. 7504 */ 7505 mutex_enter(SD_MUTEX(un)); 7506 un->un_state = state_before_pm; 7507 cv_broadcast(&un->un_suspend_cv); 7508 mutex_exit(SD_MUTEX(un)); 7509 7510 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 7511 7512 return (rval); 7513 } 7514 7515 7516 7517 /* 7518 * Function: sdattach 7519 * 7520 * Description: Driver's attach(9e) entry point function. 
7521 * 7522 * Arguments: devi - opaque device info handle 7523 * cmd - attach type 7524 * 7525 * Return Code: DDI_SUCCESS 7526 * DDI_FAILURE 7527 * 7528 * Context: Kernel thread context 7529 */ 7530 7531 static int 7532 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 7533 { 7534 switch (cmd) { 7535 case DDI_ATTACH: 7536 return (sd_unit_attach(devi)); 7537 case DDI_RESUME: 7538 return (sd_ddi_resume(devi)); 7539 default: 7540 break; 7541 } 7542 return (DDI_FAILURE); 7543 } 7544 7545 7546 /* 7547 * Function: sddetach 7548 * 7549 * Description: Driver's detach(9E) entry point function. 7550 * 7551 * Arguments: devi - opaque device info handle 7552 * cmd - detach type 7553 * 7554 * Return Code: DDI_SUCCESS 7555 * DDI_FAILURE 7556 * 7557 * Context: Kernel thread context 7558 */ 7559 7560 static int 7561 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 7562 { 7563 switch (cmd) { 7564 case DDI_DETACH: 7565 return (sd_unit_detach(devi)); 7566 case DDI_SUSPEND: 7567 return (sd_ddi_suspend(devi)); 7568 default: 7569 break; 7570 } 7571 return (DDI_FAILURE); 7572 } 7573 7574 7575 /* 7576 * Function: sd_sync_with_callback 7577 * 7578 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 7579 * state while the callback routine is active. 7580 * 7581 * Arguments: un: softstate structure for the instance 7582 * 7583 * Context: Kernel thread context 7584 */ 7585 7586 static void 7587 sd_sync_with_callback(struct sd_lun *un) 7588 { 7589 ASSERT(un != NULL); 7590 7591 mutex_enter(SD_MUTEX(un)); 7592 7593 ASSERT(un->un_in_callback >= 0); 7594 7595 while (un->un_in_callback > 0) { 7596 mutex_exit(SD_MUTEX(un)); 7597 delay(2); 7598 mutex_enter(SD_MUTEX(un)); 7599 } 7600 7601 mutex_exit(SD_MUTEX(un)); 7602 } 7603 7604 /* 7605 * Function: sd_unit_attach 7606 * 7607 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 7608 * the soft state structure for the device and performs 7609 * all necessary structure and device initializations. 7610 * 7611 * Arguments: devi: the system's dev_info_t for the device. 7612 * 7613 * Return Code: DDI_SUCCESS if attach is successful. 7614 * DDI_FAILURE if any part of the attach fails. 7615 * 7616 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 7617 * Kernel thread context only. Can sleep. 7618 */ 7619 7620 static int 7621 sd_unit_attach(dev_info_t *devi) 7622 { 7623 struct scsi_device *devp; 7624 struct sd_lun *un; 7625 char *variantp; 7626 int reservation_flag = SD_TARGET_IS_UNRESERVED; 7627 int instance; 7628 int rval; 7629 uint64_t capacity; 7630 uint_t lbasize; 7631 7632 /* 7633 * Retrieve the target driver's private data area. This was set 7634 * up by the HBA. 7635 */ 7636 devp = ddi_get_driver_private(devi); 7637 7638 /* 7639 * Since we have no idea what state things were left in by the last 7640 * user of the device, set up some 'default' settings, ie. turn 'em 7641 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 7642 * Do this before the scsi_probe, which sends an inquiry. 7643 * This is a fix for bug (4430280). 7644 * Of special importance is wide-xfer. The drive could have been left 7645 * in wide transfer mode by the last driver to communicate with it, 7646 * this includes us. If that's the case, and if the following is not 7647 * setup properly or we don't re-negotiate with the drive prior to 7648 * transferring data to/from the drive, it causes bus parity errors, 7649 * data overruns, and unexpected interrupts. This first occurred when 7650 * the fix for bug (4378686) was made. 
7651 */ 7652 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 7653 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 7654 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 7655 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 7656 7657 /* 7658 * Use scsi_probe() to issue an INQUIRY command to the device. 7659 * This call will allocate and fill in the scsi_inquiry structure 7660 * and point the sd_inq member of the scsi_device structure to it. 7661 * If the attach succeeds, then this memory will not be de-allocated 7662 * (via scsi_unprobe()) until the instance is detached. 7663 */ 7664 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 7665 goto probe_failed; 7666 } 7667 7668 /* 7669 * Check the device type as specified in the inquiry data and 7670 * claim it if it is of a type that we support. 7671 */ 7672 switch (devp->sd_inq->inq_dtype) { 7673 case DTYPE_DIRECT: 7674 break; 7675 case DTYPE_RODIRECT: 7676 break; 7677 case DTYPE_OPTICAL: 7678 break; 7679 case DTYPE_NOTPRESENT: 7680 default: 7681 /* Unsupported device type; fail the attach. */ 7682 goto probe_failed; 7683 } 7684 7685 /* 7686 * Allocate the soft state structure for this unit. 7687 * 7688 * We rely upon this memory being set to all zeroes by 7689 * ddi_soft_state_zalloc(). We assume that any member of the 7690 * soft state structure that is not explicitly initialized by 7691 * this routine will have a value of zero. 7692 */ 7693 instance = ddi_get_instance(devp->sd_dev); 7694 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 7695 goto probe_failed; 7696 } 7697 7698 /* 7699 * Retrieve a pointer to the newly-allocated soft state. 7700 * 7701 * This should NEVER fail if the ddi_soft_state_zalloc() call above 7702 * was successful, unless something has gone horribly wrong and the 7703 * ddi's soft state internals are corrupt (in which case it is 7704 * probably better to halt here than just fail the attach....) 7705 */ 7706 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 7707 panic("sd_unit_attach: NULL soft state on instance:0x%x", 7708 instance); 7709 /*NOTREACHED*/ 7710 } 7711 7712 /* 7713 * Link the back ptr of the driver soft state to the scsi_device 7714 * struct for this lun. 7715 * Save a pointer to the softstate in the driver-private area of 7716 * the scsi_device struct. 7717 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 7718 * we first set un->un_sd below. 7719 */ 7720 un->un_sd = devp; 7721 devp->sd_private = (opaque_t)un; 7722 7723 /* 7724 * The following must be after devp is stored in the soft state struct. 7725 */ 7726 #ifdef SDDEBUG 7727 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7728 "%s_unit_attach: un:0x%p instance:%d\n", 7729 ddi_driver_name(devi), un, instance); 7730 #endif 7731 7732 /* 7733 * Set up the device type and node type (for the minor nodes). 7734 * By default we assume that the device can at least support the 7735 * Common Command Set. Call it a CD-ROM if it reports itself 7736 * as a RODIRECT device. 7737 */ 7738 switch (devp->sd_inq->inq_dtype) { 7739 case DTYPE_RODIRECT: 7740 un->un_node_type = DDI_NT_CD_CHAN; 7741 un->un_ctype = CTYPE_CDROM; 7742 break; 7743 case DTYPE_OPTICAL: 7744 un->un_node_type = DDI_NT_BLOCK_CHAN; 7745 un->un_ctype = CTYPE_ROD; 7746 break; 7747 default: 7748 un->un_node_type = DDI_NT_BLOCK_CHAN; 7749 un->un_ctype = CTYPE_CCS; 7750 break; 7751 } 7752 7753 /* 7754 * Try to read the interconnect type from the HBA. 
* 7756 * Note: This driver is currently compiled as two binaries, a parallel 7757 * scsi version (sd) and a fibre channel version (ssd). All functional 7758 * differences are determined at compile time. In the future a single 7759 * binary will be provided and the interconnect type will be used to 7760 * differentiate between fibre and parallel scsi behaviors. At that time 7761 * it will be necessary for all fibre channel HBAs to support this 7762 * property. 7763 * 7764 * Set un_f_is_fibre to TRUE (the default is fibre). 7765 */ 7766 un->un_f_is_fibre = TRUE; 7767 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 7768 case INTERCONNECT_SSA: 7769 un->un_interconnect_type = SD_INTERCONNECT_SSA; 7770 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7771 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 7772 break; 7773 case INTERCONNECT_PARALLEL: 7774 un->un_f_is_fibre = FALSE; 7775 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7776 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7777 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 7778 break; 7779 case INTERCONNECT_FIBRE: 7780 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 7781 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7782 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 7783 break; 7784 case INTERCONNECT_FABRIC: 7785 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 7786 un->un_node_type = DDI_NT_BLOCK_FABRIC; 7787 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7788 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 7789 break; 7790 default: 7791 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 7792 /* 7793 * The HBA does not support the "interconnect-type" property 7794 * (or did not provide a recognized type). 7795 * 7796 * Note: This will be obsoleted when a single fibre channel 7797 * and parallel scsi driver is delivered. In the meantime the 7798 * interconnect type will be set to the platform default. If that 7799 * type is not parallel SCSI, it means that we should be 7800 * assuming "ssd" semantics. However, here this also means that 7801 * the FC HBA does not support the "interconnect-type" property 7802 * like we expect it to, so log this occurrence. 7803 */ 7804 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 7805 if (!SD_IS_PARALLEL_SCSI(un)) { 7806 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7807 "sd_unit_attach: un:0x%p Assuming " 7808 "INTERCONNECT_FIBRE\n", un); 7809 } else { 7810 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7811 "sd_unit_attach: un:0x%p Assuming " 7812 "INTERCONNECT_PARALLEL\n", un); 7813 un->un_f_is_fibre = FALSE; 7814 } 7815 #else 7816 /* 7817 * Note: This source will be implemented when a single fibre 7818 * channel and parallel scsi driver is delivered. The default 7819 * will be to assume that if a device does not support the 7820 * "interconnect-type" property it is a parallel SCSI HBA and 7821 * we will set the interconnect type for parallel scsi.
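 *
 * (Editorial summary sketch, assuming the surrounding code.) The
 * overall decision above reduces to: ask the HBA, and fall back to
 * the compile-time default when no recognized answer comes back:
 *
 *	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
 *	case INTERCONNECT_PARALLEL:
 *		un->un_f_is_fibre = FALSE;      parallel SCSI behaviors
 *		break;
 *	case INTERCONNECT_FIBRE:                fibre channel behaviors
 *		break;
 *	default:                                property missing/unknown
 *		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
 *		break;
 *	}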
*/ 7823 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7824 un->un_f_is_fibre = FALSE; 7825 #endif 7826 break; 7827 } 7828 7829 if (un->un_f_is_fibre == TRUE) { 7830 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7831 SCSI_VERSION_3) { 7832 switch (un->un_interconnect_type) { 7833 case SD_INTERCONNECT_FIBRE: 7834 case SD_INTERCONNECT_SSA: 7835 un->un_node_type = DDI_NT_BLOCK_WWN; 7836 break; 7837 default: 7838 break; 7839 } 7840 } 7841 } 7842 7843 /* 7844 * Initialize the Request Sense command for the target. 7845 */ 7846 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7847 goto alloc_rqs_failed; 7848 } 7849 7850 /* 7851 * Set un_retry_count to SD_RETRY_COUNT; this is OK for SPARC, 7852 * which has separate binaries for sd and ssd. 7853 * 7854 * x86 has 1 binary, so un_retry_count is set based on connection type. 7855 * The hardcoded values will go away when SPARC uses 1 binary 7856 * for sd and ssd. These hardcoded values need to match 7857 * SD_RETRY_COUNT in sddef.h. 7858 * The value used is based on the interconnect type: 7859 * fibre = 3, parallel = 5. 7860 */ 7861 #if defined(__i386) || defined(__amd64) 7862 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 7863 #else 7864 un->un_retry_count = SD_RETRY_COUNT; 7865 #endif 7866 7867 /* 7868 * Set the per disk retry count to the default number of retries 7869 * for disks and CDROMs. This value can be overridden by the 7870 * disk property list or an entry in sd.conf. 7871 */ 7872 un->un_notready_retry_count = 7873 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 7874 : DISK_NOT_READY_RETRY_COUNT(un); 7875 7876 /* 7877 * Set the busy retry count to the default value of un_retry_count. 7878 * This can be overridden by entries in sd.conf or the device 7879 * config table. 7880 */ 7881 un->un_busy_retry_count = un->un_retry_count; 7882 7883 /* 7884 * Init the reset threshold for retries. This number determines 7885 * how many retries must be performed before a reset can be issued 7886 * (for certain error conditions). This can be overridden by entries 7887 * in sd.conf or the device config table. 7888 */ 7889 un->un_reset_retry_count = (un->un_retry_count / 2); 7890 7891 /* 7892 * Set the victim_retry_count to the default un_retry_count 7893 */ 7894 un->un_victim_retry_count = (2 * un->un_retry_count); 7895 7896 /* 7897 * Set the reservation release timeout to the default value of 7898 * 5 seconds. This can be overridden by entries in ssd.conf or the 7899 * device config table. 7900 */ 7901 un->un_reserve_release_time = 5; 7902 7903 /* 7904 * Set up the default maximum transfer size. Note that this may 7905 * get updated later in the attach, when setting up default wide 7906 * operations for disks. 7907 */ 7908 #if defined(__i386) || defined(__amd64) 7909 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7910 #else 7911 un->un_max_xfer_size = (uint_t)maxphys; 7912 #endif 7913 7914 /* 7915 * Get "allow bus device reset" property (defaults to "enabled" if 7916 * the property was not defined). This is to disable bus resets for 7917 * certain kinds of error recovery. Note: In the future when a run-time 7918 * fibre check is available the soft state flag should default to 7919 * enabled.
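 *
 * (Editorial aside; an illustrative sketch of the lookup that
 * follows.) ddi_getprop(9F) returns its final argument when the
 * property is undefined, which is what makes "enabled" the default:
 *
 *	if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
 *	    "allow-bus-device-reset", 1) != 0) {
 *		un->un_f_allow_bus_device_reset = TRUE;   defined nonzero,
 *	} else {                                          or undefined
 *		un->un_f_allow_bus_device_reset = FALSE;  defined as 0
 *	}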
7920 */ 7921 if (un->un_f_is_fibre == TRUE) { 7922 un->un_f_allow_bus_device_reset = TRUE; 7923 } else { 7924 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7925 "allow-bus-device-reset", 1) != 0) { 7926 un->un_f_allow_bus_device_reset = TRUE; 7927 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7928 "sd_unit_attach: un:0x%p Bus device reset enabled\n", 7929 un); 7930 } else { 7931 un->un_f_allow_bus_device_reset = FALSE; 7932 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7933 "sd_unit_attach: un:0x%p Bus device reset disabled\n", 7934 un); 7935 } 7936 } 7937 7938 /* 7939 * Check if this is an ATAPI device. ATAPI devices use Group 1 7940 * Read/Write commands and Group 2 Mode Sense/Select commands. 7941 * 7942 * Note: The "obsolete" way of doing this is to check for the "atapi" 7943 * property. The new "variant" property with a value of "atapi" has been 7944 * introduced so that future 'variants' of standard SCSI behavior (like 7945 * atapi) could be specified by the underlying HBA drivers by supplying 7946 * a new value for the "variant" property, instead of having to define a 7947 * new property. 7948 */ 7949 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7950 un->un_f_cfg_is_atapi = TRUE; 7951 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7952 "sd_unit_attach: un:0x%p Atapi device\n", un); 7953 } 7954 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7955 &variantp) == DDI_PROP_SUCCESS) { 7956 if (strcmp(variantp, "atapi") == 0) { 7957 un->un_f_cfg_is_atapi = TRUE; 7958 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7959 "sd_unit_attach: un:0x%p Atapi device\n", un); 7960 } 7961 ddi_prop_free(variantp); 7962 } 7963 7964 /* 7965 * Assume doorlock commands are supported. If not, the first 7966 * call to sd_send_scsi_DOORLOCK() will set to FALSE 7967 */ 7968 un->un_f_doorlock_supported = TRUE; 7969 7970 un->un_cmd_timeout = SD_IO_TIME; 7971 7972 /* Info on current states, statuses, etc. (Updated frequently) */ 7973 un->un_state = SD_STATE_NORMAL; 7974 un->un_last_state = SD_STATE_NORMAL; 7975 7976 /* Control & status info for command throttling */ 7977 un->un_throttle = sd_max_throttle; 7978 un->un_saved_throttle = sd_max_throttle; 7979 un->un_min_throttle = sd_min_throttle; 7980 7981 if (un->un_f_is_fibre == TRUE) { 7982 un->un_f_use_adaptive_throttle = TRUE; 7983 } else { 7984 un->un_f_use_adaptive_throttle = FALSE; 7985 } 7986 7987 /* Removable media support. */ 7988 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7989 un->un_mediastate = DKIO_NONE; 7990 un->un_specified_mediastate = DKIO_NONE; 7991 7992 /* CVs for suspend/resume (PM or DR) */ 7993 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7994 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7995 7996 /* Power management support. */ 7997 un->un_power_level = SD_SPINDLE_UNINIT; 7998 7999 /* 8000 * The open/close semaphore is used to serialize threads executing 8001 * in the driver's open & close entry point routines for a given 8002 * instance. 8003 */ 8004 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 8005 8006 /* 8007 * The conf file entry and softstate variable is a forceful override, 8008 * meaning a non-zero value must be entered to change the default. 8009 */ 8010 un->un_f_disksort_disabled = FALSE; 8011 8012 /* 8013 * Retrieve the properties from the static driver table or the driver 8014 * configuration file (.conf) for this unit and update the soft state 8015 * for the device as needed for the indicated properties. 
Note: the property configuration needs to occur here as some of the 8017 * following routines may have dependencies on soft state flags set 8018 * as part of the driver property configuration. 8019 */ 8020 sd_read_unit_properties(un); 8021 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8022 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 8023 8024 /* 8025 * By default, we mark the capacity, lbasize, and geometry 8026 * as invalid. Only if we successfully read a valid capacity 8027 * will we update the un_blockcount and un_tgt_blocksize with the 8028 * valid values (the geometry will be validated later). 8029 */ 8030 un->un_f_blockcount_is_valid = FALSE; 8031 un->un_f_tgt_blocksize_is_valid = FALSE; 8032 un->un_f_geometry_is_valid = FALSE; 8033 8034 /* 8035 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 8036 * otherwise. 8037 */ 8038 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 8039 un->un_blockcount = 0; 8040 8041 /* 8042 * Set up the per-instance info needed to determine the correct 8043 * CDBs and other info for issuing commands to the target. 8044 */ 8045 sd_init_cdb_limits(un); 8046 8047 /* 8048 * Set up the IO chains to use, based upon the target type. 8049 */ 8050 if (ISREMOVABLE(un)) { 8051 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 8052 } else { 8053 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 8054 } 8055 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 8056 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 8057 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 8058 8059 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 8060 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 8061 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 8062 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 8063 8064 8065 if (ISCD(un)) { 8066 un->un_additional_codes = sd_additional_codes; 8067 } else { 8068 un->un_additional_codes = NULL; 8069 } 8070 8071 /* 8072 * Create the kstats here so they can be available for attach-time 8073 * routines that send commands to the unit (either polled or via 8074 * sd_send_scsi_cmd). 8075 * 8076 * Note: This is a critical sequence that needs to be maintained: 8077 * 1) Instantiate the kstats here, before any routines using the 8078 * iopath (i.e. sd_send_scsi_cmd). 8079 * 2) Initialize the error stats (sd_set_errstats) and partition 8080 * stats (sd_set_pstats), following sd_validate_geometry(), 8081 * sd_register_devid(), and sd_disable_caching(). 8082 */ 8083 8084 un->un_stats = kstat_create(sd_label, instance, 8085 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 8086 if (un->un_stats != NULL) { 8087 un->un_stats->ks_lock = SD_MUTEX(un); 8088 kstat_install(un->un_stats); 8089 } 8090 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8091 "sd_unit_attach: un:0x%p un_stats created\n", un); 8092 8093 sd_create_errstats(un, instance); 8094 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8095 "sd_unit_attach: un:0x%p errstats created\n", un); 8096 8097 /* 8098 * The following if/else code was relocated here from below as part 8099 * of the fix for bug (4430280). However with the default setup added 8100 * on entry to this routine, it's no longer absolutely necessary for 8101 * this to be before the call to sd_spin_up_unit. 8102 */ 8103 if (SD_IS_PARALLEL_SCSI(un)) { 8104 /* 8105 * If SCSI-2 tagged queueing is supported by the target 8106 * and by the host adapter then we will enable it.
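 *
 * (Editorial sketch of the test below, hedged; the real code also
 * requires auto request sense, un_f_arq_enabled.) Enabling combines
 * the INQUIRY data with an HBA capability negotiation:
 *
 *	if (devp->sd_inq->inq_rdf == RDF_SCSI2 &&     SCSI-2 response
 *	    devp->sd_inq->inq_cmdque &&               device has CMDQUE
 *	    scsi_ifsetcap(SD_ADDRESS(un),
 *	    "tagged-qing", 1, 1) == 1) {              HBA agreed
 *		un->un_tagflags = FLAG_STAG;          use simple tags
 *	}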
8107 */ 8108 un->un_tagflags = 0; 8109 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && 8110 (devp->sd_inq->inq_cmdque) && 8111 (un->un_f_arq_enabled == TRUE)) { 8112 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 8113 1, 1) == 1) { 8114 un->un_tagflags = FLAG_STAG; 8115 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8116 "sd_unit_attach: un:0x%p tag queueing " 8117 "enabled\n", un); 8118 } else if (scsi_ifgetcap(SD_ADDRESS(un), 8119 "untagged-qing", 0) == 1) { 8120 un->un_f_opt_queueing = TRUE; 8121 un->un_saved_throttle = un->un_throttle = 8122 min(un->un_throttle, 3); 8123 } else { 8124 un->un_f_opt_queueing = FALSE; 8125 un->un_saved_throttle = un->un_throttle = 1; 8126 } 8127 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 8128 == 1) && (un->un_f_arq_enabled == TRUE)) { 8129 /* The Host Adapter supports internal queueing. */ 8130 un->un_f_opt_queueing = TRUE; 8131 un->un_saved_throttle = un->un_throttle = 8132 min(un->un_throttle, 3); 8133 } else { 8134 un->un_f_opt_queueing = FALSE; 8135 un->un_saved_throttle = un->un_throttle = 1; 8136 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8137 "sd_unit_attach: un:0x%p no tag queueing\n", un); 8138 } 8139 8140 8141 /* Setup or tear down default wide operations for disks */ 8142 8143 /* 8144 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 8145 * and "ssd_max_xfer_size" to exist simultaneously on the same 8146 * system and be set to different values. In the future this 8147 * code may need to be updated when the ssd module is 8148 * obsoleted and removed from the system. (4299588) 8149 */ 8150 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && 8151 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 8152 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 8153 1, 1) == 1) { 8154 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8155 "sd_unit_attach: un:0x%p Wide Transfer " 8156 "enabled\n", un); 8157 } 8158 8159 /* 8160 * If tagged queuing has also been enabled, then 8161 * enable large xfers 8162 */ 8163 if (un->un_saved_throttle == sd_max_throttle) { 8164 un->un_max_xfer_size = 8165 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8166 sd_max_xfer_size, SD_MAX_XFER_SIZE); 8167 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8168 "sd_unit_attach: un:0x%p max transfer " 8169 "size=0x%x\n", un, un->un_max_xfer_size); 8170 } 8171 } else { 8172 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 8173 0, 1) == 1) { 8174 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8175 "sd_unit_attach: un:0x%p " 8176 "Wide Transfer disabled\n", un); 8177 } 8178 } 8179 } else { 8180 un->un_tagflags = FLAG_STAG; 8181 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 8182 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 8183 } 8184 8185 /* 8186 * If this target supports LUN reset, try to enable it. 8187 */ 8188 if (un->un_f_lun_reset_enabled) { 8189 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 8190 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 8191 "un:0x%p lun_reset capability set\n", un); 8192 } else { 8193 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 8194 "un:0x%p lun-reset capability not set\n", un); 8195 } 8196 } 8197 8198 /* 8199 * At this point in the attach, we have enough info in the 8200 * soft state to be able to issue commands to the target. 8201 * 8202 * All command paths used below MUST issue their commands as 8203 * SD_PATH_DIRECT. This is important as intermediate layers 8204 * are not all initialized yet (such as PM). 8205 */ 8206 8207 /* 8208 * Send a TEST UNIT READY command to the device. This should clear 8209 * any outstanding UNIT ATTENTION that may be present. 
8210 * 8211 * Note: Don't check for success, just track if there is a reservation, 8212 * this is a throw away command to clear any unit attentions. 8213 * 8214 * Note: This MUST be the first command issued to the target during 8215 * attach to ensure power on UNIT ATTENTIONS are cleared. 8216 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 8217 * with attempts at spinning up a device with no media. 8218 */ 8219 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 8220 reservation_flag = SD_TARGET_IS_RESERVED; 8221 } 8222 8223 /* 8224 * If the device is NOT a removable media device, attempt to spin 8225 * it up (using the START_STOP_UNIT command) and read its capacity 8226 * (using the READ CAPACITY command). Note, however, that either 8227 * of these could fail and in some cases we would continue with 8228 * the attach despite the failure (see below). 8229 */ 8230 if (devp->sd_inq->inq_dtype == DTYPE_DIRECT && !ISREMOVABLE(un)) { 8231 switch (sd_spin_up_unit(un)) { 8232 case 0: 8233 /* 8234 * Spin-up was successful; now try to read the 8235 * capacity. If successful then save the results 8236 * and mark the capacity & lbasize as valid. 8237 */ 8238 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8239 "sd_unit_attach: un:0x%p spin-up successful\n", un); 8240 8241 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 8242 &lbasize, SD_PATH_DIRECT)) { 8243 case 0: { 8244 if (capacity > DK_MAX_BLOCKS) { 8245 #ifdef _LP64 8246 /* 8247 * Enable descriptor format sense data 8248 * so that we can get 64 bit sense 8249 * data fields. 8250 */ 8251 sd_enable_descr_sense(un); 8252 #else 8253 /* 32-bit kernels can't handle this */ 8254 scsi_log(SD_DEVINFO(un), 8255 sd_label, CE_WARN, 8256 "disk has %llu blocks, which " 8257 "is too large for a 32-bit " 8258 "kernel", capacity); 8259 goto spinup_failed; 8260 #endif 8261 } 8262 /* 8263 * The following relies on 8264 * sd_send_scsi_READ_CAPACITY never 8265 * returning 0 for capacity and/or lbasize. 8266 */ 8267 sd_update_block_info(un, lbasize, capacity); 8268 8269 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8270 "sd_unit_attach: un:0x%p capacity = %ld " 8271 "blocks; lbasize= %ld.\n", un, 8272 un->un_blockcount, un->un_tgt_blocksize); 8273 8274 break; 8275 } 8276 case EACCES: 8277 /* 8278 * Should never get here if the spin-up 8279 * succeeded, but code it in anyway. 8280 * From here, just continue with the attach... 8281 */ 8282 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8283 "sd_unit_attach: un:0x%p " 8284 "sd_send_scsi_READ_CAPACITY " 8285 "returned reservation conflict\n", un); 8286 reservation_flag = SD_TARGET_IS_RESERVED; 8287 break; 8288 default: 8289 /* 8290 * Likewise, should never get here if the 8291 * spin-up succeeded. Just continue with 8292 * the attach... 8293 */ 8294 break; 8295 } 8296 break; 8297 case EACCES: 8298 /* 8299 * Device is reserved by another host. In this case 8300 * we could not spin it up or read the capacity, but 8301 * we continue with the attach anyway. 8302 */ 8303 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8304 "sd_unit_attach: un:0x%p spin-up reservation " 8305 "conflict.\n", un); 8306 reservation_flag = SD_TARGET_IS_RESERVED; 8307 break; 8308 default: 8309 /* Fail the attach if the spin-up failed. */ 8310 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8311 "sd_unit_attach: un:0x%p spin-up failed.", un); 8312 goto spinup_failed; 8313 } 8314 } 8315 8316 /* 8317 * Check to see if this is a MMC drive 8318 */ 8319 if (ISCD(un)) { 8320 sd_set_mmc_caps(un); 8321 } 8322 8323 /* 8324 * Create the minor nodes for the device. 
8325 * Note: If we want to support fdisk on both sparc and intel, this will 8326 * have to separate out the notion that VTOC8 is always sparc, and 8327 * VTOC16 is always intel (though these can be the defaults). The vtoc 8328 * type will have to be determined at run-time, and the fdisk 8329 * partitioning will have to have been read & set up before we 8330 * create the minor nodes. (any other inits (such as kstats) that 8331 * also ought to be done before creating the minor nodes?) (Doesn't 8332 * setting up the minor nodes kind of imply that we're ready to 8333 * handle an open from userland?) 8334 */ 8335 if (sd_create_minor_nodes(un, devi) != DDI_SUCCESS) { 8336 goto create_minor_nodes_failed; 8337 } 8338 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8339 "sd_unit_attach: un:0x%p minor nodes created\n", un); 8340 8341 /* 8342 * Add a zero-length attribute to tell the world we support 8343 * kernel ioctls (for layered drivers) 8344 */ 8345 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8346 DDI_KERNEL_IOCTL, NULL, 0); 8347 8348 /* 8349 * Add a boolean property to tell the world we support 8350 * the B_FAILFAST flag (for layered drivers) 8351 */ 8352 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8353 "ddi-failfast-supported", NULL, 0); 8354 8355 /* 8356 * Initialize power management 8357 */ 8358 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 8359 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 8360 sd_setup_pm(un, devi); 8361 if (un->un_f_pm_is_enabled == FALSE) { 8362 /* 8363 * For performance, point to a jump table that does 8364 * not include pm. 8365 * The direct and priority chains don't change with PM. 8366 * 8367 * Note: this is currently done based on individual device 8368 * capabilities. When an interface for determining system 8369 * power enabled state becomes available, or when additional 8370 * layers are added to the command chain, these values will 8371 * have to be re-evaluated for correctness. 8372 */ 8373 if (ISREMOVABLE(un)) { 8374 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 8375 } else { 8376 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 8377 } 8378 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8379 } 8380 8381 /* 8382 * This property is set to 0 by HA software to avoid retries 8383 * on a reserved disk. (The preferred property name is 8384 * "retry-on-reservation-conflict") (1189689) 8385 * 8386 * Note: The use of a global here can have unintended consequences. A 8387 * per instance variable is preferable to match the capabilities of 8388 * different underlying hba's (4402600) 8389 */ 8390 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 8391 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 8392 sd_retry_on_reservation_conflict); 8393 if (sd_retry_on_reservation_conflict != 0) { 8394 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 8395 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 8396 sd_retry_on_reservation_conflict); 8397 } 8398 8399 /* Set up options for QFULL handling. */ 8400 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8401 "qfull-retries", -1)) != -1) { 8402 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 8403 rval, 1); 8404 } 8405 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8406 "qfull-retry-interval", -1)) != -1) { 8407 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 8408 rval, 1); 8409 } 8410 8411 /* 8412 * This just prints a message that announces the existence of the 8413 * device.
The message is always printed in the system logfile, but 8414 * only appears on the console if the system is booted with the 8415 * -v (verbose) argument. 8416 */ 8417 ddi_report_dev(devi); 8418 8419 /* 8420 * The framework calls driver attach routines single-threaded 8421 * for a given instance. However we still acquire SD_MUTEX here 8422 * because this is required for calling the sd_validate_geometry() 8423 * and sd_register_devid() functions. 8424 */ 8425 mutex_enter(SD_MUTEX(un)); 8426 un->un_f_geometry_is_valid = FALSE; 8427 un->un_mediastate = DKIO_NONE; 8428 un->un_reserved = -1; 8429 if (!ISREMOVABLE(un)) { 8430 /* 8431 * Read and validate the device's geometry (ie, disk label) 8432 * A new unformatted drive will not have a valid geometry, but 8433 * the driver needs to successfully attach to this device so 8434 * the drive can be formatted via ioctls. 8435 */ 8436 if (((sd_validate_geometry(un, SD_PATH_DIRECT) == 8437 ENOTSUP)) && 8438 (un->un_blockcount < DK_MAX_BLOCKS)) { 8439 /* 8440 * We found a small disk with an EFI label on it; 8441 * we need to fix up the minor nodes accordingly. 8442 */ 8443 ddi_remove_minor_node(devi, "h"); 8444 ddi_remove_minor_node(devi, "h,raw"); 8445 (void) ddi_create_minor_node(devi, "wd", 8446 S_IFBLK, 8447 (instance << SDUNIT_SHIFT) | WD_NODE, 8448 un->un_node_type, NULL); 8449 (void) ddi_create_minor_node(devi, "wd,raw", 8450 S_IFCHR, 8451 (instance << SDUNIT_SHIFT) | WD_NODE, 8452 un->un_node_type, NULL); 8453 } 8454 } 8455 8456 /* 8457 * Read and initialize the devid for the unit. 8458 */ 8459 ASSERT(un->un_errstats != NULL); 8460 if (!ISREMOVABLE(un)) { 8461 sd_register_devid(un, devi, reservation_flag); 8462 } 8463 mutex_exit(SD_MUTEX(un)); 8464 8465 #if (defined(__fibre)) 8466 /* 8467 * Register callbacks for fibre only. You can't do this solely 8468 * on the basis of the devid_type because this is hba specific. 8469 * We need to query our hba capabilities to find out whether to 8470 * register or not. 8471 */ 8472 if (un->un_f_is_fibre) { 8473 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 8474 sd_init_event_callbacks(un); 8475 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8476 "sd_unit_attach: un:0x%p event callbacks inserted", un); 8477 } 8478 } 8479 #endif 8480 8481 if (un->un_f_opt_disable_cache == TRUE) { 8482 if (sd_disable_caching(un) != 0) { 8483 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8484 "sd_unit_attach: un:0x%p Could not disable " 8485 "caching", un); 8486 goto devid_failed; 8487 } 8488 } 8489 8490 /* 8491 * Set the pstat and error stat values here, so data obtained during the 8492 * previous attach-time routines is available. 8493 * 8494 * Note: This is a critical sequence that needs to be maintained: 8495 * 1) Instantiate the kstats before any routines using the iopath 8496 * (i.e. sd_send_scsi_cmd). 8497 * 2) Initialize the error stats (sd_set_errstats) and partition 8498 * stats (sd_set_pstats) here, following sd_validate_geometry(), 8499 * sd_register_devid(), and sd_disable_caching(). 8500 */ 8501 if (!ISREMOVABLE(un) && (un->un_f_pkstats_enabled == TRUE)) { 8502 sd_set_pstats(un); 8503 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8504 "sd_unit_attach: un:0x%p pstats created and set\n", un); 8505 } 8506 8507 sd_set_errstats(un); 8508 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8509 "sd_unit_attach: un:0x%p errstats set\n", un); 8510 8511 /* 8512 * Find out what type of reservation this disk supports. 8513 */ 8514 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) { 8515 case 0: 8516 /* 8517 * SCSI-3 reservations are supported.
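 *
 * (Editorial outline of the switch below, assuming the surrounding
 * code.) The return value of the PERSISTENT RESERVE IN (READ KEYS)
 * probe is interpreted as:
 *
 *	0       -> the command is supported: SD_SCSI3_RESERVATION
 *	ENOTSUP -> a SCSI-2 device that does not recognize the
 *	           command: SD_SCSI2_RESERVATION
 *	other   -> inconclusive: default to SD_SCSI3_RESERVATION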
8518 */ 8519 un->un_reservation_type = SD_SCSI3_RESERVATION; 8520 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8521 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 8522 break; 8523 case ENOTSUP: 8524 /* 8525 * The PERSISTENT RESERVE IN command would not be recognized by 8526 * a SCSI-2 device, so assume the reservation type is SCSI-2. 8527 */ 8528 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8529 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 8530 un->un_reservation_type = SD_SCSI2_RESERVATION; 8531 break; 8532 default: 8533 /* 8534 * default to SCSI-3 reservations 8535 */ 8536 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8537 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 8538 un->un_reservation_type = SD_SCSI3_RESERVATION; 8539 break; 8540 } 8541 8542 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8543 "sd_unit_attach: un:0x%p exit success\n", un); 8544 8545 return (DDI_SUCCESS); 8546 8547 /* 8548 * An error occurred during the attach; clean up & return failure. 8549 */ 8550 8551 devid_failed: 8552 8553 setup_pm_failed: 8554 ddi_remove_minor_node(devi, NULL); 8555 8556 create_minor_nodes_failed: 8557 /* 8558 * Cleanup from the scsi_ifsetcap() calls (437868) 8559 */ 8560 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8561 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8562 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8563 8564 if (un->un_f_is_fibre == FALSE) { 8565 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8566 } 8567 8568 spinup_failed: 8569 8570 mutex_enter(SD_MUTEX(un)); 8571 8572 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */ 8573 if (un->un_direct_priority_timeid != NULL) { 8574 timeout_id_t temp_id = un->un_direct_priority_timeid; 8575 un->un_direct_priority_timeid = NULL; 8576 mutex_exit(SD_MUTEX(un)); 8577 (void) untimeout(temp_id); 8578 mutex_enter(SD_MUTEX(un)); 8579 } 8580 8581 /* Cancel any pending start/stop timeouts */ 8582 if (un->un_startstop_timeid != NULL) { 8583 timeout_id_t temp_id = un->un_startstop_timeid; 8584 un->un_startstop_timeid = NULL; 8585 mutex_exit(SD_MUTEX(un)); 8586 (void) untimeout(temp_id); 8587 mutex_enter(SD_MUTEX(un)); 8588 } 8589 8590 /* Cancel any pending reset-throttle timeouts */ 8591 if (un->un_reset_throttle_timeid != NULL) { 8592 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8593 un->un_reset_throttle_timeid = NULL; 8594 mutex_exit(SD_MUTEX(un)); 8595 (void) untimeout(temp_id); 8596 mutex_enter(SD_MUTEX(un)); 8597 } 8598 8599 /* Cancel any pending retry timeouts */ 8600 if (un->un_retry_timeid != NULL) { 8601 timeout_id_t temp_id = un->un_retry_timeid; 8602 un->un_retry_timeid = NULL; 8603 mutex_exit(SD_MUTEX(un)); 8604 (void) untimeout(temp_id); 8605 mutex_enter(SD_MUTEX(un)); 8606 } 8607 8608 /* Cancel any pending delayed cv broadcast timeouts */ 8609 if (un->un_dcvb_timeid != NULL) { 8610 timeout_id_t temp_id = un->un_dcvb_timeid; 8611 un->un_dcvb_timeid = NULL; 8612 mutex_exit(SD_MUTEX(un)); 8613 (void) untimeout(temp_id); 8614 mutex_enter(SD_MUTEX(un)); 8615 } 8616 8617 mutex_exit(SD_MUTEX(un)); 8618 8619 /* There should not be any in-progress I/O so ASSERT this check */ 8620 ASSERT(un->un_ncmds_in_transport == 0); 8621 ASSERT(un->un_ncmds_in_driver == 0); 8622 8623 /* Do not free the softstate if the callback routine is active */ 8624 sd_sync_with_callback(un); 8625 8626 /* 8627 * Partition stats apparently are not used with removables. These would 8628 * not have been created during attach, so no need to clean them up... 
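 *
 * (Editorial aside on the cancellation blocks above; a sketch of the
 * shared pattern.) Each one snapshots the id, NULLs the field, drops
 * SD_MUTEX so a concurrently firing handler can finish, and only then
 * calls untimeout(9F):
 *
 *	if (un->un_retry_timeid != NULL) {
 *		timeout_id_t temp_id = un->un_retry_timeid;
 *		un->un_retry_timeid = NULL;
 *		mutex_exit(SD_MUTEX(un));
 *		(void) untimeout(temp_id);
 *		mutex_enter(SD_MUTEX(un));
 *	}
 *
 * Holding SD_MUTEX across untimeout() could deadlock against a
 * handler that is already running and blocked on the same mutex.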
8629 */ 8630 if (un->un_stats != NULL) { 8631 kstat_delete(un->un_stats); 8632 un->un_stats = NULL; 8633 } 8634 if (un->un_errstats != NULL) { 8635 kstat_delete(un->un_errstats); 8636 un->un_errstats = NULL; 8637 } 8638 8639 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8640 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8641 8642 ddi_prop_remove_all(devi); 8643 sema_destroy(&un->un_semoclose); 8644 cv_destroy(&un->un_state_cv); 8645 8646 getrbuf_failed: 8647 8648 sd_free_rqs(un); 8649 8650 alloc_rqs_failed: 8651 8652 devp->sd_private = NULL; 8653 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8654 8655 get_softstate_failed: 8656 /* 8657 * Note: the man pages are unclear as to whether or not doing a 8658 * ddi_soft_state_free(sd_state, instance) is the right way to 8659 * clean up after the ddi_soft_state_zalloc() if the subsequent 8660 * ddi_get_soft_state() fails. The implication seems to be 8661 * that the get_soft_state cannot fail if the zalloc succeeds. 8662 */ 8663 ddi_soft_state_free(sd_state, instance); 8664 8665 probe_failed: 8666 scsi_unprobe(devp); 8667 #ifdef SDDEBUG 8668 if ((sd_component_mask & SD_LOG_ATTACH_DETACH) && 8669 (sd_level_mask & SD_LOGMASK_TRACE)) { 8670 cmn_err(CE_CONT, "sd_unit_attach: un:0x%p exit failure\n", 8671 (void *)un); 8672 } 8673 #endif 8674 return (DDI_FAILURE); 8675 } 8676 8677 8678 /* 8679 * Function: sd_unit_detach 8680 * 8681 * Description: Performs DDI_DETACH processing for sddetach(). 8682 * 8683 * Return Code: DDI_SUCCESS 8684 * DDI_FAILURE 8685 * 8686 * Context: Kernel thread context 8687 */ 8688 8689 static int 8690 sd_unit_detach(dev_info_t *devi) 8691 { 8692 struct scsi_device *devp; 8693 struct sd_lun *un; 8694 int i; 8695 dev_t dev; 8696 #if !(defined(__i386) || defined(__amd64)) && !defined(__fibre) 8697 int reset_retval; 8698 #endif 8699 int instance = ddi_get_instance(devi); 8700 8701 mutex_enter(&sd_detach_mutex); 8702 8703 /* 8704 * Fail the detach for any of the following: 8705 * - Unable to get the sd_lun struct for the instance 8706 * - A layered driver has an outstanding open on the instance 8707 * - Another thread is already detaching this instance 8708 * - Another thread is currently performing an open 8709 */ 8710 devp = ddi_get_driver_private(devi); 8711 if ((devp == NULL) || 8712 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8713 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8714 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8715 mutex_exit(&sd_detach_mutex); 8716 return (DDI_FAILURE); 8717 } 8718 8719 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8720 8721 /* 8722 * Mark this instance as currently in a detach, to inhibit any 8723 * opens from a layered driver. 8724 */ 8725 un->un_detach_count++; 8726 mutex_exit(&sd_detach_mutex); 8727 8728 dev = sd_make_device(SD_DEVINFO(un)); 8729 8730 _NOTE(COMPETING_THREADS_NOW); 8731 8732 mutex_enter(SD_MUTEX(un)); 8733 8734 /* 8735 * Fail the detach if there are any outstanding layered 8736 * opens on this device. 8737 */ 8738 for (i = 0; i < NDKMAP; i++) { 8739 if (un->un_ocmap.lyropen[i] != 0) { 8740 goto err_notclosed; 8741 } 8742 } 8743 8744 /* 8745 * Verify there are NO outstanding commands issued to this device. 8746 * ie, un_ncmds_in_transport == 0. 8747 * It's possible to have outstanding commands through the physio 8748 * code path, even though everything's closed. 
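 *
 * (Editorial summary of the test below, assuming the surrounding
 * code.) "Busy" here means any one of the following, and any one of
 * them fails the detach:
 *
 *	if ((un->un_ncmds_in_transport != 0) ||     commands in flight
 *	    (un->un_retry_timeid != NULL) ||        pending retry
 *	    (un->un_direct_priority_timeid != NULL) ||
 *	    (un->un_state == SD_STATE_RWAIT))       waiting on resources
 *		return failure;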
8749 */ 8750 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8751 (un->un_direct_priority_timeid != NULL) || 8752 (un->un_state == SD_STATE_RWAIT)) { 8753 mutex_exit(SD_MUTEX(un)); 8754 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8755 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8756 goto err_stillbusy; 8757 } 8758 8759 /* 8760 * If we have the device reserved, release the reservation. 8761 */ 8762 if ((un->un_resvd_status & SD_RESERVE) && 8763 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8764 mutex_exit(SD_MUTEX(un)); 8765 /* 8766 * Note: sd_reserve_release sends a command to the device 8767 * via the sd_ioctlcmd() path, and can sleep. 8768 */ 8769 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8770 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8771 "sd_dr_detach: Cannot release reservation \n"); 8772 } 8773 } else { 8774 mutex_exit(SD_MUTEX(un)); 8775 } 8776 8777 /* 8778 * Untimeout any reserve recover, throttle reset, restart unit 8779 * and delayed broadcast timeout threads. Protect the timeout pointer 8780 * from getting nulled by their callback functions. 8781 */ 8782 mutex_enter(SD_MUTEX(un)); 8783 if (un->un_resvd_timeid != NULL) { 8784 timeout_id_t temp_id = un->un_resvd_timeid; 8785 un->un_resvd_timeid = NULL; 8786 mutex_exit(SD_MUTEX(un)); 8787 (void) untimeout(temp_id); 8788 mutex_enter(SD_MUTEX(un)); 8789 } 8790 8791 if (un->un_reset_throttle_timeid != NULL) { 8792 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8793 un->un_reset_throttle_timeid = NULL; 8794 mutex_exit(SD_MUTEX(un)); 8795 (void) untimeout(temp_id); 8796 mutex_enter(SD_MUTEX(un)); 8797 } 8798 8799 if (un->un_startstop_timeid != NULL) { 8800 timeout_id_t temp_id = un->un_startstop_timeid; 8801 un->un_startstop_timeid = NULL; 8802 mutex_exit(SD_MUTEX(un)); 8803 (void) untimeout(temp_id); 8804 mutex_enter(SD_MUTEX(un)); 8805 } 8806 8807 if (un->un_dcvb_timeid != NULL) { 8808 timeout_id_t temp_id = un->un_dcvb_timeid; 8809 un->un_dcvb_timeid = NULL; 8810 mutex_exit(SD_MUTEX(un)); 8811 (void) untimeout(temp_id); 8812 } else { 8813 mutex_exit(SD_MUTEX(un)); 8814 } 8815 8816 /* Remove any pending reservation reclaim requests for this device */ 8817 sd_rmv_resv_reclaim_req(dev); 8818 8819 mutex_enter(SD_MUTEX(un)); 8820 8821 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8822 if (un->un_direct_priority_timeid != NULL) { 8823 timeout_id_t temp_id = un->un_direct_priority_timeid; 8824 un->un_direct_priority_timeid = NULL; 8825 mutex_exit(SD_MUTEX(un)); 8826 (void) untimeout(temp_id); 8827 mutex_enter(SD_MUTEX(un)); 8828 } 8829 8830 /* Cancel any active multi-host disk watch thread requests */ 8831 if (un->un_mhd_token != NULL) { 8832 mutex_exit(SD_MUTEX(un)); 8833 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8834 if (scsi_watch_request_terminate(un->un_mhd_token, 8835 SCSI_WATCH_TERMINATE_NOWAIT)) { 8836 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8837 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8838 /* 8839 * Note: We are returning here after having removed 8840 * some driver timeouts above. This is consistent with 8841 * the legacy implementation but perhaps the watch 8842 * terminate call should be made with the wait flag set. 
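			 * (A sketch of the semantics assumed here:
			 * SCSI_WATCH_TERMINATE_NOWAIT fails rather than
			 * blocks if the watch callback is currently active,
			 * whereas terminating with the wait flag would sleep
			 * until the callback drains.)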
8843 			 */
8844 			goto err_stillbusy;
8845 		}
8846 		mutex_enter(SD_MUTEX(un));
8847 		un->un_mhd_token = NULL;
8848 	}
8849 
8850 	if (un->un_swr_token != NULL) {
8851 		mutex_exit(SD_MUTEX(un));
8852 		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token));
8853 		if (scsi_watch_request_terminate(un->un_swr_token,
8854 		    SCSI_WATCH_TERMINATE_NOWAIT)) {
8855 			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8856 			    "sd_dr_detach: Cannot cancel swr watch request\n");
8857 			/*
8858 			 * Note: We are returning here after having removed
8859 			 * some driver timeouts above. This is consistent with
8860 			 * the legacy implementation but perhaps the watch
8861 			 * terminate call should be made with the wait flag set.
8862 			 */
8863 			goto err_stillbusy;
8864 		}
8865 		mutex_enter(SD_MUTEX(un));
8866 		un->un_swr_token = NULL;
8867 	}
8868 
8869 	mutex_exit(SD_MUTEX(un));
8870 
8871 	/*
8872 	 * Clear any scsi_reset_notifies. The cancel request is issued
8873 	 * whether or not a notify callback was actually registered.
8874 	 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX!
8875 	 */
8876 	(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
8877 	    sd_mhd_reset_notify_cb, (caddr_t)un);
8878 
8879 
8880 
8881 #if defined(__i386) || defined(__amd64)
8882 	/*
8883 	 * Gratuitous bus resets sometimes cause an otherwise
8884 	 * okay ATA/ATAPI bus to hang. This is due to the lack of
8885 	 * a clear spec of how resets should be implemented by ATA
8886 	 * disk drives.
8887 	 */
8888 #elif !defined(__fibre)		/* "#else if" does NOT work! */
8889 	/*
8890 	 * Reset target/bus.
8891 	 *
8892 	 * Note: This is a legacy workaround for Elite III dual-port drives
8893 	 * that will not come online after an aborted detach and subsequent
8894 	 * re-attach. It should be removed when the Elite III FW is fixed,
8895 	 * or the drives are no longer supported.
8896 	 */
8897 	if (un->un_f_cfg_is_atapi == FALSE) {
8898 		reset_retval = 0;
8899 
8900 		/* If the device is in low power mode don't reset it */
8901 
8902 		mutex_enter(&un->un_pm_mutex);
8903 		if (!SD_DEVICE_IS_IN_LOW_POWER(un)) {
8904 			/*
8905 			 * First try a LUN reset if we can, then move on to a
8906 			 * target reset if needed; swat the bus as a last
8907 			 * resort.
8908 			 */
8909 			mutex_exit(&un->un_pm_mutex);
8910 			if (un->un_f_allow_bus_device_reset == TRUE) {
8911 				if (un->un_f_lun_reset_enabled == TRUE) {
8912 					reset_retval =
8913 					    scsi_reset(SD_ADDRESS(un),
8914 					    RESET_LUN);
8915 				}
8916 				if (reset_retval == 0) {
8917 					reset_retval =
8918 					    scsi_reset(SD_ADDRESS(un),
8919 					    RESET_TARGET);
8920 				}
8921 			}
8922 			if (reset_retval == 0) {
8923 				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
8924 			}
8925 		} else {
8926 			mutex_exit(&un->un_pm_mutex);
8927 		}
8928 	}
8929 #endif
8930 
8931 	/*
8932 	 * Protect the timeout pointers from getting nulled by
8933 	 * their callback functions during the cancellation process.
8934 	 * In such a scenario untimeout can be invoked with a null value.
8935 	 */
8936 	_NOTE(NO_COMPETING_THREADS_NOW);
8937 
8938 	mutex_enter(&un->un_pm_mutex);
8939 	if (un->un_pm_idle_timeid != NULL) {
8940 		timeout_id_t temp_id = un->un_pm_idle_timeid;
8941 		un->un_pm_idle_timeid = NULL;
8942 		mutex_exit(&un->un_pm_mutex);
8943 
8944 		/*
8945 		 * Timeout is active; cancel it.
8946 		 * Note that it'll never be active on a device
8947 		 * that does not support PM; therefore we don't
8948 		 * have to check before calling pm_idle_component.
8949 		 */
8950 		(void) untimeout(temp_id);
8951 		(void) pm_idle_component(SD_DEVINFO(un), 0);
8952 		mutex_enter(&un->un_pm_mutex);
8953 	}
8954 
8955 	/*
8956 	 * Check whether there is already a timeout scheduled for power
8957 	 * management.
If yes, then don't lower the power here; that's
8958 	 * the timeout handler's job.
8959 	 */
8960 	if (un->un_pm_timeid != NULL) {
8961 		timeout_id_t temp_id = un->un_pm_timeid;
8962 		un->un_pm_timeid = NULL;
8963 		mutex_exit(&un->un_pm_mutex);
8964 		/*
8965 		 * Timeout is active; cancel it.
8966 		 * Note that it'll never be active on a device
8967 		 * that does not support PM; therefore we don't
8968 		 * have to check before calling pm_idle_component.
8969 		 */
8970 		(void) untimeout(temp_id);
8971 		(void) pm_idle_component(SD_DEVINFO(un), 0);
8972 
8973 	} else {
8974 		mutex_exit(&un->un_pm_mutex);
8975 		if ((un->un_f_pm_is_enabled == TRUE) &&
8976 		    (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) !=
8977 		    DDI_SUCCESS)) {
8978 			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8979 			    "sd_dr_detach: Lower power request failed, ignoring.\n");
8980 			/*
8981 			 * Fix for bug: 4297749, item # 13
8982 			 * The above test now includes a check to see if PM is
8983 			 * supported by this device before calling
8984 			 * pm_lower_power().
8985 			 * Note, the following is not dead code. The call to
8986 			 * pm_lower_power above will generate a call back into
8987 			 * our sdpower routine which might result in a timeout
8988 			 * handler getting activated. Therefore the following
8989 			 * code is valid and necessary.
8990 			 */
8991 			mutex_enter(&un->un_pm_mutex);
8992 			if (un->un_pm_timeid != NULL) {
8993 				timeout_id_t temp_id = un->un_pm_timeid;
8994 				un->un_pm_timeid = NULL;
8995 				mutex_exit(&un->un_pm_mutex);
8996 				(void) untimeout(temp_id);
8997 				(void) pm_idle_component(SD_DEVINFO(un), 0);
8998 			} else {
8999 				mutex_exit(&un->un_pm_mutex);
9000 			}
9001 		}
9002 	}
9003 
9004 	/*
9005 	 * Cleanup from the scsi_ifsetcap() calls (437868)
9006 	 * Relocated here from above to be after the call to
9007 	 * pm_lower_power, which was getting errors when done earlier.
9008 	 */
9009 	(void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
9010 	(void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
9011 	(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
9012 
9013 	if (un->un_f_is_fibre == FALSE) {
9014 		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
9015 	}
9016 
9017 	/*
9018 	 * Remove any event callbacks, fibre only.
9019 	 */
9020 	if (un->un_f_is_fibre == TRUE) {
9021 		if ((un->un_insert_event != NULL) &&
9022 		    (ddi_remove_event_handler(un->un_insert_cb_id) !=
9023 		    DDI_SUCCESS)) {
9024 			/*
9025 			 * Note: We are returning here after having done
9026 			 * substantial cleanup above. This is consistent
9027 			 * with the legacy implementation but this may not
9028 			 * be the right thing to do.
9029 			 */
9030 			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
9031 			    "sd_dr_detach: Cannot cancel insert event\n");
9032 			goto err_remove_event;
9033 		}
9034 		un->un_insert_event = NULL;
9035 
9036 		if ((un->un_remove_event != NULL) &&
9037 		    (ddi_remove_event_handler(un->un_remove_cb_id) !=
9038 		    DDI_SUCCESS)) {
9039 			/*
9040 			 * Note: We are returning here after having done
9041 			 * substantial cleanup above. This is consistent
9042 			 * with the legacy implementation but this may not
9043 			 * be the right thing to do.
9044 			 */
9045 			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
9046 			    "sd_dr_detach: Cannot cancel remove event\n");
9047 			goto err_remove_event;
9048 		}
9049 		un->un_remove_event = NULL;
9050 	}
9051 
9052 	/* Do not free the softstate if the callback routine is active */
9053 	sd_sync_with_callback(un);
9054 
9055 	/*
9056 	 * Hold the detach mutex here, to make sure that no other threads ever
9057 	 * can access a (partially) freed soft state structure.
9058 	 */
9059 	mutex_enter(&sd_detach_mutex);
9060 
9061 	/*
9062 	 * Clean up the soft state struct.
9063 * Cleanup is done in reverse order of allocs/inits. 9064 * At this point there should be no competing threads anymore. 9065 */ 9066 9067 /* Unregister and free device id. */ 9068 ddi_devid_unregister(devi); 9069 if (un->un_devid) { 9070 ddi_devid_free(un->un_devid); 9071 un->un_devid = NULL; 9072 } 9073 9074 /* 9075 * Destroy wmap cache if it exists. 9076 */ 9077 if (un->un_wm_cache != NULL) { 9078 kmem_cache_destroy(un->un_wm_cache); 9079 un->un_wm_cache = NULL; 9080 } 9081 9082 /* Remove minor nodes */ 9083 ddi_remove_minor_node(devi, NULL); 9084 9085 /* 9086 * kstat cleanup is done in detach for all device types (4363169). 9087 * We do not want to fail detach if the device kstats are not deleted 9088 * since there is a confusion about the devo_refcnt for the device. 9089 * We just delete the kstats and let detach complete successfully. 9090 */ 9091 if (un->un_stats != NULL) { 9092 kstat_delete(un->un_stats); 9093 un->un_stats = NULL; 9094 } 9095 if (un->un_errstats != NULL) { 9096 kstat_delete(un->un_errstats); 9097 un->un_errstats = NULL; 9098 } 9099 9100 /* Remove partition stats (not created for removables) */ 9101 if (!ISREMOVABLE(un)) { 9102 for (i = 0; i < NSDMAP; i++) { 9103 if (un->un_pstats[i] != NULL) { 9104 kstat_delete(un->un_pstats[i]); 9105 un->un_pstats[i] = NULL; 9106 } 9107 } 9108 } 9109 9110 /* Remove xbuf registration */ 9111 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 9112 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 9113 9114 /* Remove driver properties */ 9115 ddi_prop_remove_all(devi); 9116 9117 mutex_destroy(&un->un_pm_mutex); 9118 cv_destroy(&un->un_pm_busy_cv); 9119 9120 /* Open/close semaphore */ 9121 sema_destroy(&un->un_semoclose); 9122 9123 /* Removable media condvar. */ 9124 cv_destroy(&un->un_state_cv); 9125 9126 /* Suspend/resume condvar. */ 9127 cv_destroy(&un->un_suspend_cv); 9128 cv_destroy(&un->un_disk_busy_cv); 9129 9130 sd_free_rqs(un); 9131 9132 /* Free up soft state */ 9133 devp->sd_private = NULL; 9134 bzero(un, sizeof (struct sd_lun)); 9135 ddi_soft_state_free(sd_state, instance); 9136 9137 mutex_exit(&sd_detach_mutex); 9138 9139 /* This frees up the INQUIRY data associated with the device. 
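 * It undoes the scsi_probe() issued at attach time; the cached
 * inquiry data (devp->sd_inq) must not be referenced afterwards.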
*/ 9140 scsi_unprobe(devp); 9141 9142 return (DDI_SUCCESS); 9143 9144 err_notclosed: 9145 mutex_exit(SD_MUTEX(un)); 9146 9147 err_stillbusy: 9148 _NOTE(NO_COMPETING_THREADS_NOW); 9149 9150 err_remove_event: 9151 mutex_enter(&sd_detach_mutex); 9152 un->un_detach_count--; 9153 mutex_exit(&sd_detach_mutex); 9154 9155 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 9156 return (DDI_FAILURE); 9157 } 9158 9159 9160 /* 9161 * Driver minor node structure and data table 9162 */ 9163 struct driver_minor_data { 9164 char *name; 9165 minor_t minor; 9166 int type; 9167 }; 9168 9169 static struct driver_minor_data sd_minor_data[] = { 9170 {"a", 0, S_IFBLK}, 9171 {"b", 1, S_IFBLK}, 9172 {"c", 2, S_IFBLK}, 9173 {"d", 3, S_IFBLK}, 9174 {"e", 4, S_IFBLK}, 9175 {"f", 5, S_IFBLK}, 9176 {"g", 6, S_IFBLK}, 9177 {"h", 7, S_IFBLK}, 9178 #if defined(_SUNOS_VTOC_16) 9179 {"i", 8, S_IFBLK}, 9180 {"j", 9, S_IFBLK}, 9181 {"k", 10, S_IFBLK}, 9182 {"l", 11, S_IFBLK}, 9183 {"m", 12, S_IFBLK}, 9184 {"n", 13, S_IFBLK}, 9185 {"o", 14, S_IFBLK}, 9186 {"p", 15, S_IFBLK}, 9187 #endif /* defined(_SUNOS_VTOC_16) */ 9188 #if defined(_FIRMWARE_NEEDS_FDISK) 9189 {"q", 16, S_IFBLK}, 9190 {"r", 17, S_IFBLK}, 9191 {"s", 18, S_IFBLK}, 9192 {"t", 19, S_IFBLK}, 9193 {"u", 20, S_IFBLK}, 9194 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9195 {"a,raw", 0, S_IFCHR}, 9196 {"b,raw", 1, S_IFCHR}, 9197 {"c,raw", 2, S_IFCHR}, 9198 {"d,raw", 3, S_IFCHR}, 9199 {"e,raw", 4, S_IFCHR}, 9200 {"f,raw", 5, S_IFCHR}, 9201 {"g,raw", 6, S_IFCHR}, 9202 {"h,raw", 7, S_IFCHR}, 9203 #if defined(_SUNOS_VTOC_16) 9204 {"i,raw", 8, S_IFCHR}, 9205 {"j,raw", 9, S_IFCHR}, 9206 {"k,raw", 10, S_IFCHR}, 9207 {"l,raw", 11, S_IFCHR}, 9208 {"m,raw", 12, S_IFCHR}, 9209 {"n,raw", 13, S_IFCHR}, 9210 {"o,raw", 14, S_IFCHR}, 9211 {"p,raw", 15, S_IFCHR}, 9212 #endif /* defined(_SUNOS_VTOC_16) */ 9213 #if defined(_FIRMWARE_NEEDS_FDISK) 9214 {"q,raw", 16, S_IFCHR}, 9215 {"r,raw", 17, S_IFCHR}, 9216 {"s,raw", 18, S_IFCHR}, 9217 {"t,raw", 19, S_IFCHR}, 9218 {"u,raw", 20, S_IFCHR}, 9219 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9220 {0} 9221 }; 9222 9223 static struct driver_minor_data sd_minor_data_efi[] = { 9224 {"a", 0, S_IFBLK}, 9225 {"b", 1, S_IFBLK}, 9226 {"c", 2, S_IFBLK}, 9227 {"d", 3, S_IFBLK}, 9228 {"e", 4, S_IFBLK}, 9229 {"f", 5, S_IFBLK}, 9230 {"g", 6, S_IFBLK}, 9231 {"wd", 7, S_IFBLK}, 9232 #if defined(_FIRMWARE_NEEDS_FDISK) 9233 {"q", 16, S_IFBLK}, 9234 {"r", 17, S_IFBLK}, 9235 {"s", 18, S_IFBLK}, 9236 {"t", 19, S_IFBLK}, 9237 {"u", 20, S_IFBLK}, 9238 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9239 {"a,raw", 0, S_IFCHR}, 9240 {"b,raw", 1, S_IFCHR}, 9241 {"c,raw", 2, S_IFCHR}, 9242 {"d,raw", 3, S_IFCHR}, 9243 {"e,raw", 4, S_IFCHR}, 9244 {"f,raw", 5, S_IFCHR}, 9245 {"g,raw", 6, S_IFCHR}, 9246 {"wd,raw", 7, S_IFCHR}, 9247 #if defined(_FIRMWARE_NEEDS_FDISK) 9248 {"q,raw", 16, S_IFCHR}, 9249 {"r,raw", 17, S_IFCHR}, 9250 {"s,raw", 18, S_IFCHR}, 9251 {"t,raw", 19, S_IFCHR}, 9252 {"u,raw", 20, S_IFCHR}, 9253 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9254 {0} 9255 }; 9256 9257 9258 /* 9259 * Function: sd_create_minor_nodes 9260 * 9261 * Description: Create the minor device nodes for the instance. 
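 *
 *		The minor number encodes both the instance and the
 *		partition as (instance << SDUNIT_SHIFT) | partition.
 *		A minimal decode sketch, assuming the usual SDUNIT()/
 *		SDPART() shift-and-mask macros from sddef.h:
 *
 *			dev_t dev = sd_make_device(devi);
 *			int instance = SDUNIT(dev);	(high minor bits)
 *			int part = SDPART(dev);		(low minor bits)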
9262 * 9263 * Arguments: un - driver soft state (unit) structure 9264 * devi - pointer to device info structure 9265 * 9266 * Return Code: DDI_SUCCESS 9267 * DDI_FAILURE 9268 * 9269 * Context: Kernel thread context 9270 */ 9271 9272 static int 9273 sd_create_minor_nodes(struct sd_lun *un, dev_info_t *devi) 9274 { 9275 struct driver_minor_data *dmdp; 9276 struct scsi_device *devp; 9277 int instance; 9278 char name[48]; 9279 9280 ASSERT(un != NULL); 9281 devp = ddi_get_driver_private(devi); 9282 instance = ddi_get_instance(devp->sd_dev); 9283 9284 /* 9285 * Create all the minor nodes for this target. 9286 */ 9287 if (un->un_blockcount > DK_MAX_BLOCKS) 9288 dmdp = sd_minor_data_efi; 9289 else 9290 dmdp = sd_minor_data; 9291 while (dmdp->name != NULL) { 9292 9293 (void) sprintf(name, "%s", dmdp->name); 9294 9295 if (ddi_create_minor_node(devi, name, dmdp->type, 9296 (instance << SDUNIT_SHIFT) | dmdp->minor, 9297 un->un_node_type, NULL) == DDI_FAILURE) { 9298 /* 9299 * Clean up any nodes that may have been created, in 9300 * case this fails in the middle of the loop. 9301 */ 9302 ddi_remove_minor_node(devi, NULL); 9303 return (DDI_FAILURE); 9304 } 9305 dmdp++; 9306 } 9307 9308 return (DDI_SUCCESS); 9309 } 9310 9311 9312 /* 9313 * Function: sd_create_errstats 9314 * 9315 * Description: This routine instantiates the device error stats. 9316 * 9317 * Note: During attach the stats are instantiated first so they are 9318 * available for attach-time routines that utilize the driver 9319 * iopath to send commands to the device. The stats are initialized 9320 * separately so data obtained during some attach-time routines is 9321 * available. (4362483) 9322 * 9323 * Arguments: un - driver soft state (unit) structure 9324 * instance - driver instance 9325 * 9326 * Context: Kernel thread context 9327 */ 9328 9329 static void 9330 sd_create_errstats(struct sd_lun *un, int instance) 9331 { 9332 struct sd_errstats *stp; 9333 char kstatmodule_err[KSTAT_STRLEN]; 9334 char kstatname[KSTAT_STRLEN]; 9335 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 9336 9337 ASSERT(un != NULL); 9338 9339 if (un->un_errstats != NULL) { 9340 return; 9341 } 9342 9343 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 9344 "%serr", sd_label); 9345 (void) snprintf(kstatname, sizeof (kstatname), 9346 "%s%d,err", sd_label, instance); 9347 9348 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 9349 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 9350 9351 if (un->un_errstats == NULL) { 9352 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 9353 "sd_create_errstats: Failed kstat_create\n"); 9354 return; 9355 } 9356 9357 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9358 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 9359 KSTAT_DATA_UINT32); 9360 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 9361 KSTAT_DATA_UINT32); 9362 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 9363 KSTAT_DATA_UINT32); 9364 kstat_named_init(&stp->sd_vid, "Vendor", 9365 KSTAT_DATA_CHAR); 9366 kstat_named_init(&stp->sd_pid, "Product", 9367 KSTAT_DATA_CHAR); 9368 kstat_named_init(&stp->sd_revision, "Revision", 9369 KSTAT_DATA_CHAR); 9370 kstat_named_init(&stp->sd_serial, "Serial No", 9371 KSTAT_DATA_CHAR); 9372 kstat_named_init(&stp->sd_capacity, "Size", 9373 KSTAT_DATA_ULONGLONG); 9374 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 9375 KSTAT_DATA_UINT32); 9376 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 9377 KSTAT_DATA_UINT32); 9378 
kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 9379 KSTAT_DATA_UINT32); 9380 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 9381 KSTAT_DATA_UINT32); 9382 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 9383 KSTAT_DATA_UINT32); 9384 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 9385 KSTAT_DATA_UINT32); 9386 9387 un->un_errstats->ks_private = un; 9388 un->un_errstats->ks_update = nulldev; 9389 9390 kstat_install(un->un_errstats); 9391 } 9392 9393 9394 /* 9395 * Function: sd_set_errstats 9396 * 9397 * Description: This routine sets the value of the vendor id, product id, 9398 * revision, serial number, and capacity device error stats. 9399 * 9400 * Note: During attach the stats are instantiated first so they are 9401 * available for attach-time routines that utilize the driver 9402 * iopath to send commands to the device. The stats are initialized 9403 * separately so data obtained during some attach-time routines is 9404 * available. (4362483) 9405 * 9406 * Arguments: un - driver soft state (unit) structure 9407 * 9408 * Context: Kernel thread context 9409 */ 9410 9411 static void 9412 sd_set_errstats(struct sd_lun *un) 9413 { 9414 struct sd_errstats *stp; 9415 9416 ASSERT(un != NULL); 9417 ASSERT(un->un_errstats != NULL); 9418 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9419 ASSERT(stp != NULL); 9420 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 9421 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 9422 (void) strncpy(stp->sd_revision.value.c, 9423 un->un_sd->sd_inq->inq_revision, 4); 9424 9425 /* 9426 * Set the "Serial No" kstat for Sun qualified drives (indicated by 9427 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 9428 * (4376302)) 9429 */ 9430 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 9431 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 9432 sizeof (SD_INQUIRY(un)->inq_serial)); 9433 } 9434 9435 if (un->un_f_blockcount_is_valid != TRUE) { 9436 /* 9437 * Set capacity error stat to 0 for no media. This ensures 9438 * a valid capacity is displayed in response to 'iostat -E' 9439 * when no media is present in the device. 9440 */ 9441 stp->sd_capacity.value.ui64 = 0; 9442 } else { 9443 /* 9444 * Multiply un_blockcount by un->un_sys_blocksize to get 9445 * capacity. 9446 * 9447 * Note: for non-512 blocksize devices "un_blockcount" has been 9448 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 9449 * (un_tgt_blocksize / un->un_sys_blocksize). 9450 */ 9451 stp->sd_capacity.value.ui64 = (uint64_t) 9452 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 9453 } 9454 } 9455 9456 9457 /* 9458 * Function: sd_set_pstats 9459 * 9460 * Description: This routine instantiates and initializes the partition 9461 * stats for each partition with more than zero blocks. 9462 * (4363169) 9463 * 9464 * Arguments: un - driver soft state (unit) structure 9465 * 9466 * Context: Kernel thread context 9467 */ 9468 9469 static void 9470 sd_set_pstats(struct sd_lun *un) 9471 { 9472 char kstatname[KSTAT_STRLEN]; 9473 int instance; 9474 int i; 9475 9476 ASSERT(un != NULL); 9477 9478 instance = ddi_get_instance(SD_DEVINFO(un)); 9479 9480 /* Note:x86: is this a VTOC8/VTOC16 difference? 
*/ 9481 for (i = 0; i < NSDMAP; i++) { 9482 if ((un->un_pstats[i] == NULL) && 9483 (un->un_map[i].dkl_nblk != 0)) { 9484 (void) snprintf(kstatname, sizeof (kstatname), 9485 "%s%d,%s", sd_label, instance, 9486 sd_minor_data[i].name); 9487 un->un_pstats[i] = kstat_create(sd_label, 9488 instance, kstatname, "partition", KSTAT_TYPE_IO, 9489 1, KSTAT_FLAG_PERSISTENT); 9490 if (un->un_pstats[i] != NULL) { 9491 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 9492 kstat_install(un->un_pstats[i]); 9493 } 9494 } 9495 } 9496 } 9497 9498 9499 #if (defined(__fibre)) 9500 /* 9501 * Function: sd_init_event_callbacks 9502 * 9503 * Description: This routine initializes the insertion and removal event 9504 * callbacks. (fibre only) 9505 * 9506 * Arguments: un - driver soft state (unit) structure 9507 * 9508 * Context: Kernel thread context 9509 */ 9510 9511 static void 9512 sd_init_event_callbacks(struct sd_lun *un) 9513 { 9514 ASSERT(un != NULL); 9515 9516 if ((un->un_insert_event == NULL) && 9517 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 9518 &un->un_insert_event) == DDI_SUCCESS)) { 9519 /* 9520 * Add the callback for an insertion event 9521 */ 9522 (void) ddi_add_event_handler(SD_DEVINFO(un), 9523 un->un_insert_event, sd_event_callback, (void *)un, 9524 &(un->un_insert_cb_id)); 9525 } 9526 9527 if ((un->un_remove_event == NULL) && 9528 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 9529 &un->un_remove_event) == DDI_SUCCESS)) { 9530 /* 9531 * Add the callback for a removal event 9532 */ 9533 (void) ddi_add_event_handler(SD_DEVINFO(un), 9534 un->un_remove_event, sd_event_callback, (void *)un, 9535 &(un->un_remove_cb_id)); 9536 } 9537 } 9538 9539 9540 /* 9541 * Function: sd_event_callback 9542 * 9543 * Description: This routine handles insert/remove events (photon). The 9544 * state is changed to OFFLINE which can be used to supress 9545 * error msgs. (fibre only) 9546 * 9547 * Arguments: un - driver soft state (unit) structure 9548 * 9549 * Context: Callout thread context 9550 */ 9551 /* ARGSUSED */ 9552 static void 9553 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 9554 void *bus_impldata) 9555 { 9556 struct sd_lun *un = (struct sd_lun *)arg; 9557 9558 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 9559 if (event == un->un_insert_event) { 9560 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 9561 mutex_enter(SD_MUTEX(un)); 9562 if (un->un_state == SD_STATE_OFFLINE) { 9563 if (un->un_last_state != SD_STATE_SUSPENDED) { 9564 un->un_state = un->un_last_state; 9565 } else { 9566 /* 9567 * We have gone through SUSPEND/RESUME while 9568 * we were offline. Restore the last state 9569 */ 9570 un->un_state = un->un_save_state; 9571 } 9572 } 9573 mutex_exit(SD_MUTEX(un)); 9574 9575 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 9576 } else if (event == un->un_remove_event) { 9577 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 9578 mutex_enter(SD_MUTEX(un)); 9579 /* 9580 * We need to handle an event callback that occurs during 9581 * the suspend operation, since we don't prevent it. 
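		 * (If a remove event arrives while the unit is SUSPENDED,
		 * only un_last_state is set to OFFLINE below, so the resume
		 * path will restore the offline state rather than
		 * clobbering the suspend.)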
9582 */ 9583 if (un->un_state != SD_STATE_OFFLINE) { 9584 if (un->un_state != SD_STATE_SUSPENDED) { 9585 New_state(un, SD_STATE_OFFLINE); 9586 } else { 9587 un->un_last_state = SD_STATE_OFFLINE; 9588 } 9589 } 9590 mutex_exit(SD_MUTEX(un)); 9591 } else { 9592 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 9593 "!Unknown event\n"); 9594 } 9595 9596 } 9597 #endif 9598 9599 9600 /* 9601 * Function: sd_disable_caching() 9602 * 9603 * Description: This routine is the driver entry point for disabling 9604 * read and write caching by modifying the WCE (write cache 9605 * enable) and RCD (read cache disable) bits of mode 9606 * page 8 (MODEPAGE_CACHING). 9607 * 9608 * Arguments: un - driver soft state (unit) structure 9609 * 9610 * Return Code: EIO 9611 * code returned by sd_send_scsi_MODE_SENSE and 9612 * sd_send_scsi_MODE_SELECT 9613 * 9614 * Context: Kernel Thread 9615 */ 9616 9617 static int 9618 sd_disable_caching(struct sd_lun *un) 9619 { 9620 struct mode_caching *mode_caching_page; 9621 uchar_t *header; 9622 size_t buflen; 9623 int hdrlen; 9624 int bd_len; 9625 int rval = 0; 9626 9627 ASSERT(un != NULL); 9628 9629 /* 9630 * Do a test unit ready, otherwise a mode sense may not work if this 9631 * is the first command sent to the device after boot. 9632 */ 9633 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 9634 9635 if (un->un_f_cfg_is_atapi == TRUE) { 9636 hdrlen = MODE_HEADER_LENGTH_GRP2; 9637 } else { 9638 hdrlen = MODE_HEADER_LENGTH; 9639 } 9640 9641 /* 9642 * Allocate memory for the retrieved mode page and its headers. Set 9643 * a pointer to the page itself. 9644 */ 9645 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 9646 header = kmem_zalloc(buflen, KM_SLEEP); 9647 9648 /* Get the information from the device. */ 9649 if (un->un_f_cfg_is_atapi == TRUE) { 9650 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 9651 MODEPAGE_CACHING, SD_PATH_DIRECT); 9652 } else { 9653 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 9654 MODEPAGE_CACHING, SD_PATH_DIRECT); 9655 } 9656 if (rval != 0) { 9657 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9658 "sd_disable_caching: Mode Sense Failed\n"); 9659 kmem_free(header, buflen); 9660 return (rval); 9661 } 9662 9663 /* 9664 * Determine size of Block Descriptors in order to locate 9665 * the mode page data. ATAPI devices return 0, SCSI devices 9666 * should return MODE_BLK_DESC_LENGTH. 9667 */ 9668 if (un->un_f_cfg_is_atapi == TRUE) { 9669 struct mode_header_grp2 *mhp; 9670 mhp = (struct mode_header_grp2 *)header; 9671 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9672 } else { 9673 bd_len = ((struct mode_header *)header)->bdesc_length; 9674 } 9675 9676 if (bd_len > MODE_BLK_DESC_LENGTH) { 9677 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9678 "sd_disable_caching: Mode Sense returned invalid " 9679 "block descriptor length\n"); 9680 kmem_free(header, buflen); 9681 return (EIO); 9682 } 9683 9684 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9685 9686 /* Check the relevant bits on successful mode sense. */ 9687 if ((mode_caching_page->wce) || !(mode_caching_page->rcd)) { 9688 /* 9689 * Read or write caching is enabled. Disable both of them. 9690 */ 9691 mode_caching_page->wce = 0; 9692 mode_caching_page->rcd = 1; 9693 9694 /* Clear reserved bits before mode select. */ 9695 mode_caching_page->mode_page.ps = 0; 9696 9697 /* 9698 * Clear out mode header for mode select. 9699 * The rest of the retrieved page will be reused. 
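		 * (The header fields returned by MODE SENSE, notably the
		 * mode data length, are reserved in the MODE SELECT
		 * parameter list and must be sent as zero; the block
		 * descriptor and the modified caching page that follow
		 * the header in the buffer are reused unchanged.)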
9700 		 */
9701 		bzero(header, hdrlen);
9702 
9703 		/* Change the cache page to disable all caching. */
9704 		if (un->un_f_cfg_is_atapi == TRUE) {
9705 			rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header,
9706 			    buflen, SD_SAVE_PAGE, SD_PATH_DIRECT);
9707 		} else {
9708 			rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header,
9709 			    buflen, SD_SAVE_PAGE, SD_PATH_DIRECT);
9710 		}
9711 	}
9712 
9713 	kmem_free(header, buflen);
9714 	return (rval);
9715 }
9716 
9717 
9718 /*
9719  * Function: sd_make_device
9720  *
9721  * Description: Utility routine to return the Solaris device number from
9722  *		the data in the device's dev_info structure.
9723  *
9724  * Return Code: The Solaris device number
9725  *
9726  * Context: Any
9727  */
9728 
9729 static dev_t
9730 sd_make_device(dev_info_t *devi)
9731 {
9732 	return (makedevice(ddi_name_to_major(ddi_get_name(devi)),
9733 	    ddi_get_instance(devi) << SDUNIT_SHIFT));
9734 }
9735 
9736 
9737 /*
9738  * Function: sd_pm_entry
9739  *
9740  * Description: Called at the start of a new command to manage power
9741  *		and busy status of a device. This includes determining whether
9742  *		the current power state of the device is sufficient for
9743  *		performing the command or whether it must be changed.
9744  *		The PM framework is notified appropriately.
9745  *		Only with a return status of DDI_SUCCESS will the
9746  *		component have been marked busy to the framework.
9747  *
9748  *		All callers of sd_pm_entry must check the return status
9749  *		and only call sd_pm_exit if it was DDI_SUCCESS. A status
9750  *		of DDI_FAILURE indicates the device failed to power up.
9751  *		In this case un_pm_count has been adjusted so the result
9752  *		on exit is still powered down, i.e. count is less than 0.
9753  *		Calling sd_pm_exit with this count value hits an ASSERT.
9754  *
9755  * Return Code: DDI_SUCCESS or DDI_FAILURE
9756  *
9757  * Context: Kernel thread context.
9758  */
9759 
9760 static int
9761 sd_pm_entry(struct sd_lun *un)
9762 {
9763 	int return_status = DDI_SUCCESS;
9764 
9765 	ASSERT(!mutex_owned(SD_MUTEX(un)));
9766 	ASSERT(!mutex_owned(&un->un_pm_mutex));
9767 
9768 	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");
9769 
9770 	if (un->un_f_pm_is_enabled == FALSE) {
9771 		SD_TRACE(SD_LOG_IO_PM, un,
9772 		    "sd_pm_entry: exiting, PM not enabled\n");
9773 		return (return_status);
9774 	}
9775 
9776 	/*
9777 	 * Just increment a counter if PM is enabled. On the transition from
9778 	 * 0 ==> 1, mark the device as busy. The iodone side will decrement
9779 	 * the count with each IO and mark the device as idle when the count
9780 	 * hits 0.
9781 	 *
9782 	 * If the count is less than 0 the device is powered down. If a powered
9783 	 * down device is successfully powered up then the count must be
9784 	 * incremented to reflect the power up. Note that it'll get incremented
9785 	 * a second time to become busy.
9786 	 *
9787 	 * Because the following has the potential to change the device state
9788 	 * and must release the un_pm_mutex to do so, only one thread can be
9789 	 * allowed through at a time.
9790 	 */
9791 
9792 	mutex_enter(&un->un_pm_mutex);
9793 	while (un->un_pm_busy == TRUE) {
9794 		cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
9795 	}
9796 	un->un_pm_busy = TRUE;
9797 
9798 	if (un->un_pm_count < 1) {
9799 
9800 		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");
9801 
9802 		/*
9803 		 * Indicate we are now busy so the framework won't attempt to
9804 		 * power down the device. This call will only fail if either
9805 		 * we passed a bad component number or the device has no
9806 		 * components. Neither of these should ever happen.
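		 * Each successful pm_busy_component() here is balanced by
		 * a pm_idle_component(), either in the power-up failure
		 * path below or in sd_pm_exit() once un_pm_count drops
		 * back to zero.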
9807 */ 9808 mutex_exit(&un->un_pm_mutex); 9809 return_status = pm_busy_component(SD_DEVINFO(un), 0); 9810 ASSERT(return_status == DDI_SUCCESS); 9811 9812 mutex_enter(&un->un_pm_mutex); 9813 9814 if (un->un_pm_count < 0) { 9815 mutex_exit(&un->un_pm_mutex); 9816 9817 SD_TRACE(SD_LOG_IO_PM, un, 9818 "sd_pm_entry: power up component\n"); 9819 9820 /* 9821 * pm_raise_power will cause sdpower to be called 9822 * which brings the device power level to the 9823 * desired state, ON in this case. If successful, 9824 * un_pm_count and un_power_level will be updated 9825 * appropriately. 9826 */ 9827 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9828 SD_SPINDLE_ON); 9829 9830 mutex_enter(&un->un_pm_mutex); 9831 9832 if (return_status != DDI_SUCCESS) { 9833 /* 9834 * Power up failed. 9835 * Idle the device and adjust the count 9836 * so the result on exit is that we're 9837 * still powered down, ie. count is less than 0. 9838 */ 9839 SD_TRACE(SD_LOG_IO_PM, un, 9840 "sd_pm_entry: power up failed," 9841 " idle the component\n"); 9842 9843 (void) pm_idle_component(SD_DEVINFO(un), 0); 9844 un->un_pm_count--; 9845 } else { 9846 /* 9847 * Device is powered up, verify the 9848 * count is non-negative. 9849 * This is debug only. 9850 */ 9851 ASSERT(un->un_pm_count == 0); 9852 } 9853 } 9854 9855 if (return_status == DDI_SUCCESS) { 9856 /* 9857 * For performance, now that the device has been tagged 9858 * as busy, and it's known to be powered up, update the 9859 * chain types to use jump tables that do not include 9860 * pm. This significantly lowers the overhead and 9861 * therefore improves performance. 9862 */ 9863 9864 mutex_exit(&un->un_pm_mutex); 9865 mutex_enter(SD_MUTEX(un)); 9866 SD_TRACE(SD_LOG_IO_PM, un, 9867 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9868 un->un_uscsi_chain_type); 9869 9870 if (ISREMOVABLE(un)) { 9871 un->un_buf_chain_type = 9872 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9873 } else { 9874 un->un_buf_chain_type = 9875 SD_CHAIN_INFO_DISK_NO_PM; 9876 } 9877 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 9878 9879 SD_TRACE(SD_LOG_IO_PM, un, 9880 " changed uscsi_chain_type to %d\n", 9881 un->un_uscsi_chain_type); 9882 mutex_exit(SD_MUTEX(un)); 9883 mutex_enter(&un->un_pm_mutex); 9884 9885 if (un->un_pm_idle_timeid == NULL) { 9886 /* 300 ms. */ 9887 un->un_pm_idle_timeid = 9888 timeout(sd_pm_idletimeout_handler, un, 9889 (drv_usectohz((clock_t)300000))); 9890 /* 9891 * Include an extra call to busy which keeps the 9892 * device busy with-respect-to the PM layer 9893 * until the timer fires, at which time it'll 9894 * get the extra idle call. 9895 */ 9896 (void) pm_busy_component(SD_DEVINFO(un), 0); 9897 } 9898 } 9899 } 9900 un->un_pm_busy = FALSE; 9901 /* Next... */ 9902 cv_signal(&un->un_pm_busy_cv); 9903 9904 un->un_pm_count++; 9905 9906 SD_TRACE(SD_LOG_IO_PM, un, 9907 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 9908 9909 mutex_exit(&un->un_pm_mutex); 9910 9911 return (return_status); 9912 } 9913 9914 9915 /* 9916 * Function: sd_pm_exit 9917 * 9918 * Description: Called at the completion of a command to manage busy 9919 * status for the device. If the device becomes idle the 9920 * PM framework is notified. 
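 *		Must be called exactly once for each sd_pm_entry() call
 *		that returned DDI_SUCCESS, and never after one that failed.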
9921 * 9922 * Context: Kernel thread context 9923 */ 9924 9925 static void 9926 sd_pm_exit(struct sd_lun *un) 9927 { 9928 ASSERT(!mutex_owned(SD_MUTEX(un))); 9929 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9930 9931 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 9932 9933 /* 9934 * After attach the following flag is only read, so don't 9935 * take the penalty of acquiring a mutex for it. 9936 */ 9937 if (un->un_f_pm_is_enabled == TRUE) { 9938 9939 mutex_enter(&un->un_pm_mutex); 9940 un->un_pm_count--; 9941 9942 SD_TRACE(SD_LOG_IO_PM, un, 9943 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 9944 9945 ASSERT(un->un_pm_count >= 0); 9946 if (un->un_pm_count == 0) { 9947 mutex_exit(&un->un_pm_mutex); 9948 9949 SD_TRACE(SD_LOG_IO_PM, un, 9950 "sd_pm_exit: idle component\n"); 9951 9952 (void) pm_idle_component(SD_DEVINFO(un), 0); 9953 9954 } else { 9955 mutex_exit(&un->un_pm_mutex); 9956 } 9957 } 9958 9959 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9960 } 9961 9962 9963 /* 9964 * Function: sdopen 9965 * 9966 * Description: Driver's open(9e) entry point function. 9967 * 9968 * Arguments: dev_i - pointer to device number 9969 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 9970 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9971 * cred_p - user credential pointer 9972 * 9973 * Return Code: EINVAL 9974 * ENXIO 9975 * EIO 9976 * EROFS 9977 * EBUSY 9978 * 9979 * Context: Kernel thread context 9980 */ 9981 /* ARGSUSED */ 9982 static int 9983 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 9984 { 9985 struct sd_lun *un; 9986 int nodelay; 9987 int part; 9988 uint64_t partmask; 9989 int instance; 9990 dev_t dev; 9991 int rval = EIO; 9992 9993 /* Validate the open type */ 9994 if (otyp >= OTYPCNT) { 9995 return (EINVAL); 9996 } 9997 9998 dev = *dev_p; 9999 instance = SDUNIT(dev); 10000 mutex_enter(&sd_detach_mutex); 10001 10002 /* 10003 * Fail the open if there is no softstate for the instance, or 10004 * if another thread somewhere is trying to detach the instance. 10005 */ 10006 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 10007 (un->un_detach_count != 0)) { 10008 mutex_exit(&sd_detach_mutex); 10009 /* 10010 * The probe cache only needs to be cleared when open (9e) fails 10011 * with ENXIO (4238046). 10012 */ 10013 /* 10014 * un-conditionally clearing probe cache is ok with 10015 * separate sd/ssd binaries 10016 * x86 platform can be an issue with both parallel 10017 * and fibre in 1 binary 10018 */ 10019 sd_scsi_clear_probe_cache(); 10020 return (ENXIO); 10021 } 10022 10023 /* 10024 * The un_layer_count is to prevent another thread in specfs from 10025 * trying to detach the instance, which can happen when we are 10026 * called from a higher-layer driver instead of thru specfs. 10027 * This will not be needed when DDI provides a layered driver 10028 * interface that allows specfs to know that an instance is in 10029 * use by a layered driver & should not be detached. 10030 * 10031 * Note: the semantics for layered driver opens are exactly one 10032 * close for every open. 10033 */ 10034 if (otyp == OTYP_LYR) { 10035 un->un_layer_count++; 10036 } 10037 10038 /* 10039 * Keep a count of the current # of opens in progress. This is because 10040 * some layered drivers try to call us as a regular open. This can 10041 * cause problems that we cannot prevent, however by keeping this count 10042 * we can at least keep our open and detach routines from racing against 10043 * each other under such conditions. 
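	 * sd_unit_detach() makes the mirror-image check: it fails the
	 * detach, under sd_detach_mutex, while un_opens_in_progress or
	 * un_layer_count is nonzero, which closes the race from both sides.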
10044 */ 10045 un->un_opens_in_progress++; 10046 mutex_exit(&sd_detach_mutex); 10047 10048 nodelay = (flag & (FNDELAY | FNONBLOCK)); 10049 part = SDPART(dev); 10050 partmask = 1 << part; 10051 10052 /* 10053 * We use a semaphore here in order to serialize 10054 * open and close requests on the device. 10055 */ 10056 sema_p(&un->un_semoclose); 10057 10058 mutex_enter(SD_MUTEX(un)); 10059 10060 /* 10061 * All device accesses go thru sdstrategy() where we check 10062 * on suspend status but there could be a scsi_poll command, 10063 * which bypasses sdstrategy(), so we need to check pm 10064 * status. 10065 */ 10066 10067 if (!nodelay) { 10068 while ((un->un_state == SD_STATE_SUSPENDED) || 10069 (un->un_state == SD_STATE_PM_CHANGING)) { 10070 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10071 } 10072 10073 mutex_exit(SD_MUTEX(un)); 10074 if (sd_pm_entry(un) != DDI_SUCCESS) { 10075 rval = EIO; 10076 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 10077 "sdopen: sd_pm_entry failed\n"); 10078 goto open_failed_with_pm; 10079 } 10080 mutex_enter(SD_MUTEX(un)); 10081 } 10082 10083 /* check for previous exclusive open */ 10084 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 10085 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 10086 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 10087 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 10088 10089 if (un->un_exclopen & (partmask)) { 10090 goto excl_open_fail; 10091 } 10092 10093 if (flag & FEXCL) { 10094 int i; 10095 if (un->un_ocmap.lyropen[part]) { 10096 goto excl_open_fail; 10097 } 10098 for (i = 0; i < (OTYPCNT - 1); i++) { 10099 if (un->un_ocmap.regopen[i] & (partmask)) { 10100 goto excl_open_fail; 10101 } 10102 } 10103 } 10104 10105 /* 10106 * Check the write permission if this is a removable media device, 10107 * NDELAY has not been set, and writable permission is requested. 10108 * 10109 * Note: If NDELAY was set and this is write-protected media the WRITE 10110 * attempt will fail with EIO as part of the I/O processing. This is a 10111 * more permissive implementation that allows the open to succeed and 10112 * WRITE attempts to fail when appropriate. 10113 */ 10114 if (ISREMOVABLE(un)) { 10115 if ((flag & FWRITE) && (!nodelay)) { 10116 mutex_exit(SD_MUTEX(un)); 10117 /* 10118 * Defer the check for write permission on writable 10119 * DVD drive till sdstrategy and will not fail open even 10120 * if FWRITE is set as the device can be writable 10121 * depending upon the media and the media can change 10122 * after the call to open(). 10123 */ 10124 if (un->un_f_dvdram_writable_device == FALSE) { 10125 if (ISCD(un) || sr_check_wp(dev)) { 10126 rval = EROFS; 10127 mutex_enter(SD_MUTEX(un)); 10128 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10129 "write to cd or write protected media\n"); 10130 goto open_fail; 10131 } 10132 } 10133 mutex_enter(SD_MUTEX(un)); 10134 } 10135 } 10136 10137 /* 10138 * If opening in NDELAY/NONBLOCK mode, just return. 10139 * Check if disk is ready and has a valid geometry later. 10140 */ 10141 if (!nodelay) { 10142 mutex_exit(SD_MUTEX(un)); 10143 rval = sd_ready_and_valid(un); 10144 mutex_enter(SD_MUTEX(un)); 10145 /* 10146 * Fail if device is not ready or if the number of disk 10147 * blocks is zero or negative for non CD devices. 
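		 * (Removable devices report ENXIO here, presumably because
		 * absent media is closer to "no such device" than to a hard
		 * I/O fault; fixed disks report EIO.)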
10148 */ 10149 if ((rval != SD_READY_VALID) || 10150 (!ISCD(un) && un->un_map[part].dkl_nblk <= 0)) { 10151 if (ISREMOVABLE(un)) { 10152 rval = ENXIO; 10153 } else { 10154 rval = EIO; 10155 } 10156 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10157 "device not ready or invalid disk block value\n"); 10158 goto open_fail; 10159 } 10160 #if defined(__i386) || defined(__amd64) 10161 } else { 10162 uchar_t *cp; 10163 /* 10164 * x86 requires special nodelay handling, so that p0 is 10165 * always defined and accessible. 10166 * Invalidate geometry only if device is not already open. 10167 */ 10168 cp = &un->un_ocmap.chkd[0]; 10169 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10170 if (*cp != (uchar_t)0) { 10171 break; 10172 } 10173 cp++; 10174 } 10175 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10176 un->un_f_geometry_is_valid = FALSE; 10177 } 10178 10179 #endif 10180 } 10181 10182 if (otyp == OTYP_LYR) { 10183 un->un_ocmap.lyropen[part]++; 10184 } else { 10185 un->un_ocmap.regopen[otyp] |= partmask; 10186 } 10187 10188 /* Set up open and exclusive open flags */ 10189 if (flag & FEXCL) { 10190 un->un_exclopen |= (partmask); 10191 } 10192 10193 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10194 "open of part %d type %d\n", part, otyp); 10195 10196 mutex_exit(SD_MUTEX(un)); 10197 if (!nodelay) { 10198 sd_pm_exit(un); 10199 } 10200 10201 sema_v(&un->un_semoclose); 10202 10203 mutex_enter(&sd_detach_mutex); 10204 un->un_opens_in_progress--; 10205 mutex_exit(&sd_detach_mutex); 10206 10207 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 10208 return (DDI_SUCCESS); 10209 10210 excl_open_fail: 10211 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 10212 rval = EBUSY; 10213 10214 open_fail: 10215 mutex_exit(SD_MUTEX(un)); 10216 10217 /* 10218 * On a failed open we must exit the pm management. 10219 */ 10220 if (!nodelay) { 10221 sd_pm_exit(un); 10222 } 10223 open_failed_with_pm: 10224 sema_v(&un->un_semoclose); 10225 10226 mutex_enter(&sd_detach_mutex); 10227 un->un_opens_in_progress--; 10228 if (otyp == OTYP_LYR) { 10229 un->un_layer_count--; 10230 } 10231 mutex_exit(&sd_detach_mutex); 10232 10233 return (rval); 10234 } 10235 10236 10237 /* 10238 * Function: sdclose 10239 * 10240 * Description: Driver's close(9e) entry point function. 10241 * 10242 * Arguments: dev - device number 10243 * flag - file status flag, informational only 10244 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 10245 * cred_p - user credential pointer 10246 * 10247 * Return Code: ENXIO 10248 * 10249 * Context: Kernel thread context 10250 */ 10251 /* ARGSUSED */ 10252 static int 10253 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 10254 { 10255 struct sd_lun *un; 10256 uchar_t *cp; 10257 int part; 10258 int nodelay; 10259 int rval = 0; 10260 10261 /* Validate the open type */ 10262 if (otyp >= OTYPCNT) { 10263 return (ENXIO); 10264 } 10265 10266 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10267 return (ENXIO); 10268 } 10269 10270 part = SDPART(dev); 10271 nodelay = flag & (FNDELAY | FNONBLOCK); 10272 10273 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 10274 "sdclose: close of part %d type %d\n", part, otyp); 10275 10276 /* 10277 * We use a semaphore here in order to serialize 10278 * open and close requests on the device. 10279 */ 10280 sema_p(&un->un_semoclose); 10281 10282 mutex_enter(SD_MUTEX(un)); 10283 10284 /* Don't proceed if power is being changed. 
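	 * The wait below relies on the power-transition code broadcasting
	 * un_suspend_cv once un_state leaves SD_STATE_PM_CHANGING.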
	 */
10285 	while (un->un_state == SD_STATE_PM_CHANGING) {
10286 		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10287 	}
10288 
10289 	if (un->un_exclopen & (1 << part)) {
10290 		un->un_exclopen &= ~(1 << part);
10291 	}
10292 
10293 	/* Update the open partition map */
10294 	if (otyp == OTYP_LYR) {
10295 		un->un_ocmap.lyropen[part] -= 1;
10296 	} else {
10297 		un->un_ocmap.regopen[otyp] &= ~(1 << part);
10298 	}
10299 
10300 	cp = &un->un_ocmap.chkd[0];
10301 	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10302 		if (*cp != NULL) {
10303 			break;
10304 		}
10305 		cp++;
10306 	}
10307 
10308 	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10309 		SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
10310 
10311 		/*
10312 		 * We avoid persistence upon the last close, and set
10313 		 * the throttle back to the maximum.
10314 		 */
10315 		un->un_throttle = un->un_saved_throttle;
10316 
10317 		if (un->un_state == SD_STATE_OFFLINE) {
10318 			if (un->un_f_is_fibre == FALSE) {
10319 				scsi_log(SD_DEVINFO(un), sd_label,
10320 				    CE_WARN, "offline\n");
10321 			}
10322 			un->un_f_geometry_is_valid = FALSE;
10323 
10324 		} else {
10325 			/*
10326 			 * Flush any outstanding writes in NVRAM cache.
10327 			 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
10328 			 * command; it may not work for non-Pluto devices.
10329 			 * SYNCHRONIZE CACHE is not required for removables,
10330 			 * except DVD-RAM drives.
10331 			 *
10332 			 * Also note: because SYNCHRONIZE CACHE is currently
10333 			 * the only command issued here that requires the
10334 			 * drive be powered up, only do the power up before
10335 			 * sending the Sync Cache command. If additional
10336 			 * commands are added which require a powered up
10337 			 * drive, the following sequence may have to change.
10338 			 *
10339 			 * And finally, note that parallel SCSI on SPARC
10340 			 * only issues a Sync Cache to DVD-RAM, a newly
10341 			 * supported device.
10342 			 */
10343 #if defined(__i386) || defined(__amd64)
10344 			if (!ISREMOVABLE(un) ||
10345 			    un->un_f_dvdram_writable_device == TRUE) {
10346 #else
10347 			if (un->un_f_dvdram_writable_device == TRUE) {
10348 #endif
10349 				mutex_exit(SD_MUTEX(un));
10350 				if (sd_pm_entry(un) == DDI_SUCCESS) {
10351 					if (sd_send_scsi_SYNCHRONIZE_CACHE(un)
10352 					    != 0) {
10353 						rval = EIO;
10354 					}
10355 					sd_pm_exit(un);
10356 				} else {
10357 					rval = EIO;
10358 				}
10359 				mutex_enter(SD_MUTEX(un));
10360 			}
10361 
10362 			/*
10363 			 * For removable media devices, send an ALLOW MEDIA
10364 			 * REMOVAL command, but don't get upset if it fails.
10365 			 * Also invalidate the geometry. We need to raise
10366 			 * the power of the drive before we can call
10367 			 * sd_send_scsi_DOORLOCK().
10368 			 */
10369 			if (ISREMOVABLE(un)) {
10370 				mutex_exit(SD_MUTEX(un));
10371 				if (sd_pm_entry(un) == DDI_SUCCESS) {
10372 					rval = sd_send_scsi_DOORLOCK(un,
10373 					    SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
10374 
10375 					sd_pm_exit(un);
10376 					if (ISCD(un) && (rval != 0) &&
10377 					    (nodelay != 0)) {
10378 						rval = ENXIO;
10379 					}
10380 				} else {
10381 					rval = EIO;
10382 				}
10383 				mutex_enter(SD_MUTEX(un));
10384 
10385 				sr_ejected(un);
10386 				/*
10387 				 * Destroy the cache (if it exists) which was
10388 				 * allocated for the write maps since this is
10389 				 * the last close for this media.
10390 				 */
10391 				if (un->un_wm_cache) {
10392 					/*
10393 					 * Check if there are pending commands;
10394 					 * if there are, give a warning and
10395 					 * do not destroy the cache.
10396 */ 10397 if (un->un_ncmds_in_driver > 0) { 10398 scsi_log(SD_DEVINFO(un), 10399 sd_label, CE_WARN, 10400 "Unable to clean up memory " 10401 "because of pending I/O\n"); 10402 } else { 10403 kmem_cache_destroy( 10404 un->un_wm_cache); 10405 un->un_wm_cache = NULL; 10406 } 10407 } 10408 } 10409 } 10410 } 10411 10412 mutex_exit(SD_MUTEX(un)); 10413 sema_v(&un->un_semoclose); 10414 10415 if (otyp == OTYP_LYR) { 10416 mutex_enter(&sd_detach_mutex); 10417 /* 10418 * The detach routine may run when the layer count 10419 * drops to zero. 10420 */ 10421 un->un_layer_count--; 10422 mutex_exit(&sd_detach_mutex); 10423 } 10424 10425 return (rval); 10426 } 10427 10428 10429 /* 10430 * Function: sd_ready_and_valid 10431 * 10432 * Description: Test if device is ready and has a valid geometry. 10433 * 10434 * Arguments: dev - device number 10435 * un - driver soft state (unit) structure 10436 * 10437 * Return Code: SD_READY_VALID ready and valid label 10438 * SD_READY_NOT_VALID ready, geom ops never applicable 10439 * SD_NOT_READY_VALID not ready, no label 10440 * 10441 * Context: Never called at interrupt context. 10442 */ 10443 10444 static int 10445 sd_ready_and_valid(struct sd_lun *un) 10446 { 10447 struct sd_errstats *stp; 10448 uint64_t capacity; 10449 uint_t lbasize; 10450 int rval = SD_READY_VALID; 10451 char name_str[48]; 10452 10453 ASSERT(un != NULL); 10454 ASSERT(!mutex_owned(SD_MUTEX(un))); 10455 10456 mutex_enter(SD_MUTEX(un)); 10457 if (ISREMOVABLE(un)) { 10458 mutex_exit(SD_MUTEX(un)); 10459 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 10460 rval = SD_NOT_READY_VALID; 10461 mutex_enter(SD_MUTEX(un)); 10462 goto done; 10463 } 10464 10465 mutex_enter(SD_MUTEX(un)); 10466 if ((un->un_f_geometry_is_valid == FALSE) || 10467 (un->un_f_blockcount_is_valid == FALSE) || 10468 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 10469 10470 /* capacity has to be read every open. */ 10471 mutex_exit(SD_MUTEX(un)); 10472 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 10473 &lbasize, SD_PATH_DIRECT) != 0) { 10474 mutex_enter(SD_MUTEX(un)); 10475 un->un_f_geometry_is_valid = FALSE; 10476 rval = SD_NOT_READY_VALID; 10477 goto done; 10478 } else { 10479 mutex_enter(SD_MUTEX(un)); 10480 sd_update_block_info(un, lbasize, capacity); 10481 } 10482 } 10483 10484 /* 10485 * If this is a non 512 block device, allocate space for 10486 * the wmap cache. This is being done here since every time 10487 * a media is changed this routine will be called and the 10488 * block size is a function of media rather than device. 10489 */ 10490 if (NOT_DEVBSIZE(un)) { 10491 if (!(un->un_wm_cache)) { 10492 (void) snprintf(name_str, sizeof (name_str), 10493 "%s%d_cache", 10494 ddi_driver_name(SD_DEVINFO(un)), 10495 ddi_get_instance(SD_DEVINFO(un))); 10496 un->un_wm_cache = kmem_cache_create( 10497 name_str, sizeof (struct sd_w_map), 10498 8, sd_wm_cache_constructor, 10499 sd_wm_cache_destructor, NULL, 10500 (void *)un, NULL, 0); 10501 if (!(un->un_wm_cache)) { 10502 rval = ENOMEM; 10503 goto done; 10504 } 10505 } 10506 } 10507 10508 /* 10509 * Check if the media in the device is writable or not. 10510 */ 10511 if ((un->un_f_geometry_is_valid == FALSE) && ISCD(un)) { 10512 sd_check_for_writable_cd(un); 10513 } 10514 10515 } else { 10516 /* 10517 * Do a test unit ready to clear any unit attention from non-cd 10518 * devices. 
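		 * (A pending unit attention, e.g. from a power cycle or bus
		 * reset, would otherwise fail the next real command with a
		 * check condition, so it is consumed here and the status
		 * deliberately ignored.)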
		 */
10520 		mutex_exit(SD_MUTEX(un));
10521 		(void) sd_send_scsi_TEST_UNIT_READY(un, 0);
10522 		mutex_enter(SD_MUTEX(un));
10523 	}
10524 
10525 
10526 	if (un->un_state == SD_STATE_NORMAL) {
10527 		/*
10528 		 * If the target is not yet ready here (defined by a TUR
10529 		 * failure), invalidate the geometry and print an 'offline'
10530 		 * message. This is a legacy message, as the state of the
10531 		 * target is not actually changed to SD_STATE_OFFLINE.
10532 		 *
10533 		 * If the TUR fails for EACCES (Reservation Conflict), it
10534 		 * means there actually is nothing wrong with the target that
10535 		 * would require invalidating the geometry, so continue in
10536 		 * that case as if the TUR was successful.
10537 		 */
10538 		int err;
10539 
10540 		mutex_exit(SD_MUTEX(un));
10541 		err = sd_send_scsi_TEST_UNIT_READY(un, 0);
10542 		mutex_enter(SD_MUTEX(un));
10543 
10544 		if ((err != 0) && (err != EACCES)) {
10545 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
10546 			    "offline\n");
10547 			un->un_f_geometry_is_valid = FALSE;
10548 			rval = SD_NOT_READY_VALID;
10549 			goto done;
10550 		}
10551 	}
10552 
10553 	if (un->un_f_format_in_progress == FALSE) {
10554 		/*
10555 		 * Note: sd_validate_geometry may return TRUE, but that does
10556 		 * not necessarily mean un_f_geometry_is_valid == TRUE!
10557 		 */
10558 		rval = sd_validate_geometry(un, SD_PATH_DIRECT);
10559 		if (rval == ENOTSUP) {
10560 			if (un->un_f_geometry_is_valid == TRUE)
10561 				rval = 0;
10562 			else {
10563 				rval = SD_READY_NOT_VALID;
10564 				goto done;
10565 			}
10566 		}
10567 		if (rval != 0) {
10568 			/*
10569 			 * We don't check the validity of geometry for
10570 			 * CDROMs. Also we assume we have a good label
10571 			 * even if sd_validate_geometry returned ENOMEM.
10572 			 */
10573 			if (!ISCD(un) && rval != ENOMEM) {
10574 				rval = SD_NOT_READY_VALID;
10575 				goto done;
10576 			}
10577 		}
10578 	}
10579 
10580 #ifdef DOESNTWORK /* on eliteII, see 1118607 */
10581 	/*
10582 	 * Check to see if this disk is write protected; if it is and we
10583 	 * have not set read-only, then fail.
10584 	 */
10585 	if ((flag & FWRITE) && (sr_check_wp(dev))) {
10586 		New_state(un, SD_STATE_CLOSED);
10587 		goto done;
10588 	}
10589 #endif
10590 
10591 	/*
10592 	 * If this is a removable media device, try to send
10593 	 * a PREVENT MEDIA REMOVAL command, but don't get upset
10594 	 * if it fails. For a CD, however, it is an error.
10595 	 */
10596 	if (ISREMOVABLE(un)) {
10597 		mutex_exit(SD_MUTEX(un));
10598 		if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
10599 		    SD_PATH_DIRECT) != 0) && ISCD(un)) {
10600 			rval = SD_NOT_READY_VALID;
10601 			mutex_enter(SD_MUTEX(un));
10602 			goto done;
10603 		}
10604 		mutex_enter(SD_MUTEX(un));
10605 	}
10606 
10607 	/* The state has changed, inform the media watch routines */
10608 	un->un_mediastate = DKIO_INSERTED;
10609 	cv_broadcast(&un->un_state_cv);
10610 	rval = SD_READY_VALID;
10611 
10612 done:
10613 
10614 	/*
10615 	 * Initialize the capacity kstat value if there was no media
10616 	 * previously (capacity kstat is 0) and media has now been
10617 	 * inserted (un_blockcount > 0).
10618 	 * This is a more generic way than checking for ISREMOVABLE.
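	 * A worked example of the computation below: a device reporting
	 * un_blockcount = 2097152 with un_sys_blocksize = 512 yields
	 * 2097152 * 512 = 1073741824 bytes (1 GiB) in the capacity kstat.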
10619 */ 10620 if (un->un_errstats != NULL) { 10621 stp = (struct sd_errstats *)un->un_errstats->ks_data; 10622 if ((stp->sd_capacity.value.ui64 == 0) && 10623 (un->un_f_blockcount_is_valid == TRUE)) { 10624 stp->sd_capacity.value.ui64 = 10625 (uint64_t)((uint64_t)un->un_blockcount * 10626 un->un_sys_blocksize); 10627 } 10628 } 10629 10630 mutex_exit(SD_MUTEX(un)); 10631 return (rval); 10632 } 10633 10634 10635 /* 10636 * Function: sdmin 10637 * 10638 * Description: Routine to limit the size of a data transfer. Used in 10639 * conjunction with physio(9F). 10640 * 10641 * Arguments: bp - pointer to the indicated buf(9S) struct. 10642 * 10643 * Context: Kernel thread context. 10644 */ 10645 10646 static void 10647 sdmin(struct buf *bp) 10648 { 10649 struct sd_lun *un; 10650 int instance; 10651 10652 instance = SDUNIT(bp->b_edev); 10653 10654 un = ddi_get_soft_state(sd_state, instance); 10655 ASSERT(un != NULL); 10656 10657 if (bp->b_bcount > un->un_max_xfer_size) { 10658 bp->b_bcount = un->un_max_xfer_size; 10659 } 10660 } 10661 10662 10663 /* 10664 * Function: sdread 10665 * 10666 * Description: Driver's read(9e) entry point function. 10667 * 10668 * Arguments: dev - device number 10669 * uio - structure pointer describing where data is to be stored 10670 * in user's space 10671 * cred_p - user credential pointer 10672 * 10673 * Return Code: ENXIO 10674 * EIO 10675 * EINVAL 10676 * value returned by physio 10677 * 10678 * Context: Kernel thread context. 10679 */ 10680 /* ARGSUSED */ 10681 static int 10682 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 10683 { 10684 struct sd_lun *un = NULL; 10685 int secmask; 10686 int err; 10687 10688 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10689 return (ENXIO); 10690 } 10691 10692 ASSERT(!mutex_owned(SD_MUTEX(un))); 10693 10694 if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) { 10695 mutex_enter(SD_MUTEX(un)); 10696 /* 10697 * Because the call to sd_ready_and_valid will issue I/O we 10698 * must wait here if either the device is suspended or 10699 * if it's power level is changing. 10700 */ 10701 while ((un->un_state == SD_STATE_SUSPENDED) || 10702 (un->un_state == SD_STATE_PM_CHANGING)) { 10703 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10704 } 10705 un->un_ncmds_in_driver++; 10706 mutex_exit(SD_MUTEX(un)); 10707 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10708 mutex_enter(SD_MUTEX(un)); 10709 un->un_ncmds_in_driver--; 10710 ASSERT(un->un_ncmds_in_driver >= 0); 10711 mutex_exit(SD_MUTEX(un)); 10712 return (EIO); 10713 } 10714 mutex_enter(SD_MUTEX(un)); 10715 un->un_ncmds_in_driver--; 10716 ASSERT(un->un_ncmds_in_driver >= 0); 10717 mutex_exit(SD_MUTEX(un)); 10718 } 10719 10720 /* 10721 * Read requests are restricted to multiples of the system block size. 10722 */ 10723 secmask = un->un_sys_blocksize - 1; 10724 10725 if (uio->uio_loffset & ((offset_t)(secmask))) { 10726 SD_ERROR(SD_LOG_READ_WRITE, un, 10727 "sdread: file offset not modulo %d\n", 10728 un->un_sys_blocksize); 10729 err = EINVAL; 10730 } else if (uio->uio_iov->iov_len & (secmask)) { 10731 SD_ERROR(SD_LOG_READ_WRITE, un, 10732 "sdread: transfer length not modulo %d\n", 10733 un->un_sys_blocksize); 10734 err = EINVAL; 10735 } else { 10736 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 10737 } 10738 return (err); 10739 } 10740 10741 10742 /* 10743 * Function: sdwrite 10744 * 10745 * Description: Driver's write(9e) entry point function. 
10746 * 10747 * Arguments: dev - device number 10748 * uio - structure pointer describing where data is stored in 10749 * user's space 10750 * cred_p - user credential pointer 10751 * 10752 * Return Code: ENXIO 10753 * EIO 10754 * EINVAL 10755 * value returned by physio 10756 * 10757 * Context: Kernel thread context. 10758 */ 10759 /* ARGSUSED */ 10760 static int 10761 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 10762 { 10763 struct sd_lun *un = NULL; 10764 int secmask; 10765 int err; 10766 10767 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10768 return (ENXIO); 10769 } 10770 10771 ASSERT(!mutex_owned(SD_MUTEX(un))); 10772 10773 if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) { 10774 mutex_enter(SD_MUTEX(un)); 10775 /* 10776 * Because the call to sd_ready_and_valid will issue I/O we 10777 * must wait here if either the device is suspended or 10778 * if it's power level is changing. 10779 */ 10780 while ((un->un_state == SD_STATE_SUSPENDED) || 10781 (un->un_state == SD_STATE_PM_CHANGING)) { 10782 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10783 } 10784 un->un_ncmds_in_driver++; 10785 mutex_exit(SD_MUTEX(un)); 10786 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10787 mutex_enter(SD_MUTEX(un)); 10788 un->un_ncmds_in_driver--; 10789 ASSERT(un->un_ncmds_in_driver >= 0); 10790 mutex_exit(SD_MUTEX(un)); 10791 return (EIO); 10792 } 10793 mutex_enter(SD_MUTEX(un)); 10794 un->un_ncmds_in_driver--; 10795 ASSERT(un->un_ncmds_in_driver >= 0); 10796 mutex_exit(SD_MUTEX(un)); 10797 } 10798 10799 /* 10800 * Write requests are restricted to multiples of the system block size. 10801 */ 10802 secmask = un->un_sys_blocksize - 1; 10803 10804 if (uio->uio_loffset & ((offset_t)(secmask))) { 10805 SD_ERROR(SD_LOG_READ_WRITE, un, 10806 "sdwrite: file offset not modulo %d\n", 10807 un->un_sys_blocksize); 10808 err = EINVAL; 10809 } else if (uio->uio_iov->iov_len & (secmask)) { 10810 SD_ERROR(SD_LOG_READ_WRITE, un, 10811 "sdwrite: transfer length not modulo %d\n", 10812 un->un_sys_blocksize); 10813 err = EINVAL; 10814 } else { 10815 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 10816 } 10817 return (err); 10818 } 10819 10820 10821 /* 10822 * Function: sdaread 10823 * 10824 * Description: Driver's aread(9e) entry point function. 10825 * 10826 * Arguments: dev - device number 10827 * aio - structure pointer describing where data is to be stored 10828 * cred_p - user credential pointer 10829 * 10830 * Return Code: ENXIO 10831 * EIO 10832 * EINVAL 10833 * value returned by aphysio 10834 * 10835 * Context: Kernel thread context. 10836 */ 10837 /* ARGSUSED */ 10838 static int 10839 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10840 { 10841 struct sd_lun *un = NULL; 10842 struct uio *uio = aio->aio_uio; 10843 int secmask; 10844 int err; 10845 10846 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10847 return (ENXIO); 10848 } 10849 10850 ASSERT(!mutex_owned(SD_MUTEX(un))); 10851 10852 if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) { 10853 mutex_enter(SD_MUTEX(un)); 10854 /* 10855 * Because the call to sd_ready_and_valid will issue I/O we 10856 * must wait here if either the device is suspended or 10857 * if it's power level is changing. 
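 * (This is the usual condition-variable pattern: the loop below
 * re-tests the state after every wakeup. The resume and PM code
 * paths are expected to broadcast un_suspend_cv once the state
 * leaves SD_STATE_SUSPENDED/SD_STATE_PM_CHANGING.)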
10858 */ 10859 while ((un->un_state == SD_STATE_SUSPENDED) || 10860 (un->un_state == SD_STATE_PM_CHANGING)) { 10861 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10862 } 10863 un->un_ncmds_in_driver++; 10864 mutex_exit(SD_MUTEX(un)); 10865 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10866 mutex_enter(SD_MUTEX(un)); 10867 un->un_ncmds_in_driver--; 10868 ASSERT(un->un_ncmds_in_driver >= 0); 10869 mutex_exit(SD_MUTEX(un)); 10870 return (EIO); 10871 } 10872 mutex_enter(SD_MUTEX(un)); 10873 un->un_ncmds_in_driver--; 10874 ASSERT(un->un_ncmds_in_driver >= 0); 10875 mutex_exit(SD_MUTEX(un)); 10876 } 10877 10878 /* 10879 * Read requests are restricted to multiples of the system block size. 10880 */ 10881 secmask = un->un_sys_blocksize - 1; 10882 10883 if (uio->uio_loffset & ((offset_t)(secmask))) { 10884 SD_ERROR(SD_LOG_READ_WRITE, un, 10885 "sdaread: file offset not modulo %d\n", 10886 un->un_sys_blocksize); 10887 err = EINVAL; 10888 } else if (uio->uio_iov->iov_len & (secmask)) { 10889 SD_ERROR(SD_LOG_READ_WRITE, un, 10890 "sdaread: transfer length not modulo %d\n", 10891 un->un_sys_blocksize); 10892 err = EINVAL; 10893 } else { 10894 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 10895 } 10896 return (err); 10897 } 10898 10899 10900 /* 10901 * Function: sdawrite 10902 * 10903 * Description: Driver's awrite(9e) entry point function. 10904 * 10905 * Arguments: dev - device number 10906 * aio - structure pointer describing where data is stored 10907 * cred_p - user credential pointer 10908 * 10909 * Return Code: ENXIO 10910 * EIO 10911 * EINVAL 10912 * value returned by aphysio 10913 * 10914 * Context: Kernel thread context. 10915 */ 10916 /* ARGSUSED */ 10917 static int 10918 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10919 { 10920 struct sd_lun *un = NULL; 10921 struct uio *uio = aio->aio_uio; 10922 int secmask; 10923 int err; 10924 10925 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10926 return (ENXIO); 10927 } 10928 10929 ASSERT(!mutex_owned(SD_MUTEX(un))); 10930 10931 if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) { 10932 mutex_enter(SD_MUTEX(un)); 10933 /* 10934 * Because the call to sd_ready_and_valid will issue I/O we 10935 * must wait here if either the device is suspended or 10936 * if it's power level is changing. 10937 */ 10938 while ((un->un_state == SD_STATE_SUSPENDED) || 10939 (un->un_state == SD_STATE_PM_CHANGING)) { 10940 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10941 } 10942 un->un_ncmds_in_driver++; 10943 mutex_exit(SD_MUTEX(un)); 10944 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10945 mutex_enter(SD_MUTEX(un)); 10946 un->un_ncmds_in_driver--; 10947 ASSERT(un->un_ncmds_in_driver >= 0); 10948 mutex_exit(SD_MUTEX(un)); 10949 return (EIO); 10950 } 10951 mutex_enter(SD_MUTEX(un)); 10952 un->un_ncmds_in_driver--; 10953 ASSERT(un->un_ncmds_in_driver >= 0); 10954 mutex_exit(SD_MUTEX(un)); 10955 } 10956 10957 /* 10958 * Write requests are restricted to multiples of the system block size. 
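 * As an illustration, with un_sys_blocksize == 512 the mask computed
 * below is 0x1FF: an offset of 1024 with a length of 8192 passes both
 * checks, while an offset of 1030 (1030 & 0x1FF == 6) is rejected
 * with EINVAL.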
10959 */ 10960 secmask = un->un_sys_blocksize - 1; 10961 10962 if (uio->uio_loffset & ((offset_t)(secmask))) { 10963 SD_ERROR(SD_LOG_READ_WRITE, un, 10964 "sdawrite: file offset not modulo %d\n", 10965 un->un_sys_blocksize); 10966 err = EINVAL; 10967 } else if (uio->uio_iov->iov_len & (secmask)) { 10968 SD_ERROR(SD_LOG_READ_WRITE, un, 10969 "sdawrite: transfer length not modulo %d\n", 10970 un->un_sys_blocksize); 10971 err = EINVAL; 10972 } else { 10973 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 10974 } 10975 return (err); 10976 } 10977 10978 10979 10980 10981 10982 /* 10983 * Driver IO processing follows the following sequence: 10984 * 10985 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10986 * | | ^ 10987 * v v | 10988 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10989 * | | | | 10990 * v | | | 10991 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10992 * | | ^ ^ 10993 * v v | | 10994 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10995 * | | | | 10996 * +---+ | +------------+ +-------+ 10997 * | | | | 10998 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10999 * | v | | 11000 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 11001 * | | ^ | 11002 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11003 * | v | | 11004 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 11005 * | | ^ | 11006 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11007 * | v | | 11008 * | sd_checksum_iostart() sd_checksum_iodone() | 11009 * | | ^ | 11010 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 11011 * | v | | 11012 * | sd_pm_iostart() sd_pm_iodone() | 11013 * | | ^ | 11014 * | | | | 11015 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 11016 * | ^ 11017 * v | 11018 * sd_core_iostart() | 11019 * | | 11020 * | +------>(*destroypkt)() 11021 * +-> sd_start_cmds() <-+ | | 11022 * | | | v 11023 * | | | scsi_destroy_pkt(9F) 11024 * | | | 11025 * +->(*initpkt)() +- sdintr() 11026 * | | | | 11027 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 11028 * | +-> scsi_setup_cdb(9F) | 11029 * | | 11030 * +--> scsi_transport(9F) | 11031 * | | 11032 * +----> SCSA ---->+ 11033 * 11034 * 11035 * This code is based upon the following presumtions: 11036 * 11037 * - iostart and iodone functions operate on buf(9S) structures. These 11038 * functions perform the necessary operations on the buf(9S) and pass 11039 * them along to the next function in the chain by using the macros 11040 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 11041 * (for iodone side functions). 11042 * 11043 * - The iostart side functions may sleep. The iodone side functions 11044 * are called under interrupt context and may NOT sleep. Therefore 11045 * iodone side functions also may not call iostart side functions. 11046 * (NOTE: iostart side functions should NOT sleep for memory, as 11047 * this could result in deadlock.) 11048 * 11049 * - An iostart side function may call its corresponding iodone side 11050 * function directly (if necessary). 11051 * 11052 * - In the event of an error, an iostart side function can return a buf(9S) 11053 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 11054 * b_error in the usual way of course). 11055 * 11056 * - The taskq mechanism may be used by the iodone side functions to dispatch 11057 * requests to the iostart side functions. The iostart side functions in 11058 * this case would be called under the context of a taskq thread, so it's 11059 * OK for them to block/sleep/spin in this case. 
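 *
 * - A minimal iostart-side layer therefore has the following shape
 *   (an illustrative sketch only; sd_xxx_iostart is a hypothetical
 *   name, but the pattern matches the stub sd_checksum_iostart below):
 *
 *	static void
 *	sd_xxx_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		... transform bp and/or its sd_xbuf as needed ...
 *		SD_NEXT_IOSTART(index, un, bp);
 *	}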
11060	 *
11061	 * - iostart side functions may allocate "shadow" buf(9S) structs and
11062	 *   pass them along to the next function in the chain. The corresponding
11063	 *   iodone side functions must coalesce the "shadow" bufs and return
11064	 *   the "original" buf to the next higher layer.
11065	 *
11066	 * - The b_private field of the buf(9S) struct holds a pointer to
11067	 *   an sd_xbuf struct, which contains information needed to
11068	 *   construct the scsi_pkt for the command.
11069	 *
11070	 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
11071	 *   layer must acquire & release the SD_MUTEX(un) as needed.
11072	 */
11073
11074
11075	/*
11076	 * Create taskq for all targets in the system. This is created at
11077	 * _init(9E) and destroyed at _fini(9E).
11078	 *
11079	 * Note: here we set the minalloc to a reasonably high number to ensure that
11080	 * we will have an adequate supply of task entries available at interrupt time.
11081	 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
11082	 * sd_taskq_create(). Since we do not want to sleep for allocations at
11083	 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
11084	 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
11085	 * requests at any one instant in time.
11086	 */
11087	#define	SD_TASKQ_NUMTHREADS	8
11088	#define	SD_TASKQ_MINALLOC	256
11089	#define	SD_TASKQ_MAXALLOC	256
11090
11091	static taskq_t	*sd_tq = NULL;
11092	static int	sd_taskq_minalloc = SD_TASKQ_MINALLOC;
11093	static int	sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
11094
11095	/*
11096	 * The following task queue is created for the write part of
11097	 * read-modify-write handling on devices with a non-512-byte block size.
11098	 * The number of threads is limited to 1 for now. This number was chosen
11099	 * because the queue currently serves only DVD-RAM and MO drives, for
11100	 * which performance is not the main criterion at this stage.
11101	 * Note: whether a single taskq could serve both purposes remains to be explored.
11102	 */
11103	#define	SD_WMR_TASKQ_NUMTHREADS	1
11104	static taskq_t	*sd_wmr_tq = NULL;
11105
11106	/*
11107	 * Function: sd_taskq_create
11108	 *
11109	 * Description: Create taskq thread(s) and preallocate task entries
11110	 *
11111	 * Return Code: None; the created taskqs are stored in sd_tq and sd_wmr_tq.
11112	 *
11113	 * Context: Can sleep. Requires blockable context.
11114	 *
11115	 * Notes: - The taskq() facility currently is NOT part of the DDI.
11116	 *	  (definitely NOT recommended for 3rd-party drivers!) :-)
11117	 *	- taskq_create() will block for memory; it will also panic
11118	 *	  if it cannot create the requested number of threads.
11119	 *	- Currently taskq_create() creates threads that cannot be
11120	 *	  swapped.
11121	 *	- We use TASKQ_PREPOPULATE to ensure we have an adequate
11122	 *	  supply of taskq entries at interrupt time (ie, so that we
11123	 *	  do not have to sleep for memory).
11124	 */
11125
11126	static void
11127	sd_taskq_create(void)
11128	{
11129		char	taskq_name[TASKQ_NAMELEN];
11130
11131		ASSERT(sd_tq == NULL);
11132		ASSERT(sd_wmr_tq == NULL);
11133
11134		(void) snprintf(taskq_name, sizeof (taskq_name),
11135		    "%s_drv_taskq", sd_label);
11136		sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
11137		    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11138		    TASKQ_PREPOPULATE));
11139
11140		(void) snprintf(taskq_name, sizeof (taskq_name),
11141		    "%s_rmw_taskq", sd_label);
11142		sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
11143		    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11144		    TASKQ_PREPOPULATE));
11145	}
11146
11147
11148	/*
11149	 * Function: sd_taskq_delete
11150	 *
11151	 * Description: Complementary cleanup routine for sd_taskq_create().
11152	 *
11153	 * Context: Kernel thread context.
11154	 */
11155
11156	static void
11157	sd_taskq_delete(void)
11158	{
11159		ASSERT(sd_tq != NULL);
11160		ASSERT(sd_wmr_tq != NULL);
11161		taskq_destroy(sd_tq);
11162		taskq_destroy(sd_wmr_tq);
11163		sd_tq = NULL;
11164		sd_wmr_tq = NULL;
11165	}
11166
11167
11168	/*
11169	 * Function: sdstrategy
11170	 *
11171	 * Description: Driver's strategy (9E) entry point function.
11172	 *
11173	 * Arguments: bp - pointer to buf(9S)
11174	 *
11175	 * Return Code: Always returns zero
11176	 *
11177	 * Context: Kernel thread context.
11178	 */
11179
11180	static int
11181	sdstrategy(struct buf *bp)
11182	{
11183		struct sd_lun *un;
11184
11185		un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11186		if (un == NULL) {
11187			bioerror(bp, EIO);
11188			bp->b_resid = bp->b_bcount;
11189			biodone(bp);
11190			return (0);
11191		}
11192		/* As was done in the past, fail new commands if the state is dumping. */
11193		if (un->un_state == SD_STATE_DUMPING) {
11194			bioerror(bp, ENXIO);
11195			bp->b_resid = bp->b_bcount;
11196			biodone(bp);
11197			return (0);
11198		}
11199
11200		ASSERT(!mutex_owned(SD_MUTEX(un)));
11201
11202		/*
11203		 * Commands may sneak in while the mutex was released during
11204		 * DDI_SUSPEND processing, so new commands must be blocked here.
11205		 * However, old commands that are still in the driver at this
11206		 * point should be allowed to drain.
11207		 */
11208		mutex_enter(SD_MUTEX(un));
11209		/*
11210		 * Must wait here if either the device is suspended or
11211		 * its power level is changing.
11212		 */
11213		while ((un->un_state == SD_STATE_SUSPENDED) ||
11214		    (un->un_state == SD_STATE_PM_CHANGING)) {
11215			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11216		}
11217
11218		un->un_ncmds_in_driver++;
11219
11220		/*
11221		 * atapi: Since we are running the CD in PIO mode for now, we need to
11222		 * call bp_mapin here to avoid bp_mapin being called in interrupt
11223		 * context under the HBA's init_pkt routine.
11224		 */
11225		if (un->un_f_cfg_is_atapi == TRUE) {
11226			mutex_exit(SD_MUTEX(un));
11227			bp_mapin(bp);
11228			mutex_enter(SD_MUTEX(un));
11229		}
11230		SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
11231		    un->un_ncmds_in_driver);
11232
11233		mutex_exit(SD_MUTEX(un));
11234
11235		/*
11236		 * This will (eventually) allocate the sd_xbuf area and
11237		 * call sd_xbuf_strategy(). We just want to return the
11238		 * result of ddi_xbuf_qstrategy so that we have an
11239		 * optimized tail call which saves us a stack frame.
11240 */ 11241 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 11242 } 11243 11244 11245 /* 11246 * Function: sd_xbuf_strategy 11247 * 11248 * Description: Function for initiating IO operations via the 11249 * ddi_xbuf_qstrategy() mechanism. 11250 * 11251 * Context: Kernel thread context. 11252 */ 11253 11254 static void 11255 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11256 { 11257 struct sd_lun *un = arg; 11258 11259 ASSERT(bp != NULL); 11260 ASSERT(xp != NULL); 11261 ASSERT(un != NULL); 11262 ASSERT(!mutex_owned(SD_MUTEX(un))); 11263 11264 /* 11265 * Initialize the fields in the xbuf and save a pointer to the 11266 * xbuf in bp->b_private. 11267 */ 11268 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11269 11270 /* Send the buf down the iostart chain */ 11271 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11272 } 11273 11274 11275 /* 11276 * Function: sd_xbuf_init 11277 * 11278 * Description: Prepare the given sd_xbuf struct for use. 11279 * 11280 * Arguments: un - ptr to softstate 11281 * bp - ptr to associated buf(9S) 11282 * xp - ptr to associated sd_xbuf 11283 * chain_type - IO chain type to use: 11284 * SD_CHAIN_NULL 11285 * SD_CHAIN_BUFIO 11286 * SD_CHAIN_USCSI 11287 * SD_CHAIN_DIRECT 11288 * SD_CHAIN_DIRECT_PRIORITY 11289 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11290 * initialization; may be NULL if none. 11291 * 11292 * Context: Kernel thread context 11293 */ 11294 11295 static void 11296 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11297 uchar_t chain_type, void *pktinfop) 11298 { 11299 int index; 11300 11301 ASSERT(un != NULL); 11302 ASSERT(bp != NULL); 11303 ASSERT(xp != NULL); 11304 11305 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11306 bp, chain_type); 11307 11308 xp->xb_un = un; 11309 xp->xb_pktp = NULL; 11310 xp->xb_pktinfo = pktinfop; 11311 xp->xb_private = bp->b_private; 11312 xp->xb_blkno = (daddr_t)bp->b_blkno; 11313 11314 /* 11315 * Set up the iostart and iodone chain indexes in the xbuf, based 11316 * upon the specified chain type to use. 11317 */ 11318 switch (chain_type) { 11319 case SD_CHAIN_NULL: 11320 /* 11321 * Fall thru to just use the values for the buf type, even 11322 * tho for the NULL chain these values will never be used. 11323 */ 11324 /* FALLTHRU */ 11325 case SD_CHAIN_BUFIO: 11326 index = un->un_buf_chain_type; 11327 break; 11328 case SD_CHAIN_USCSI: 11329 index = un->un_uscsi_chain_type; 11330 break; 11331 case SD_CHAIN_DIRECT: 11332 index = un->un_direct_chain_type; 11333 break; 11334 case SD_CHAIN_DIRECT_PRIORITY: 11335 index = un->un_priority_chain_type; 11336 break; 11337 default: 11338 /* We're really broken if we ever get here... */ 11339 panic("sd_xbuf_init: illegal chain type!"); 11340 /*NOTREACHED*/ 11341 } 11342 11343 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 11344 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 11345 11346 /* 11347 * It might be a bit easier to simply bzero the entire xbuf above, 11348 * but it turns out that since we init a fair number of members anyway, 11349 * we save a fair number cycles by doing explicit assignment of zero. 
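 * (For comparison: bzero(xp, sizeof (*xp)) would also clear the
 * members just initialized above, redoing that work; the explicit
 * stores below touch only the members that must start out as zero.)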
11350 */ 11351 xp->xb_pkt_flags = 0; 11352 xp->xb_dma_resid = 0; 11353 xp->xb_retry_count = 0; 11354 xp->xb_victim_retry_count = 0; 11355 xp->xb_ua_retry_count = 0; 11356 xp->xb_sense_bp = NULL; 11357 xp->xb_sense_status = 0; 11358 xp->xb_sense_state = 0; 11359 xp->xb_sense_resid = 0; 11360 11361 bp->b_private = xp; 11362 bp->b_flags &= ~(B_DONE | B_ERROR); 11363 bp->b_resid = 0; 11364 bp->av_forw = NULL; 11365 bp->av_back = NULL; 11366 bioerror(bp, 0); 11367 11368 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 11369 } 11370 11371 11372 /* 11373 * Function: sd_uscsi_strategy 11374 * 11375 * Description: Wrapper for calling into the USCSI chain via physio(9F) 11376 * 11377 * Arguments: bp - buf struct ptr 11378 * 11379 * Return Code: Always returns 0 11380 * 11381 * Context: Kernel thread context 11382 */ 11383 11384 static int 11385 sd_uscsi_strategy(struct buf *bp) 11386 { 11387 struct sd_lun *un; 11388 struct sd_uscsi_info *uip; 11389 struct sd_xbuf *xp; 11390 uchar_t chain_type; 11391 11392 ASSERT(bp != NULL); 11393 11394 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11395 if (un == NULL) { 11396 bioerror(bp, EIO); 11397 bp->b_resid = bp->b_bcount; 11398 biodone(bp); 11399 return (0); 11400 } 11401 11402 ASSERT(!mutex_owned(SD_MUTEX(un))); 11403 11404 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 11405 11406 mutex_enter(SD_MUTEX(un)); 11407 /* 11408 * atapi: Since we are running the CD for now in PIO mode we need to 11409 * call bp_mapin here to avoid bp_mapin called interrupt context under 11410 * the HBA's init_pkt routine. 11411 */ 11412 if (un->un_f_cfg_is_atapi == TRUE) { 11413 mutex_exit(SD_MUTEX(un)); 11414 bp_mapin(bp); 11415 mutex_enter(SD_MUTEX(un)); 11416 } 11417 un->un_ncmds_in_driver++; 11418 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 11419 un->un_ncmds_in_driver); 11420 mutex_exit(SD_MUTEX(un)); 11421 11422 /* 11423 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 11424 */ 11425 ASSERT(bp->b_private != NULL); 11426 uip = (struct sd_uscsi_info *)bp->b_private; 11427 11428 switch (uip->ui_flags) { 11429 case SD_PATH_DIRECT: 11430 chain_type = SD_CHAIN_DIRECT; 11431 break; 11432 case SD_PATH_DIRECT_PRIORITY: 11433 chain_type = SD_CHAIN_DIRECT_PRIORITY; 11434 break; 11435 default: 11436 chain_type = SD_CHAIN_USCSI; 11437 break; 11438 } 11439 11440 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 11441 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 11442 11443 /* Use the index obtained within xbuf_init */ 11444 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 11445 11446 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 11447 11448 return (0); 11449 } 11450 11451 11452 /* 11453 * These routines perform raw i/o operations. 11454 */ 11455 /*ARGSUSED*/ 11456 static void 11457 sduscsimin(struct buf *bp) 11458 { 11459 /* 11460 * do not break up because the CDB count would then 11461 * be incorrect and data underruns would result (incomplete 11462 * read/writes which would be retried and then failed, see 11463 * sdintr(). 
11464 */ 11465 } 11466 11467 11468 11469 /* 11470 * Function: sd_send_scsi_cmd 11471 * 11472 * Description: Runs a USCSI command for user (when called thru sdioctl), 11473 * or for the driver 11474 * 11475 * Arguments: dev - the dev_t for the device 11476 * incmd - ptr to a valid uscsi_cmd struct 11477 * cdbspace - UIO_USERSPACE or UIO_SYSSPACE 11478 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11479 * rqbufspace - UIO_USERSPACE or UIO_SYSSPACE 11480 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11481 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11482 * to use the USCSI "direct" chain and bypass the normal 11483 * command waitq. 11484 * 11485 * Return Code: 0 - successful completion of the given command 11486 * EIO - scsi_reset() failed, or see biowait()/physio() codes. 11487 * ENXIO - soft state not found for specified dev 11488 * EINVAL 11489 * EFAULT - copyin/copyout error 11490 * return code of biowait(9F) or physio(9F): 11491 * EIO - IO error, caller may check incmd->uscsi_status 11492 * ENXIO 11493 * EACCES - reservation conflict 11494 * 11495 * Context: Waits for command to complete. Can sleep. 11496 */ 11497 11498 static int 11499 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, 11500 enum uio_seg cdbspace, enum uio_seg dataspace, enum uio_seg rqbufspace, 11501 int path_flag) 11502 { 11503 struct sd_uscsi_info *uip; 11504 struct uscsi_cmd *uscmd; 11505 struct sd_lun *un; 11506 struct buf *bp; 11507 int rval; 11508 int flags; 11509 11510 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 11511 if (un == NULL) { 11512 return (ENXIO); 11513 } 11514 11515 ASSERT(!mutex_owned(SD_MUTEX(un))); 11516 11517 #ifdef SDDEBUG 11518 switch (dataspace) { 11519 case UIO_USERSPACE: 11520 SD_TRACE(SD_LOG_IO, un, 11521 "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un); 11522 break; 11523 case UIO_SYSSPACE: 11524 SD_TRACE(SD_LOG_IO, un, 11525 "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un); 11526 break; 11527 default: 11528 SD_TRACE(SD_LOG_IO, un, 11529 "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un); 11530 break; 11531 } 11532 #endif 11533 11534 /* 11535 * Perform resets directly; no need to generate a command to do it. 11536 */ 11537 if (incmd->uscsi_flags & (USCSI_RESET | USCSI_RESET_ALL)) { 11538 flags = ((incmd->uscsi_flags & USCSI_RESET_ALL) != 0) ? 11539 RESET_ALL : RESET_TARGET; 11540 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: Issuing reset\n"); 11541 if (scsi_reset(SD_ADDRESS(un), flags) == 0) { 11542 /* Reset attempt was unsuccessful */ 11543 SD_TRACE(SD_LOG_IO, un, 11544 "sd_send_scsi_cmd: reset: failure\n"); 11545 return (EIO); 11546 } 11547 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: reset: success\n"); 11548 return (0); 11549 } 11550 11551 /* Perfunctory sanity check... */ 11552 if (incmd->uscsi_cdblen <= 0) { 11553 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11554 "invalid uscsi_cdblen, returning EINVAL\n"); 11555 return (EINVAL); 11556 } 11557 11558 /* 11559 * In order to not worry about where the uscsi structure came from 11560 * (or where the cdb it points to came from) we're going to make 11561 * kmem_alloc'd copies of them here. This will also allow reference 11562 * to the data they contain long after this process has gone to 11563 * sleep and its kernel stack has been unmapped, etc. 11564 * 11565 * First get some memory for the uscsi_cmd struct and copy the 11566 * contents of the given uscsi_cmd struct into it. 
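 * (When cdbspace is UIO_SYSSPACE, the FKIOCTL flag is passed to
 * ddi_copyin() below so the copy is treated as kernel-to-kernel;
 * otherwise ddi_copyin() treats the source as a user-space address.)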
11567 */ 11568 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 11569 bcopy(incmd, uscmd, sizeof (struct uscsi_cmd)); 11570 11571 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_cmd: uscsi_cmd", 11572 (uchar_t *)uscmd, sizeof (struct uscsi_cmd), SD_LOG_HEX); 11573 11574 /* 11575 * Now get some space for the CDB, and copy the given CDB into 11576 * it. Use ddi_copyin() in case the data is in user space. 11577 */ 11578 uscmd->uscsi_cdb = kmem_zalloc((size_t)incmd->uscsi_cdblen, KM_SLEEP); 11579 flags = (cdbspace == UIO_SYSSPACE) ? FKIOCTL : 0; 11580 if (ddi_copyin(incmd->uscsi_cdb, uscmd->uscsi_cdb, 11581 (uint_t)incmd->uscsi_cdblen, flags) != 0) { 11582 kmem_free(uscmd->uscsi_cdb, (size_t)incmd->uscsi_cdblen); 11583 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 11584 return (EFAULT); 11585 } 11586 11587 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_cmd: CDB", 11588 (uchar_t *)uscmd->uscsi_cdb, incmd->uscsi_cdblen, SD_LOG_HEX); 11589 11590 bp = getrbuf(KM_SLEEP); 11591 11592 /* 11593 * Allocate an sd_uscsi_info struct and fill it with the info 11594 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 11595 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 11596 * since we allocate the buf here in this function, we do not 11597 * need to preserve the prior contents of b_private. 11598 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 11599 */ 11600 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 11601 uip->ui_flags = path_flag; 11602 uip->ui_cmdp = uscmd; 11603 bp->b_private = uip; 11604 11605 /* 11606 * Initialize Request Sense buffering, if requested. 11607 */ 11608 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 11609 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 11610 /* 11611 * Here uscmd->uscsi_rqbuf currently points to the caller's 11612 * buffer, but we replace this with a kernel buffer that 11613 * we allocate to use with the sense data. The sense data 11614 * (if present) gets copied into this new buffer before the 11615 * command is completed. Then we copy the sense data from 11616 * our allocated buf into the caller's buffer below. Note 11617 * that incmd->uscsi_rqbuf and incmd->uscsi_rqlen are used 11618 * below to perform the copy back to the caller's buf. 11619 */ 11620 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 11621 if (rqbufspace == UIO_USERSPACE) { 11622 uscmd->uscsi_rqlen = SENSE_LENGTH; 11623 uscmd->uscsi_rqresid = SENSE_LENGTH; 11624 } else { 11625 uchar_t rlen = min(SENSE_LENGTH, uscmd->uscsi_rqlen); 11626 uscmd->uscsi_rqlen = rlen; 11627 uscmd->uscsi_rqresid = rlen; 11628 } 11629 } else { 11630 uscmd->uscsi_rqbuf = NULL; 11631 uscmd->uscsi_rqlen = 0; 11632 uscmd->uscsi_rqresid = 0; 11633 } 11634 11635 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: rqbuf:0x%p rqlen:%d\n", 11636 uscmd->uscsi_rqbuf, uscmd->uscsi_rqlen); 11637 11638 if (un->un_f_is_fibre == FALSE) { 11639 /* 11640 * Force asynchronous mode, if necessary. Doing this here 11641 * has the unfortunate effect of running other queued 11642 * commands async also, but since the main purpose of this 11643 * capability is downloading new drive firmware, we can 11644 * probably live with it. 
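 * (The code below uses scsi_ifgetcap(9F)/scsi_ifsetcap(9F): setting
 * the "synchronous" capability to 0 forces asynchronous transfers,
 * and a return value of 1 from scsi_ifsetcap() indicates the change
 * was accepted.)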
11645 */ 11646 if ((uscmd->uscsi_flags & USCSI_ASYNC) != 0) { 11647 if (scsi_ifgetcap(SD_ADDRESS(un), "synchronous", 1) 11648 == 1) { 11649 if (scsi_ifsetcap(SD_ADDRESS(un), 11650 "synchronous", 0, 1) == 1) { 11651 SD_TRACE(SD_LOG_IO, un, 11652 "sd_send_scsi_cmd: forced async ok\n"); 11653 } else { 11654 SD_TRACE(SD_LOG_IO, un, 11655 "sd_send_scsi_cmd:\ 11656 forced async failed\n"); 11657 rval = EINVAL; 11658 goto done; 11659 } 11660 } 11661 } 11662 11663 /* 11664 * Re-enable synchronous mode, if requested 11665 */ 11666 if (uscmd->uscsi_flags & USCSI_SYNC) { 11667 if (scsi_ifgetcap(SD_ADDRESS(un), "synchronous", 1) 11668 == 0) { 11669 int i = scsi_ifsetcap(SD_ADDRESS(un), 11670 "synchronous", 1, 1); 11671 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11672 "re-enabled sync %s\n", 11673 (i == 1) ? "ok" : "failed"); 11674 } 11675 } 11676 } 11677 11678 /* 11679 * Commands sent with priority are intended for error recovery 11680 * situations, and do not have retries performed. 11681 */ 11682 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11683 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11684 } 11685 11686 /* 11687 * If we're going to do actual I/O, let physio do all the right things 11688 */ 11689 if (uscmd->uscsi_buflen != 0) { 11690 struct iovec aiov; 11691 struct uio auio; 11692 struct uio *uio = &auio; 11693 11694 bzero(&auio, sizeof (struct uio)); 11695 bzero(&aiov, sizeof (struct iovec)); 11696 aiov.iov_base = uscmd->uscsi_bufaddr; 11697 aiov.iov_len = uscmd->uscsi_buflen; 11698 uio->uio_iov = &aiov; 11699 11700 uio->uio_iovcnt = 1; 11701 uio->uio_resid = uscmd->uscsi_buflen; 11702 uio->uio_segflg = dataspace; 11703 11704 /* 11705 * physio() will block here until the command completes.... 11706 */ 11707 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: calling physio.\n"); 11708 11709 rval = physio(sd_uscsi_strategy, bp, dev, 11710 ((uscmd->uscsi_flags & USCSI_READ) ? B_READ : B_WRITE), 11711 sduscsimin, uio); 11712 11713 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11714 "returned from physio with 0x%x\n", rval); 11715 11716 } else { 11717 /* 11718 * We have to mimic what physio would do here! Argh! 11719 */ 11720 bp->b_flags = B_BUSY | 11721 ((uscmd->uscsi_flags & USCSI_READ) ? B_READ : B_WRITE); 11722 bp->b_edev = dev; 11723 bp->b_dev = cmpdev(dev); /* maybe unnecessary? */ 11724 bp->b_bcount = 0; 11725 bp->b_blkno = 0; 11726 11727 SD_TRACE(SD_LOG_IO, un, 11728 "sd_send_scsi_cmd: calling sd_uscsi_strategy...\n"); 11729 11730 (void) sd_uscsi_strategy(bp); 11731 11732 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: calling biowait\n"); 11733 11734 rval = biowait(bp); 11735 11736 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11737 "returned from biowait with 0x%x\n", rval); 11738 } 11739 11740 done: 11741 11742 #ifdef SDDEBUG 11743 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11744 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11745 uscmd->uscsi_status, uscmd->uscsi_resid); 11746 if (uscmd->uscsi_bufaddr != NULL) { 11747 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11748 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11749 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11750 if (dataspace == UIO_SYSSPACE) { 11751 SD_DUMP_MEMORY(un, SD_LOG_IO, 11752 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11753 uscmd->uscsi_buflen, SD_LOG_HEX); 11754 } 11755 } 11756 #endif 11757 11758 /* 11759 * Get the status and residual to return to the caller. 
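 * (The sense-data bookkeeping below works in bytes. For example, if
 * uscsi_rqlen bytes of sense were requested and uscsi_rqresid came
 * back as 2, then uscsi_rqlen - 2 bytes of valid sense data were
 * received and are eligible for copyout to the caller.)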
11760 */ 11761 incmd->uscsi_status = uscmd->uscsi_status; 11762 incmd->uscsi_resid = uscmd->uscsi_resid; 11763 11764 /* 11765 * If the caller wants sense data, copy back whatever sense data 11766 * we may have gotten, and update the relevant rqsense info. 11767 */ 11768 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 11769 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 11770 11771 int rqlen = uscmd->uscsi_rqlen - uscmd->uscsi_rqresid; 11772 rqlen = min(((int)incmd->uscsi_rqlen), rqlen); 11773 11774 /* Update the Request Sense status and resid */ 11775 incmd->uscsi_rqresid = incmd->uscsi_rqlen - rqlen; 11776 incmd->uscsi_rqstatus = uscmd->uscsi_rqstatus; 11777 11778 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11779 "uscsi_rqstatus: 0x%02x uscsi_rqresid:0x%x\n", 11780 incmd->uscsi_rqstatus, incmd->uscsi_rqresid); 11781 11782 /* Copy out the sense data for user processes */ 11783 if ((incmd->uscsi_rqbuf != NULL) && (rqlen != 0)) { 11784 int flags = 11785 (rqbufspace == UIO_USERSPACE) ? 0 : FKIOCTL; 11786 if (ddi_copyout(uscmd->uscsi_rqbuf, incmd->uscsi_rqbuf, 11787 rqlen, flags) != 0) { 11788 rval = EFAULT; 11789 } 11790 /* 11791 * Note: Can't touch incmd->uscsi_rqbuf so use 11792 * uscmd->uscsi_rqbuf instead. They're the same. 11793 */ 11794 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11795 "incmd->uscsi_rqbuf: 0x%p rqlen:%d\n", 11796 incmd->uscsi_rqbuf, rqlen); 11797 SD_DUMP_MEMORY(un, SD_LOG_IO, "rq", 11798 (uchar_t *)uscmd->uscsi_rqbuf, rqlen, SD_LOG_HEX); 11799 } 11800 } 11801 11802 /* 11803 * Free allocated resources and return; mapout the buf in case it was 11804 * mapped in by a lower layer. 11805 */ 11806 bp_mapout(bp); 11807 freerbuf(bp); 11808 kmem_free(uip, sizeof (struct sd_uscsi_info)); 11809 if (uscmd->uscsi_rqbuf != NULL) { 11810 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 11811 } 11812 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 11813 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 11814 11815 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: exit\n"); 11816 11817 return (rval); 11818 } 11819 11820 11821 /* 11822 * Function: sd_buf_iodone 11823 * 11824 * Description: Frees the sd_xbuf & returns the buf to its originator. 11825 * 11826 * Context: May be called from interrupt context. 11827 */ 11828 /* ARGSUSED */ 11829 static void 11830 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 11831 { 11832 struct sd_xbuf *xp; 11833 11834 ASSERT(un != NULL); 11835 ASSERT(bp != NULL); 11836 ASSERT(!mutex_owned(SD_MUTEX(un))); 11837 11838 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 11839 11840 xp = SD_GET_XBUF(bp); 11841 ASSERT(xp != NULL); 11842 11843 mutex_enter(SD_MUTEX(un)); 11844 11845 /* 11846 * Grab time when the cmd completed. 11847 * This is used for determining if the system has been 11848 * idle long enough to make it idle to the PM framework. 11849 * This is for lowering the overhead, and therefore improving 11850 * performance per I/O operation. 
11851	 */
11852		un->un_pm_idle_time = ddi_get_time();
11853
11854		un->un_ncmds_in_driver--;
11855		ASSERT(un->un_ncmds_in_driver >= 0);
11856		SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
11857		    un->un_ncmds_in_driver);
11858
11859		mutex_exit(SD_MUTEX(un));
11860
11861		ddi_xbuf_done(bp, un->un_xbuf_attr);	/* xbuf is gone after this */
11862		biodone(bp);				/* bp is gone after this */
11863
11864		SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
11865	}
11866
11867
11868	/*
11869	 * Function: sd_uscsi_iodone
11870	 *
11871	 * Description: Frees the sd_xbuf & returns the buf to its originator.
11872	 *
11873	 * Context: May be called from interrupt context.
11874	 */
11875	/* ARGSUSED */
11876	static void
11877	sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
11878	{
11879		struct sd_xbuf *xp;
11880
11881		ASSERT(un != NULL);
11882		ASSERT(bp != NULL);
11883
11884		xp = SD_GET_XBUF(bp);
11885		ASSERT(xp != NULL);
11886		ASSERT(!mutex_owned(SD_MUTEX(un)));
11887
11888		SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");
11889
11890		mutex_enter(SD_MUTEX(un));
11891
11892		/*
11893		 * Record the time at which the command completed. This is used
11894		 * to determine whether the system has been idle long enough for
11895		 * the device to be considered idle by the PM framework, while
11896		 * keeping the overhead of that determination low to preserve
11897		 * per-I/O performance.
11898		 */
11899		un->un_pm_idle_time = ddi_get_time();
11900
11901		un->un_ncmds_in_driver--;
11902		ASSERT(un->un_ncmds_in_driver >= 0);
11903		SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
11904		    un->un_ncmds_in_driver);
11905
11906		mutex_exit(SD_MUTEX(un));
11907
11908		kmem_free(xp, sizeof (struct sd_xbuf));
11909		biodone(bp);
11910
11911		SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
11912	}
11913
11914
11915	/*
11916	 * Function: sd_mapblockaddr_iostart
11917	 *
11918	 * Description: Verify that the request lies within the partition limits
11919	 *		for the indicated minor device. Issue an "overrun" buf if
11920	 *		the request would exceed the partition range. Converts the
11921	 *		partition-relative block address to an absolute one.
11922	 *
11923	 * Context: Can sleep
11924	 *
11925	 * Issues: This follows what the old code did, in terms of accessing
11926	 *	some of the partition info in the unit struct without holding
11927	 *	the mutex. This is a general issue: if the partition info
11928	 *	can be altered while IO is in progress, then as soon as we send
11929	 *	a buf, its partitioning can become invalid before it gets to the
11930	 *	device. Probably the right fix is to move partitioning out
11931	 *	of the driver entirely.
11932	 */
11933
11934	static void
11935	sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
11936	{
11937		daddr_t	nblocks;	/* #blocks in the given partition */
11938		daddr_t	blocknum;	/* Block number specified by the buf */
11939		size_t	requested_nblocks;
11940		size_t	available_nblocks;
11941		int	partition;
11942		diskaddr_t	partition_offset;
11943		struct sd_xbuf *xp;
11944
11945
11946		ASSERT(un != NULL);
11947		ASSERT(bp != NULL);
11948		ASSERT(!mutex_owned(SD_MUTEX(un)));
11949
11950		SD_TRACE(SD_LOG_IO_PARTITION, un,
11951		    "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);
11952
11953		xp = SD_GET_XBUF(bp);
11954		ASSERT(xp != NULL);
11955
11956		/*
11957		 * If the geometry is not indicated as valid, attempt to access
11958		 * the unit & verify the geometry/label. This can be the case for
11959		 * removable-media devices, or if the device was opened in
11960		 * NDELAY/NONBLOCK mode.
11961 */ 11962 if ((un->un_f_geometry_is_valid != TRUE) && 11963 (sd_ready_and_valid(un) != SD_READY_VALID)) { 11964 /* 11965 * For removable devices it is possible to start an I/O 11966 * without a media by opening the device in nodelay mode. 11967 * Also for writable CDs there can be many scenarios where 11968 * there is no geometry yet but volume manager is trying to 11969 * issue a read() just because it can see TOC on the CD. So 11970 * do not print a message for removables. 11971 */ 11972 if (!ISREMOVABLE(un)) { 11973 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11974 "i/o to invalid geometry\n"); 11975 } 11976 bioerror(bp, EIO); 11977 bp->b_resid = bp->b_bcount; 11978 SD_BEGIN_IODONE(index, un, bp); 11979 return; 11980 } 11981 11982 partition = SDPART(bp->b_edev); 11983 11984 /* #blocks in partition */ 11985 nblocks = un->un_map[partition].dkl_nblk; /* #blocks in partition */ 11986 11987 /* Use of a local variable potentially improves performance slightly */ 11988 partition_offset = un->un_offset[partition]; 11989 11990 /* 11991 * blocknum is the starting block number of the request. At this 11992 * point it is still relative to the start of the minor device. 11993 */ 11994 blocknum = xp->xb_blkno; 11995 11996 /* 11997 * Legacy: If the starting block number is one past the last block 11998 * in the partition, do not set B_ERROR in the buf. 11999 */ 12000 if (blocknum == nblocks) { 12001 goto error_exit; 12002 } 12003 12004 /* 12005 * Confirm that the first block of the request lies within the 12006 * partition limits. Also the requested number of bytes must be 12007 * a multiple of the system block size. 12008 */ 12009 if ((blocknum < 0) || (blocknum >= nblocks) || 12010 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 12011 bp->b_flags |= B_ERROR; 12012 goto error_exit; 12013 } 12014 12015 /* 12016 * If the requsted # blocks exceeds the available # blocks, that 12017 * is an overrun of the partition. 12018 */ 12019 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 12020 available_nblocks = (size_t)(nblocks - blocknum); 12021 ASSERT(nblocks >= blocknum); 12022 12023 if (requested_nblocks > available_nblocks) { 12024 /* 12025 * Allocate an "overrun" buf to allow the request to proceed 12026 * for the amount of space available in the partition. The 12027 * amount not transferred will be added into the b_resid 12028 * when the operation is complete. The overrun buf 12029 * replaces the original buf here, and the original buf 12030 * is saved inside the overrun buf, for later use. 12031 */ 12032 size_t resid = SD_SYSBLOCKS2BYTES(un, 12033 (offset_t)(requested_nblocks - available_nblocks)); 12034 size_t count = bp->b_bcount - resid; 12035 /* 12036 * Note: count is an unsigned entity thus it'll NEVER 12037 * be less than 0 so ASSERT the original values are 12038 * correct. 12039 */ 12040 ASSERT(bp->b_bcount >= resid); 12041 12042 bp = sd_bioclone_alloc(bp, count, blocknum, 12043 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 12044 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 12045 ASSERT(xp != NULL); 12046 } 12047 12048 /* At this point there should be no residual for this buf. */ 12049 ASSERT(bp->b_resid == 0); 12050 12051 /* Convert the block number to an absolute address. 
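 * For example (illustrative numbers only): if this partition starts at
 * un_offset[partition] == 1049600 and the caller addressed block 100
 * of the partition, xb_blkno becomes 1049600 + 100 = 1049700, an
 * absolute device block address.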
*/ 12052 xp->xb_blkno += partition_offset; 12053 12054 SD_NEXT_IOSTART(index, un, bp); 12055 12056 SD_TRACE(SD_LOG_IO_PARTITION, un, 12057 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 12058 12059 return; 12060 12061 error_exit: 12062 bp->b_resid = bp->b_bcount; 12063 SD_BEGIN_IODONE(index, un, bp); 12064 SD_TRACE(SD_LOG_IO_PARTITION, un, 12065 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 12066 } 12067 12068 12069 /* 12070 * Function: sd_mapblockaddr_iodone 12071 * 12072 * Description: Completion-side processing for partition management. 12073 * 12074 * Context: May be called under interrupt context 12075 */ 12076 12077 static void 12078 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 12079 { 12080 /* int partition; */ /* Not used, see below. */ 12081 ASSERT(un != NULL); 12082 ASSERT(bp != NULL); 12083 ASSERT(!mutex_owned(SD_MUTEX(un))); 12084 12085 SD_TRACE(SD_LOG_IO_PARTITION, un, 12086 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 12087 12088 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 12089 /* 12090 * We have an "overrun" buf to deal with... 12091 */ 12092 struct sd_xbuf *xp; 12093 struct buf *obp; /* ptr to the original buf */ 12094 12095 xp = SD_GET_XBUF(bp); 12096 ASSERT(xp != NULL); 12097 12098 /* Retrieve the pointer to the original buf */ 12099 obp = (struct buf *)xp->xb_private; 12100 ASSERT(obp != NULL); 12101 12102 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 12103 bioerror(obp, bp->b_error); 12104 12105 sd_bioclone_free(bp); 12106 12107 /* 12108 * Get back the original buf. 12109 * Note that since the restoration of xb_blkno below 12110 * was removed, the sd_xbuf is not needed. 12111 */ 12112 bp = obp; 12113 /* 12114 * xp = SD_GET_XBUF(bp); 12115 * ASSERT(xp != NULL); 12116 */ 12117 } 12118 12119 /* 12120 * Convert sd->xb_blkno back to a minor-device relative value. 12121 * Note: this has been commented out, as it is not needed in the 12122 * current implementation of the driver (ie, since this function 12123 * is at the top of the layering chains, so the info will be 12124 * discarded) and it is in the "hot" IO path. 12125 * 12126 * partition = getminor(bp->b_edev) & SDPART_MASK; 12127 * xp->xb_blkno -= un->un_offset[partition]; 12128 */ 12129 12130 SD_NEXT_IODONE(index, un, bp); 12131 12132 SD_TRACE(SD_LOG_IO_PARTITION, un, 12133 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 12134 } 12135 12136 12137 /* 12138 * Function: sd_mapblocksize_iostart 12139 * 12140 * Description: Convert between system block size (un->un_sys_blocksize) 12141 * and target block size (un->un_tgt_blocksize). 12142 * 12143 * Context: Can sleep to allocate resources. 12144 * 12145 * Assumptions: A higher layer has already performed any partition validation, 12146 * and converted the xp->xb_blkno to an absolute value relative 12147 * to the start of the device. 12148 * 12149 * It is also assumed that the higher layer has implemented 12150 * an "overrun" mechanism for the case where the request would 12151 * read/write beyond the end of a partition. In this case we 12152 * assume (and ASSERT) that bp->b_resid == 0. 12153 * 12154 * Note: The implementation for this routine assumes the target 12155 * block size remains constant between allocation and transport. 
12156	 */
12157
12158	static void
12159	sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
12160	{
12161		struct sd_mapblocksize_info	*bsp;
12162		struct sd_xbuf			*xp;
12163		offset_t first_byte;
12164		daddr_t	start_block, end_block;
12165		daddr_t	request_bytes;
12166		ushort_t is_aligned = FALSE;
12167
12168		ASSERT(un != NULL);
12169		ASSERT(bp != NULL);
12170		ASSERT(!mutex_owned(SD_MUTEX(un)));
12171		ASSERT(bp->b_resid == 0);
12172
12173		SD_TRACE(SD_LOG_IO_RMMEDIA, un,
12174		    "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);
12175
12176		/*
12177		 * For a non-writable CD, a write request is an error
12178		 */
12179		if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
12180		    (un->un_f_mmc_writable_media == FALSE)) {
12181			bioerror(bp, EIO);
12182			bp->b_resid = bp->b_bcount;
12183			SD_BEGIN_IODONE(index, un, bp);
12184			return;
12185		}
12186
12187		/*
12188		 * We do not need a shadow buf if the device is using
12189		 * un->un_sys_blocksize as its block size or if bcount == 0.
12190		 * In this case there is no layer-private data block allocated.
12191		 */
12192		if ((un->un_tgt_blocksize == un->un_sys_blocksize) ||
12193		    (bp->b_bcount == 0)) {
12194			goto done;
12195		}
12196
12197	#if defined(__i386) || defined(__amd64)
12198		/* We do not support non-block-aligned transfers for ROD devices */
12199		ASSERT(!ISROD(un));
12200	#endif
12201
12202		xp = SD_GET_XBUF(bp);
12203		ASSERT(xp != NULL);
12204
12205		SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12206		    "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
12207		    un->un_tgt_blocksize, un->un_sys_blocksize);
12208		SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12209		    "request start block:0x%x\n", xp->xb_blkno);
12210		SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12211		    "request len:0x%x\n", bp->b_bcount);
12212
12213		/*
12214		 * Allocate the layer-private data area for the mapblocksize layer.
12215		 * Layers are allowed to use the xb_private member of the sd_xbuf
12216		 * struct to store the pointer to their layer-private data block, but
12217		 * each layer also has the responsibility of restoring the prior
12218		 * contents of xb_private before returning the buf/xbuf to the
12219		 * higher layer that sent it.
12220		 *
12221		 * Here we save the prior contents of xp->xb_private into the
12222		 * bsp->mbs_oprivate field of our layer-private data area. This value
12223		 * is restored by sd_mapblocksize_iodone() just prior to freeing up
12224		 * the layer-private area and returning the buf/xbuf to the layer
12225		 * that sent it.
12226		 *
12227		 * Note that here we use kmem_zalloc for the allocation as there are
12228		 * parts of the mapblocksize code that expect certain fields to be
12229		 * zero unless explicitly set to a required value.
12230		 */
12231		bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
12232		bsp->mbs_oprivate = xp->xb_private;
12233		xp->xb_private = bsp;
12234
12235		/*
12236		 * This treats the data on the disk (target) as an array of bytes.
12237		 * first_byte is the byte offset, from the beginning of the device,
12238		 * to the location of the request. This is converted from a
12239		 * un->un_sys_blocksize block address to a byte offset, and then back
12240		 * to a block address based upon a un->un_tgt_blocksize block size.
12241		 *
12242		 * xp->xb_blkno should be absolute upon entry into this function,
12243		 * but it is based upon partitions that use the "system"
12244		 * block size. It must be adjusted to reflect the block size of
12245		 * the target.
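 *
 * A worked example (illustrative numbers only): for a 2048-byte
 * target block size, a request with xb_blkno == 3 (512-byte system
 * blocks) and b_bcount == 1024 gives first_byte == 1536, so
 * start_block == 0, end_block == (1536 + 1024 + 2047) / 2048 == 2,
 * request_bytes == 4096, and a copy offset of 1536 bytes into the
 * first target block; the request is unaligned, so a shadow buf
 * will be used.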
12246	 *
12247	 * Note that end_block is actually the block that follows the last
12248	 * block of the request, but that's what is needed for the computation.
12249	 */
12250		first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
12251		start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
12252		end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) /
12253		    un->un_tgt_blocksize;
12254
12255		/* request_bytes is rounded up to a multiple of the target block size */
12256		request_bytes = (end_block - start_block) * un->un_tgt_blocksize;
12257
12258		/*
12259		 * See if the starting address of the request and the request
12260		 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
12261		 * then we do not need to allocate a shadow buf to handle the request.
12262		 */
12263		if (((first_byte % un->un_tgt_blocksize) == 0) &&
12264		    ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
12265			is_aligned = TRUE;
12266		}
12267
12268		if ((bp->b_flags & B_READ) == 0) {
12269			/*
12270			 * Lock the range for a write operation. An aligned request is
12271			 * considered a simple write; otherwise the request must be a
12272			 * read-modify-write.
12273			 */
12274			bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
12275			    (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
12276		}
12277
12278		/*
12279		 * Alloc a shadow buf if the request is not aligned. Also, this is
12280		 * where the READ command is generated for a read-modify-write. (The
12281		 * write phase is deferred until after the read completes.)
12282		 */
12283		if (is_aligned == FALSE) {
12284
12285			struct sd_mapblocksize_info	*shadow_bsp;
12286			struct sd_xbuf	*shadow_xp;
12287			struct buf	*shadow_bp;
12288
12289			/*
12290			 * Allocate the shadow buf and its associated xbuf. Note that
12291			 * after this call the xb_blkno value in both the original
12292			 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
12293			 * same: absolute, relative to the start of the device, and
12294			 * adjusted for the target block size. The b_blkno in the
12295			 * shadow buf will also be set to this value. We should never
12296			 * change b_blkno in the original bp however.
12297			 *
12298			 * Note also that the shadow buf will always need to be a
12299			 * READ command, regardless of whether the incoming command
12300			 * is a READ or a WRITE.
12301			 */
12302			shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
12303			    xp->xb_blkno,
12304			    (int (*)(struct buf *)) sd_mapblocksize_iodone);
12305
12306			shadow_xp = SD_GET_XBUF(shadow_bp);
12307
12308			/*
12309			 * Allocate the layer-private data for the shadow buf.
12310			 * (No need to preserve xb_private in the shadow xbuf.)
12311			 */
12312			shadow_xp->xb_private = shadow_bsp =
12313			    kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
12314
12315			/*
12316			 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone
12317			 * to figure out where the start of the user data is (based upon
12318			 * the system block size) in the data returned by the READ
12319			 * command (which will be based upon the target blocksize). Note
12320			 * that this is only really used if the request is unaligned.
12321 */ 12322 bsp->mbs_copy_offset = (ssize_t)(first_byte - 12323 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 12324 ASSERT((bsp->mbs_copy_offset >= 0) && 12325 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 12326 12327 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 12328 12329 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 12330 12331 /* Transfer the wmap (if any) to the shadow buf */ 12332 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 12333 bsp->mbs_wmp = NULL; 12334 12335 /* 12336 * The shadow buf goes on from here in place of the 12337 * original buf. 12338 */ 12339 shadow_bsp->mbs_orig_bp = bp; 12340 bp = shadow_bp; 12341 } 12342 12343 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12344 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 12345 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12346 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 12347 request_bytes); 12348 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12349 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 12350 12351 done: 12352 SD_NEXT_IOSTART(index, un, bp); 12353 12354 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12355 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 12356 } 12357 12358 12359 /* 12360 * Function: sd_mapblocksize_iodone 12361 * 12362 * Description: Completion side processing for block-size mapping. 12363 * 12364 * Context: May be called under interrupt context 12365 */ 12366 12367 static void 12368 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 12369 { 12370 struct sd_mapblocksize_info *bsp; 12371 struct sd_xbuf *xp; 12372 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 12373 struct buf *orig_bp; /* ptr to the original buf */ 12374 offset_t shadow_end; 12375 offset_t request_end; 12376 offset_t shadow_start; 12377 ssize_t copy_offset; 12378 size_t copy_length; 12379 size_t shortfall; 12380 uint_t is_write; /* TRUE if this bp is a WRITE */ 12381 uint_t has_wmap; /* TRUE is this bp has a wmap */ 12382 12383 ASSERT(un != NULL); 12384 ASSERT(bp != NULL); 12385 12386 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12387 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 12388 12389 /* 12390 * There is no shadow buf or layer-private data if the target is 12391 * using un->un_sys_blocksize as its block size or if bcount == 0. 12392 */ 12393 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12394 (bp->b_bcount == 0)) { 12395 goto exit; 12396 } 12397 12398 xp = SD_GET_XBUF(bp); 12399 ASSERT(xp != NULL); 12400 12401 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 12402 bsp = xp->xb_private; 12403 12404 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 12405 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 12406 12407 if (is_write) { 12408 /* 12409 * For a WRITE request we must free up the block range that 12410 * we have locked up. This holds regardless of whether this is 12411 * an aligned write request or a read-modify-write request. 12412 */ 12413 sd_range_unlock(un, bsp->mbs_wmp); 12414 bsp->mbs_wmp = NULL; 12415 } 12416 12417 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 12418 /* 12419 * An aligned read or write command will have no shadow buf; 12420 * there is not much else to do with it. 12421 */ 12422 goto done; 12423 } 12424 12425 orig_bp = bsp->mbs_orig_bp; 12426 ASSERT(orig_bp != NULL); 12427 orig_xp = SD_GET_XBUF(orig_bp); 12428 ASSERT(orig_xp != NULL); 12429 ASSERT(!mutex_owned(SD_MUTEX(un))); 12430 12431 if (!is_write && has_wmap) { 12432 /* 12433 * A READ with a wmap means this is the READ phase of a 12434 * read-modify-write. 
If an error occurred on the READ then
12435		 * we do not proceed with the WRITE phase or copy any data.
12436		 * Just release the write maps and return with an error.
12437		 */
12438		if ((bp->b_resid != 0) || (bp->b_error != 0)) {
12439			orig_bp->b_resid = orig_bp->b_bcount;
12440			bioerror(orig_bp, bp->b_error);
12441			sd_range_unlock(un, bsp->mbs_wmp);
12442			goto freebuf_done;
12443		}
12444	}
12445
12446	/*
12447	 * Here is where we set up to copy the data from the shadow buf
12448	 * into the space associated with the original buf.
12449	 *
12450	 * To deal with the conversion between block sizes, these
12451	 * computations treat the data as an array of bytes, with the
12452	 * first byte (byte 0) corresponding to the first byte in the
12453	 * first block on the disk.
12454	 */
12455
12456	/*
12457	 * shadow_start and shadow_end indicate the location and extent of
12458	 * the data returned with the shadow IO request.
12459	 */
12460	shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
12461	shadow_end = shadow_start + bp->b_bcount - bp->b_resid;
12462
12463	/*
12464	 * copy_offset gives the offset (in bytes) from the start of the first
12465	 * block of the READ request to the beginning of the data. We retrieve
12466	 * this value from the mbs_copy_offset field of the layer-private data
12467	 * area (xb_private), where it was saved by sd_mapblocksize_iostart().
12468	 * copy_length gives the amount of data to be copied (in bytes).
12469	 */
12470	copy_offset = bsp->mbs_copy_offset;
12471	ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize));
12472	copy_length = orig_bp->b_bcount;
12473	request_end = shadow_start + copy_offset + orig_bp->b_bcount;
12474
12475	/*
12476	 * Set up the resid and error fields of orig_bp as appropriate.
12477	 */
12478	if (shadow_end >= request_end) {
12479		/* We got all the requested data; set resid to zero. */
12480		orig_bp->b_resid = 0;
12481	} else {
12482		/*
12483		 * We failed to get enough data to fully satisfy the original
12484		 * request. Just copy back whatever data we got and set
12485		 * up the residual and error code as required.
12486		 *
12487		 * 'shortfall' is the amount by which the data received with the
12488		 * shadow buf has "fallen short" of the requested amount.
12489		 */
12490		shortfall = (size_t)(request_end - shadow_end);
12491
12492		if (shortfall > orig_bp->b_bcount) {
12493			/*
12494			 * We did not get enough data to even partially
12495			 * fulfill the original request. The residual is
12496			 * equal to the amount requested.
12497			 */
12498			orig_bp->b_resid = orig_bp->b_bcount;
12499		} else {
12500			/*
12501			 * We did not get all the data that we requested
12502			 * from the device, but we will try to return what
12503			 * portion we did get.
12504			 */
12505			orig_bp->b_resid = shortfall;
12506		}
12507		ASSERT(copy_length >= orig_bp->b_resid);
12508		copy_length -= orig_bp->b_resid;
12509	}
12510
12511	/* Propagate the error code from the shadow buf to the original buf */
12512	bioerror(orig_bp, bp->b_error);
12513
12514	if (is_write) {
12515		goto freebuf_done;	/* No data copying for a WRITE */
12516	}
12517
12518	if (has_wmap) {
12519		/*
12520		 * This is a READ command from the READ phase of a
12521		 * read-modify-write request. We have to copy the data given
12522		 * by the user OVER the data returned by the READ command,
12523		 * then convert the command from a READ to a WRITE and send
12524		 * it back to the target.
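 *
 * Continuing the earlier worked example (illustrative numbers only),
 * the bcopy() below overlays the caller's bytes onto the blocks just
 * read from the target:
 *
 *	shadow buf: [ copy_offset bytes from disk | caller's data | tail ]
 *	                                            ^
 *	                                            b_un.b_addr + copy_offset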
12525 */ 12526 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 12527 copy_length); 12528 12529 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 12530 12531 /* 12532 * Dispatch the WRITE command to the taskq thread, which 12533 * will in turn send the command to the target. When the 12534 * WRITE command completes, we (sd_mapblocksize_iodone()) 12535 * will get called again as part of the iodone chain 12536 * processing for it. Note that we will still be dealing 12537 * with the shadow buf at that point. 12538 */ 12539 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 12540 KM_NOSLEEP) != 0) { 12541 /* 12542 * Dispatch was successful so we are done. Return 12543 * without going any higher up the iodone chain. Do 12544 * not free up any layer-private data until after the 12545 * WRITE completes. 12546 */ 12547 return; 12548 } 12549 12550 /* 12551 * Dispatch of the WRITE command failed; set up the error 12552 * condition and send this IO back up the iodone chain. 12553 */ 12554 bioerror(orig_bp, EIO); 12555 orig_bp->b_resid = orig_bp->b_bcount; 12556 12557 } else { 12558 /* 12559 * This is a regular READ request (ie, not a RMW). Copy the 12560 * data from the shadow buf into the original buf. The 12561 * copy_offset compensates for any "misalignment" between the 12562 * shadow buf (with its un->un_tgt_blocksize blocks) and the 12563 * original buf (with its un->un_sys_blocksize blocks). 12564 */ 12565 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 12566 copy_length); 12567 } 12568 12569 freebuf_done: 12570 12571 /* 12572 * At this point we still have both the shadow buf AND the original 12573 * buf to deal with, as well as the layer-private data area in each. 12574 * Local variables are as follows: 12575 * 12576 * bp -- points to shadow buf 12577 * xp -- points to xbuf of shadow buf 12578 * bsp -- points to layer-private data area of shadow buf 12579 * orig_bp -- points to original buf 12580 * 12581 * First free the shadow buf and its associated xbuf, then free the 12582 * layer-private data area from the shadow buf. There is no need to 12583 * restore xb_private in the shadow xbuf. 12584 */ 12585 sd_shadow_buf_free(bp); 12586 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12587 12588 /* 12589 * Now update the local variables to point to the original buf, xbuf, 12590 * and layer-private area. 12591 */ 12592 bp = orig_bp; 12593 xp = SD_GET_XBUF(bp); 12594 ASSERT(xp != NULL); 12595 ASSERT(xp == orig_xp); 12596 bsp = xp->xb_private; 12597 ASSERT(bsp != NULL); 12598 12599 done: 12600 /* 12601 * Restore xb_private to whatever it was set to by the next higher 12602 * layer in the chain, then free the layer-private data area. 12603 */ 12604 xp->xb_private = bsp->mbs_oprivate; 12605 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12606 12607 exit: 12608 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 12609 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 12610 12611 SD_NEXT_IODONE(index, un, bp); 12612 } 12613 12614 12615 /* 12616 * Function: sd_checksum_iostart 12617 * 12618 * Description: A stub function for a layer that's currently not used. 12619 * For now just a placeholder. 
12620 * 12621 * Context: Kernel thread context 12622 */ 12623 12624 static void 12625 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 12626 { 12627 ASSERT(un != NULL); 12628 ASSERT(bp != NULL); 12629 ASSERT(!mutex_owned(SD_MUTEX(un))); 12630 SD_NEXT_IOSTART(index, un, bp); 12631 } 12632 12633 12634 /* 12635 * Function: sd_checksum_iodone 12636 * 12637 * Description: A stub function for a layer that's currently not used. 12638 * For now just a placeholder. 12639 * 12640 * Context: May be called under interrupt context 12641 */ 12642 12643 static void 12644 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 12645 { 12646 ASSERT(un != NULL); 12647 ASSERT(bp != NULL); 12648 ASSERT(!mutex_owned(SD_MUTEX(un))); 12649 SD_NEXT_IODONE(index, un, bp); 12650 } 12651 12652 12653 /* 12654 * Function: sd_checksum_uscsi_iostart 12655 * 12656 * Description: A stub function for a layer that's currently not used. 12657 * For now just a placeholder. 12658 * 12659 * Context: Kernel thread context 12660 */ 12661 12662 static void 12663 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 12664 { 12665 ASSERT(un != NULL); 12666 ASSERT(bp != NULL); 12667 ASSERT(!mutex_owned(SD_MUTEX(un))); 12668 SD_NEXT_IOSTART(index, un, bp); 12669 } 12670 12671 12672 /* 12673 * Function: sd_checksum_uscsi_iodone 12674 * 12675 * Description: A stub function for a layer that's currently not used. 12676 * For now just a placeholder. 12677 * 12678 * Context: May be called under interrupt context 12679 */ 12680 12681 static void 12682 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 12683 { 12684 ASSERT(un != NULL); 12685 ASSERT(bp != NULL); 12686 ASSERT(!mutex_owned(SD_MUTEX(un))); 12687 SD_NEXT_IODONE(index, un, bp); 12688 } 12689 12690 12691 /* 12692 * Function: sd_pm_iostart 12693 * 12694 * Description: iostart-side routine for power management. 12695 * 12696 * Context: Kernel thread context 12697 */ 12698 12699 static void 12700 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 12701 { 12702 ASSERT(un != NULL); 12703 ASSERT(bp != NULL); 12704 ASSERT(!mutex_owned(SD_MUTEX(un))); 12705 ASSERT(!mutex_owned(&un->un_pm_mutex)); 12706 12707 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 12708 12709 if (sd_pm_entry(un) != DDI_SUCCESS) { 12710 /* 12711 * Set up to return the failed buf back up the 'iodone' 12712 * side of the calling chain. 12713 */ 12714 bioerror(bp, EIO); 12715 bp->b_resid = bp->b_bcount; 12716 12717 SD_BEGIN_IODONE(index, un, bp); 12718 12719 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 12720 return; 12721 } 12722 12723 SD_NEXT_IOSTART(index, un, bp); 12724 12725 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 12726 } 12727 12728 12729 /* 12730 * Function: sd_pm_iodone 12731 * 12732 * Description: iodone-side routine for power management. 12733 * 12734 * Context: May be called from interrupt context 12735 */ 12736 12737 static void 12738 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 12739 { 12740 ASSERT(un != NULL); 12741 ASSERT(bp != NULL); 12742 ASSERT(!mutex_owned(&un->un_pm_mutex)); 12743 12744 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 12745 12746 /* 12747 * After attach the following flag is only read, so don't 12748 * take the penalty of acquiring a mutex for it.
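 * (The sd_pm_exit() call below is intended to balance the sd_pm_entry() call made on the iostart side in sd_pm_iostart(), so each buf that entered the chain with power management enabled drops its PM hold exactly once at completion.)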
12749 */ 12750 if (un->un_f_pm_is_enabled == TRUE) { 12751 sd_pm_exit(un); 12752 } 12753 12754 SD_NEXT_IODONE(index, un, bp); 12755 12756 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 12757 } 12758 12759 12760 /* 12761 * Function: sd_core_iostart 12762 * 12763 * Description: Primary driver function for enqueuing buf(9S) structs from 12764 * the system and initiating IO to the target device 12765 * 12766 * Context: Kernel thread context. Can sleep. 12767 * 12768 * Assumptions: - The given xp->xb_blkno is absolute 12769 * (ie, relative to the start of the device). 12770 * - The IO is to be done using the native blocksize of 12771 * the device, as specified in un->un_tgt_blocksize. 12772 */ 12773 /* ARGSUSED */ 12774 static void 12775 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 12776 { 12777 struct sd_xbuf *xp; 12778 12779 ASSERT(un != NULL); 12780 ASSERT(bp != NULL); 12781 ASSERT(!mutex_owned(SD_MUTEX(un))); 12782 ASSERT(bp->b_resid == 0); 12783 12784 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 12785 12786 xp = SD_GET_XBUF(bp); 12787 ASSERT(xp != NULL); 12788 12789 mutex_enter(SD_MUTEX(un)); 12790 12791 /* 12792 * If we are currently in the failfast state, fail any new IO 12793 * that has B_FAILFAST set, then return. 12794 */ 12795 if ((bp->b_flags & B_FAILFAST) && 12796 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 12797 mutex_exit(SD_MUTEX(un)); 12798 bioerror(bp, EIO); 12799 bp->b_resid = bp->b_bcount; 12800 SD_BEGIN_IODONE(index, un, bp); 12801 return; 12802 } 12803 12804 if (SD_IS_DIRECT_PRIORITY(xp)) { 12805 /* 12806 * Priority command -- transport it immediately. 12807 * 12808 * Note: We may want to assert that USCSI_DIAGNOSE is set, 12809 * because all direct priority commands should be associated 12810 * with error recovery actions which we don't want to retry. 12811 */ 12812 sd_start_cmds(un, bp); 12813 } else { 12814 /* 12815 * Normal command -- add it to the wait queue, then start 12816 * transporting commands from the wait queue. 12817 */ 12818 sd_add_buf_to_waitq(un, bp); 12819 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 12820 sd_start_cmds(un, NULL); 12821 } 12822 12823 mutex_exit(SD_MUTEX(un)); 12824 12825 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 12826 } 12827 12828 12829 /* 12830 * Function: sd_init_cdb_limits 12831 * 12832 * Description: This is to handle scsi_pkt initialization differences 12833 * between the driver platforms. 12834 * 12835 * Legacy behaviors: 12836 * 12837 * If the block number or the sector count exceeds the 12838 * capabilities of a Group 0 command, shift over to a 12839 * Group 1 command. We don't blindly use Group 1 12840 * commands because a) some drives (CDC Wren IVs) get a 12841 * bit confused, and b) there is probably a fair amount 12842 * of speed difference for a target to receive and decode 12843 * a 10 byte command instead of a 6 byte command. 12844 * 12845 * The xfer time difference of 6 vs 10 byte CDBs is 12846 * still significant so this code is still worthwhile. 12847 * 10 byte CDBs are very inefficient with the fas HBA driver 12848 * and older disks. Each CDB byte took 1 usec with some 12849 * popular disks. 12850 * 12851 * Context: Must be called at attach time 12852 */ 12853 12854 static void 12855 sd_init_cdb_limits(struct sd_lun *un) 12856 { 12857 /* 12858 * Use CDB_GROUP1 commands for most devices except for 12859 * parallel SCSI fixed drives in which case we get better 12860 * performance using CDB_GROUP0 commands (where applicable). 
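 * For reference, the CDB groups used by this driver trade CDB size against addressable range (see sd_cdbtab and sd_setup_rw_pkt below); approximately: * CDB_GROUP0: 6-byte CDB, 21-bit LBA, 8-bit block count * CDB_GROUP1: 10-byte CDB, 32-bit LBA, 16-bit block count * CDB_GROUP5: 12-byte CDB, 32-bit LBA, 32-bit block count * CDB_GROUP4: 16-byte CDB, 64-bit LBA, 32-bit block count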
12861 */ 12862 un->un_mincdb = SD_CDB_GROUP1; 12863 #if !defined(__fibre) 12864 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 12865 !ISREMOVABLE(un)) { 12866 un->un_mincdb = SD_CDB_GROUP0; 12867 } 12868 #endif 12869 12870 /* 12871 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 12872 * commands for fixed disks unless we are building for a 32 bit 12873 * kernel. 12874 */ 12875 #ifdef _LP64 12876 un->un_maxcdb = (ISREMOVABLE(un)) ? SD_CDB_GROUP5 : SD_CDB_GROUP4; 12877 #else 12878 un->un_maxcdb = (ISREMOVABLE(un)) ? SD_CDB_GROUP5 : SD_CDB_GROUP1; 12879 #endif 12880 12881 /* 12882 * x86 systems require the PKT_DMA_PARTIAL flag 12883 */ 12884 #if defined(__x86) 12885 un->un_pkt_flags = PKT_DMA_PARTIAL; 12886 #else 12887 un->un_pkt_flags = 0; 12888 #endif 12889 12890 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 12891 ? sizeof (struct scsi_arq_status) : 1); 12892 un->un_cmd_timeout = (ushort_t)sd_io_time; 12893 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; 12894 } 12895 12896 12897 /* 12898 * Function: sd_initpkt_for_buf 12899 * 12900 * Description: Allocate and initialize for transport a scsi_pkt struct, 12901 * based upon the info specified in the given buf struct. 12902 * 12903 * Assumes the xb_blkno in the request is absolute (i.e., 12904 * relative to the start of the device, NOT the partition!). 12905 * Also assumes that the request is using the native block 12906 * size of the device (as returned by the READ CAPACITY 12907 * command). 12908 * 12909 * Return Code: SD_PKT_ALLOC_SUCCESS 12910 * SD_PKT_ALLOC_FAILURE 12911 * SD_PKT_ALLOC_FAILURE_NO_DMA 12912 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12913 * 12914 * Context: Kernel thread and may be called from software interrupt context 12915 * as part of a sdrunout callback. This function may not block or 12916 * call routines that block 12917 */ 12918 12919 static int 12920 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 12921 { 12922 struct sd_xbuf *xp; 12923 struct scsi_pkt *pktp = NULL; 12924 struct sd_lun *un; 12925 size_t blockcount; 12926 daddr_t startblock; 12927 int rval; 12928 int cmd_flags; 12929 12930 ASSERT(bp != NULL); 12931 ASSERT(pktpp != NULL); 12932 xp = SD_GET_XBUF(bp); 12933 ASSERT(xp != NULL); 12934 un = SD_GET_UN(bp); 12935 ASSERT(un != NULL); 12936 ASSERT(mutex_owned(SD_MUTEX(un))); 12937 ASSERT(bp->b_resid == 0); 12938 12939 SD_TRACE(SD_LOG_IO_CORE, un, 12940 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 12941 12942 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12943 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 12944 /* 12945 * Already have a scsi_pkt -- just need DMA resources. 12946 * We must recompute the CDB in case the mapping returns 12947 * a nonzero pkt_resid. 12948 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 12949 * that is being retried, the unmap/remap of the DMA resources 12950 * will result in the entire transfer starting over again 12951 * from the very first block. 12952 */ 12953 ASSERT(xp->xb_pktp != NULL); 12954 pktp = xp->xb_pktp; 12955 } else { 12956 pktp = NULL; 12957 } 12958 #endif /* __i386 || __amd64 */ 12959 12960 startblock = xp->xb_blkno; /* Absolute block num.
*/ 12961 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount); 12962 12963 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12964 12965 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK); 12966 12967 #else 12968 12969 cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags; 12970 12971 #endif 12972 12973 /* 12974 * sd_setup_rw_pkt will determine the appropriate CDB group to use, 12975 * call scsi_init_pkt, and build the CDB. 12976 */ 12977 rval = sd_setup_rw_pkt(un, &pktp, bp, 12978 cmd_flags, sdrunout, (caddr_t)un, 12979 startblock, blockcount); 12980 12981 if (rval == 0) { 12982 /* 12983 * Success. 12984 * 12985 * If partial DMA is being used and required for this transfer, 12986 * set it up here. 12987 */ 12988 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 && 12989 (pktp->pkt_resid != 0)) { 12990 12991 /* 12992 * Save the CDB length and pkt_resid for the 12993 * next xfer 12994 */ 12995 xp->xb_dma_resid = pktp->pkt_resid; 12996 12997 /* rezero resid */ 12998 pktp->pkt_resid = 0; 12999 13000 } else { 13001 xp->xb_dma_resid = 0; 13002 } 13003 13004 pktp->pkt_flags = un->un_tagflags; 13005 pktp->pkt_time = un->un_cmd_timeout; 13006 pktp->pkt_comp = sdintr; 13007 13008 pktp->pkt_private = bp; 13009 *pktpp = pktp; 13010 13011 SD_TRACE(SD_LOG_IO_CORE, un, 13012 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp); 13013 13014 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13015 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED; 13016 #endif 13017 13018 return (SD_PKT_ALLOC_SUCCESS); 13019 13020 } 13021 13022 /* 13023 * SD_PKT_ALLOC_FAILURE is the only expected failure code 13024 * from sd_setup_rw_pkt. 13025 */ 13026 ASSERT(rval == SD_PKT_ALLOC_FAILURE); 13027 13028 if (rval == SD_PKT_ALLOC_FAILURE) { 13029 *pktpp = NULL; 13030 /* 13031 * Set the driver state to RWAIT to indicate the driver 13032 * is waiting on resource allocations. The driver will not 13033 * suspend, pm_suspend, or detach while the state is RWAIT. 13034 */ 13035 New_state(un, SD_STATE_RWAIT); 13036 13037 SD_ERROR(SD_LOG_IO_CORE, un, 13038 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp); 13039 13040 if ((bp->b_flags & B_ERROR) != 0) { 13041 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13042 } 13043 return (SD_PKT_ALLOC_FAILURE); 13044 } else { 13045 /* 13046 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13047 * 13048 * This should never happen. Maybe someone messed with the 13049 * kernel's minphys? 13050 */ 13051 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13052 "Request rejected: too large for CDB: " 13053 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount); 13054 SD_ERROR(SD_LOG_IO_CORE, un, 13055 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp); 13056 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13057 13058 } 13059 } 13060 13061 13062 /* 13063 * Function: sd_destroypkt_for_buf 13064 * 13065 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
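 * (The scsi_pkt destroyed here is the one allocated by sd_initpkt_for_buf() and saved in the xbuf; SD_GET_PKTP(bp) retrieves it below.)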
13066 * 13067 * Context: Kernel thread or interrupt context 13068 */ 13069 13070 static void 13071 sd_destroypkt_for_buf(struct buf *bp) 13072 { 13073 ASSERT(bp != NULL); 13074 ASSERT(SD_GET_UN(bp) != NULL); 13075 13076 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13077 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 13078 13079 ASSERT(SD_GET_PKTP(bp) != NULL); 13080 scsi_destroy_pkt(SD_GET_PKTP(bp)); 13081 13082 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 13083 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 13084 } 13085 13086 /* 13087 * Function: sd_setup_rw_pkt 13088 * 13089 * Description: Determines appropriate CDB group for the requested LBA 13090 * and transfer length, calls scsi_init_pkt, and builds 13091 * the CDB. Do not use for partial DMA transfers except 13092 * for the initial transfer since the CDB size must 13093 * remain constant. 13094 * 13095 * Context: Kernel thread and may be called from software interrupt 13096 * context as part of a sdrunout callback. This function may not 13097 * block or call routines that block 13098 */ 13099 13100 13101 int 13102 sd_setup_rw_pkt(struct sd_lun *un, 13103 struct scsi_pkt **pktpp, struct buf *bp, int flags, 13104 int (*callback)(caddr_t), caddr_t callback_arg, 13105 diskaddr_t lba, uint32_t blockcount) 13106 { 13107 struct scsi_pkt *return_pktp; 13108 union scsi_cdb *cdbp; 13109 struct sd_cdbinfo *cp = NULL; 13110 int i; 13111 13112 /* 13113 * See which size CDB to use, based upon the request. 13114 */ 13115 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 13116 13117 /* 13118 * Check lba and block count against sd_cdbtab limits. 13119 * In the partial DMA case, we have to use the same size 13120 * CDB for all the transfers. Check lba + blockcount 13121 * against the max LBA so we know that segment of the 13122 * transfer can use the CDB we select. 13123 */ 13124 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 13125 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 13126 13127 /* 13128 * The command will fit into the CDB type 13129 * specified by sd_cdbtab[i]. 13130 */ 13131 cp = sd_cdbtab + i; 13132 13133 /* 13134 * Call scsi_init_pkt so we can fill in the 13135 * CDB. 13136 */ 13137 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 13138 bp, cp->sc_grpcode, un->un_status_len, 0, 13139 flags, callback, callback_arg); 13140 13141 if (return_pktp != NULL) { 13142 13143 /* 13144 * Return new value of pkt 13145 */ 13146 *pktpp = return_pktp; 13147 13148 /* 13149 * To be safe, zero the CDB insuring there is 13150 * no leftover data from a previous command. 13151 */ 13152 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 13153 13154 /* 13155 * Handle partial DMA mapping 13156 */ 13157 if (return_pktp->pkt_resid != 0) { 13158 13159 /* 13160 * Not going to xfer as many blocks as 13161 * originally expected 13162 */ 13163 blockcount -= 13164 SD_BYTES2TGTBLOCKS(un, 13165 return_pktp->pkt_resid); 13166 } 13167 13168 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 13169 13170 /* 13171 * Set command byte based on the CDB 13172 * type we matched. 13173 */ 13174 cdbp->scc_cmd = cp->sc_grpmask | 13175 ((bp->b_flags & B_READ) ? 
13176 SCMD_READ : SCMD_WRITE); 13177 13178 SD_FILL_SCSI1_LUN(un, return_pktp); 13179 13180 /* 13181 * Fill in LBA and length 13182 */ 13183 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 13184 (cp->sc_grpcode == CDB_GROUP4) || 13185 (cp->sc_grpcode == CDB_GROUP0) || 13186 (cp->sc_grpcode == CDB_GROUP5)); 13187 13188 if (cp->sc_grpcode == CDB_GROUP1) { 13189 FORMG1ADDR(cdbp, lba); 13190 FORMG1COUNT(cdbp, blockcount); 13191 return (0); 13192 } else if (cp->sc_grpcode == CDB_GROUP4) { 13193 FORMG4LONGADDR(cdbp, lba); 13194 FORMG4COUNT(cdbp, blockcount); 13195 return (0); 13196 } else if (cp->sc_grpcode == CDB_GROUP0) { 13197 FORMG0ADDR(cdbp, lba); 13198 FORMG0COUNT(cdbp, blockcount); 13199 return (0); 13200 } else if (cp->sc_grpcode == CDB_GROUP5) { 13201 FORMG5ADDR(cdbp, lba); 13202 FORMG5COUNT(cdbp, blockcount); 13203 return (0); 13204 } 13205 13206 /* 13207 * It should be impossible to not match one 13208 * of the CDB types above, so we should never 13209 * reach this point. Set the CDB command byte 13210 * to test-unit-ready to avoid writing 13211 * to somewhere we don't intend. 13212 */ 13213 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 13214 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13215 } else { 13216 /* 13217 * Couldn't get scsi_pkt 13218 */ 13219 return (SD_PKT_ALLOC_FAILURE); 13220 } 13221 } 13222 } 13223 13224 /* 13225 * None of the available CDB types were suitable. This really 13226 * should never happen: on a 64 bit system we support 13227 * READ16/WRITE16 which will hold an entire 64 bit disk address 13228 * and on a 32 bit system we will refuse to bind to a device 13229 * larger than 2TB so addresses will never be larger than 32 bits. 13230 */ 13231 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13232 } 13233 13234 #if defined(__i386) || defined(__amd64) 13235 /* 13236 * Function: sd_setup_next_rw_pkt 13237 * 13238 * Description: Setup packet for partial DMA transfers, except for the 13239 * initial transfer. sd_setup_rw_pkt should be used for 13240 * the initial transfer. 13241 * 13242 * Context: Kernel thread and may be called from interrupt context. 13243 */ 13244 13245 int 13246 sd_setup_next_rw_pkt(struct sd_lun *un, 13247 struct scsi_pkt *pktp, struct buf *bp, 13248 diskaddr_t lba, uint32_t blockcount) 13249 { 13250 uchar_t com; 13251 union scsi_cdb *cdbp; 13252 uchar_t cdb_group_id; 13253 13254 ASSERT(pktp != NULL); 13255 ASSERT(pktp->pkt_cdbp != NULL); 13256 13257 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 13258 com = cdbp->scc_cmd; 13259 cdb_group_id = CDB_GROUPID(com); 13260 13261 ASSERT((cdb_group_id == CDB_GROUPID_0) || 13262 (cdb_group_id == CDB_GROUPID_1) || 13263 (cdb_group_id == CDB_GROUPID_4) || 13264 (cdb_group_id == CDB_GROUPID_5)); 13265 13266 /* 13267 * Move pkt to the next portion of the xfer. 13268 * func is NULL_FUNC so we do not have to release 13269 * the disk mutex here. 13270 */ 13271 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 13272 NULL_FUNC, NULL) == pktp) { 13273 /* Success. 
Handle partial DMA */ 13274 if (pktp->pkt_resid != 0) { 13275 blockcount -= 13276 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid); 13277 } 13278 13279 cdbp->scc_cmd = com; 13280 SD_FILL_SCSI1_LUN(un, pktp); 13281 if (cdb_group_id == CDB_GROUPID_1) { 13282 FORMG1ADDR(cdbp, lba); 13283 FORMG1COUNT(cdbp, blockcount); 13284 return (0); 13285 } else if (cdb_group_id == CDB_GROUPID_4) { 13286 FORMG4LONGADDR(cdbp, lba); 13287 FORMG4COUNT(cdbp, blockcount); 13288 return (0); 13289 } else if (cdb_group_id == CDB_GROUPID_0) { 13290 FORMG0ADDR(cdbp, lba); 13291 FORMG0COUNT(cdbp, blockcount); 13292 return (0); 13293 } else if (cdb_group_id == CDB_GROUPID_5) { 13294 FORMG5ADDR(cdbp, lba); 13295 FORMG5COUNT(cdbp, blockcount); 13296 return (0); 13297 } 13298 13299 /* Unreachable */ 13300 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13301 } 13302 13303 /* 13304 * Error setting up next portion of cmd transfer. 13305 * Something is definitely very wrong and this 13306 * should not happen. 13307 */ 13308 return (SD_PKT_ALLOC_FAILURE); 13309 } 13310 #endif /* defined(__i386) || defined(__amd64) */ 13311 13312 /* 13313 * Function: sd_initpkt_for_uscsi 13314 * 13315 * Description: Allocate and initialize for transport a scsi_pkt struct, 13316 * based upon the info specified in the given uscsi_cmd struct. 13317 * 13318 * Return Code: SD_PKT_ALLOC_SUCCESS 13319 * SD_PKT_ALLOC_FAILURE 13320 * SD_PKT_ALLOC_FAILURE_NO_DMA 13321 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 13322 * 13323 * Context: Kernel thread and may be called from software interrupt context 13324 * as part of a sdrunout callback. This function may not block or 13325 * call routines that block 13326 */ 13327 13328 static int 13329 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp) 13330 { 13331 struct uscsi_cmd *uscmd; 13332 struct sd_xbuf *xp; 13333 struct scsi_pkt *pktp; 13334 struct sd_lun *un; 13335 uint32_t flags = 0; 13336 13337 ASSERT(bp != NULL); 13338 ASSERT(pktpp != NULL); 13339 xp = SD_GET_XBUF(bp); 13340 ASSERT(xp != NULL); 13341 un = SD_GET_UN(bp); 13342 ASSERT(un != NULL); 13343 ASSERT(mutex_owned(SD_MUTEX(un))); 13344 13345 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 13346 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 13347 ASSERT(uscmd != NULL); 13348 13349 SD_TRACE(SD_LOG_IO_CORE, un, 13350 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp); 13351 13352 /* 13353 * Allocate the scsi_pkt for the command. 13354 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path 13355 * during scsi_init_pkt time and will continue to use the 13356 * same path as long as the same scsi_pkt is used without 13357 * intervening scsi_dmafree(). Since a uscsi command does 13358 * not call scsi_dmafree() before retrying a failed command, it 13359 * is necessary to make sure PKT_DMA_PARTIAL flag is NOT 13360 * set, such that scsi_vhci can use another available path for 13361 * retry. Besides, a uscsi command does not allow DMA breakup, 13362 * so there is no need to set PKT_DMA_PARTIAL flag. 13363 */ 13364 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, 13365 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen, 13366 sizeof (struct scsi_arq_status), 0, 13367 (un->un_pkt_flags & ~PKT_DMA_PARTIAL), 13368 sdrunout, (caddr_t)un); 13369 13370 if (pktp == NULL) { 13371 *pktpp = NULL; 13372 /* 13373 * Set the driver state to RWAIT to indicate the driver 13374 * is waiting on resource allocations. The driver will not 13375 * suspend, pm_suspend, or detach while the state is RWAIT.
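 * (Command processing is restarted, and the state moved back to SD_STATE_NORMAL, from the sdrunout() callback or the next I/O once resources become available; see sd_start_cmds().)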
13376 */ 13377 New_state(un, SD_STATE_RWAIT); 13378 13379 SD_ERROR(SD_LOG_IO_CORE, un, 13380 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp); 13381 13382 if ((bp->b_flags & B_ERROR) != 0) { 13383 return (SD_PKT_ALLOC_FAILURE_NO_DMA); 13384 } 13385 return (SD_PKT_ALLOC_FAILURE); 13386 } 13387 13388 /* 13389 * We do not do DMA breakup for USCSI commands, so return failure 13390 * here if all the needed DMA resources were not allocated. 13391 */ 13392 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) && 13393 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) { 13394 scsi_destroy_pkt(pktp); 13395 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: " 13396 "No partial DMA for USCSI. exit: buf:0x%p\n", bp); 13397 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL); 13398 } 13399 13400 /* Init the cdb from the given uscsi struct */ 13401 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp, 13402 uscmd->uscsi_cdb[0], 0, 0, 0); 13403 13404 SD_FILL_SCSI1_LUN(un, pktp); 13405 13406 /* 13407 * Set up the optional USCSI flags. See the uscsi (7I) man page 13408 * for listing of the supported flags. 13409 */ 13410 13411 if (uscmd->uscsi_flags & USCSI_SILENT) { 13412 flags |= FLAG_SILENT; 13413 } 13414 13415 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) { 13416 flags |= FLAG_DIAGNOSE; 13417 } 13418 13419 if (uscmd->uscsi_flags & USCSI_ISOLATE) { 13420 flags |= FLAG_ISOLATE; 13421 } 13422 13423 if (un->un_f_is_fibre == FALSE) { 13424 if (uscmd->uscsi_flags & USCSI_RENEGOT) { 13425 flags |= FLAG_RENEGOTIATE_WIDE_SYNC; 13426 } 13427 } 13428 13429 /* 13430 * Set the pkt flags here so we save time later. 13431 * Note: These flags are NOT in the uscsi man page!!! 13432 */ 13433 if (uscmd->uscsi_flags & USCSI_HEAD) { 13434 flags |= FLAG_HEAD; 13435 } 13436 13437 if (uscmd->uscsi_flags & USCSI_NOINTR) { 13438 flags |= FLAG_NOINTR; 13439 } 13440 13441 /* 13442 * For tagged queueing, things get a bit complicated. 13443 * Check first for head of queue and last for ordered queue. 13444 * If neither head nor order, use the default driver tag flags. 13445 */ 13446 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) { 13447 if (uscmd->uscsi_flags & USCSI_HTAG) { 13448 flags |= FLAG_HTAG; 13449 } else if (uscmd->uscsi_flags & USCSI_OTAG) { 13450 flags |= FLAG_OTAG; 13451 } else { 13452 flags |= un->un_tagflags & FLAG_TAGMASK; 13453 } 13454 } 13455 13456 if (uscmd->uscsi_flags & USCSI_NODISCON) { 13457 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON; 13458 } 13459 13460 pktp->pkt_flags = flags; 13461 13462 /* Copy the caller's CDB into the pkt... */ 13463 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen); 13464 13465 if (uscmd->uscsi_timeout == 0) { 13466 pktp->pkt_time = un->un_uscsi_timeout; 13467 } else { 13468 pktp->pkt_time = uscmd->uscsi_timeout; 13469 } 13470 13471 /* need it later to identify USCSI request in sdintr */ 13472 xp->xb_pkt_flags |= SD_XB_USCSICMD; 13473 13474 xp->xb_sense_resid = uscmd->uscsi_rqresid; 13475 13476 pktp->pkt_private = bp; 13477 pktp->pkt_comp = sdintr; 13478 *pktpp = pktp; 13479 13480 SD_TRACE(SD_LOG_IO_CORE, un, 13481 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp); 13482 13483 return (SD_PKT_ALLOC_SUCCESS); 13484 } 13485 13486 13487 /* 13488 * Function: sd_destroypkt_for_uscsi 13489 * 13490 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi 13491 * IOs.. Also saves relevant info into the associated uscsi_cmd 13492 * struct. 
13493 * 13494 * Context: May be called under interrupt context 13495 */ 13496 13497 static void 13498 sd_destroypkt_for_uscsi(struct buf *bp) 13499 { 13500 struct uscsi_cmd *uscmd; 13501 struct sd_xbuf *xp; 13502 struct scsi_pkt *pktp; 13503 struct sd_lun *un; 13504 13505 ASSERT(bp != NULL); 13506 xp = SD_GET_XBUF(bp); 13507 ASSERT(xp != NULL); 13508 un = SD_GET_UN(bp); 13509 ASSERT(un != NULL); 13510 ASSERT(!mutex_owned(SD_MUTEX(un))); 13511 pktp = SD_GET_PKTP(bp); 13512 ASSERT(pktp != NULL); 13513 13514 SD_TRACE(SD_LOG_IO_CORE, un, 13515 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 13516 13517 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 13518 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 13519 ASSERT(uscmd != NULL); 13520 13521 /* Save the status and the residual into the uscsi_cmd struct */ 13522 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 13523 uscmd->uscsi_resid = bp->b_resid; 13524 13525 /* 13526 * If enabled, copy any saved sense data into the area specified 13527 * by the uscsi command. 13528 */ 13529 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 13530 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 13531 /* 13532 * Note: uscmd->uscsi_rqbuf should always point to a buffer 13533 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 13534 */ 13535 uscmd->uscsi_rqstatus = xp->xb_sense_status; 13536 uscmd->uscsi_rqresid = xp->xb_sense_resid; 13537 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, SENSE_LENGTH); 13538 } 13539 13540 /* We are done with the scsi_pkt; free it now */ 13541 ASSERT(SD_GET_PKTP(bp) != NULL); 13542 scsi_destroy_pkt(SD_GET_PKTP(bp)); 13543 13544 SD_TRACE(SD_LOG_IO_CORE, un, 13545 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 13546 } 13547 13548 13549 /* 13550 * Function: sd_bioclone_alloc 13551 * 13552 * Description: Allocate a buf(9S) and init it as per the given buf 13553 * and the various arguments. The associated sd_xbuf 13554 * struct is (nearly) duplicated. The struct buf *bp 13555 * argument is saved in new_xp->xb_private. 13556 * 13557 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 13558 * datalen - size of data area for the shadow bp 13559 * blkno - starting LBA 13560 * func - function pointer for b_iodone in the shadow buf. (May 13561 * be NULL if none.) 13562 * 13563 * Return Code: Pointer to the allocated buf(9S) struct 13564 * 13565 * Context: Can sleep. 13566 */ 13567 13568 static struct buf * 13569 sd_bioclone_alloc(struct buf *bp, size_t datalen, 13570 daddr_t blkno, int (*func)(struct buf *)) 13571 { 13572 struct sd_lun *un; 13573 struct sd_xbuf *xp; 13574 struct sd_xbuf *new_xp; 13575 struct buf *new_bp; 13576 13577 ASSERT(bp != NULL); 13578 xp = SD_GET_XBUF(bp); 13579 ASSERT(xp != NULL); 13580 un = SD_GET_UN(bp); 13581 ASSERT(un != NULL); 13582 ASSERT(!mutex_owned(SD_MUTEX(un))); 13583 13584 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 13585 NULL, KM_SLEEP); 13586 13587 new_bp->b_lblkno = blkno; 13588 13589 /* 13590 * Allocate an xbuf for the shadow bp and copy the contents of the 13591 * original xbuf into it. 13592 */ 13593 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 13594 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 13595 13596 /* 13597 * The given bp is automatically saved in the xb_private member 13598 * of the new xbuf. Callers are allowed to depend on this.
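 * An iodone callback set on the clone can therefore recover the original request with a sketch along these lines (clone_bp is a hypothetical name for the buf returned by this function): struct sd_xbuf *clone_xp = SD_GET_XBUF(clone_bp); struct buf *orig_bp = clone_xp->xb_private;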
*/ 13600 new_xp->xb_private = bp; 13601 13602 new_bp->b_private = new_xp; 13603 13604 return (new_bp); 13605 } 13606 13607 /* 13608 * Function: sd_shadow_buf_alloc 13609 * 13610 * Description: Allocate a buf(9S) and init it as per the given buf 13611 * and the various arguments. The associated sd_xbuf 13612 * struct is (nearly) duplicated. The struct buf *bp 13613 * argument is saved in new_xp->xb_private. 13614 * 13615 * Arguments: bp - ptr to the buf(9S) to be "shadowed" 13616 * datalen - size of data area for the shadow bp 13617 * bflags - B_READ or B_WRITE (pseudo flag) 13618 * blkno - starting LBA 13619 * func - function pointer for b_iodone in the shadow buf. (May 13620 * be NULL if none.) 13621 * 13622 * Return Code: Pointer to the allocated buf(9S) struct 13623 * 13624 * Context: Can sleep. 13625 */ 13626 13627 static struct buf * 13628 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 13629 daddr_t blkno, int (*func)(struct buf *)) 13630 { 13631 struct sd_lun *un; 13632 struct sd_xbuf *xp; 13633 struct sd_xbuf *new_xp; 13634 struct buf *new_bp; 13635 13636 ASSERT(bp != NULL); 13637 xp = SD_GET_XBUF(bp); 13638 ASSERT(xp != NULL); 13639 un = SD_GET_UN(bp); 13640 ASSERT(un != NULL); 13641 ASSERT(!mutex_owned(SD_MUTEX(un))); 13642 13643 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 13644 bp_mapin(bp); 13645 } 13646 13647 bflags &= (B_READ | B_WRITE); 13648 #if defined(__i386) || defined(__amd64) 13649 new_bp = getrbuf(KM_SLEEP); 13650 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 13651 new_bp->b_bcount = datalen; 13652 new_bp->b_flags = bp->b_flags | bflags; 13653 #else 13654 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 13655 datalen, bflags, SLEEP_FUNC, NULL); 13656 #endif 13657 new_bp->av_forw = NULL; 13658 new_bp->av_back = NULL; 13659 new_bp->b_dev = bp->b_dev; 13660 new_bp->b_blkno = blkno; 13661 new_bp->b_iodone = func; 13662 new_bp->b_edev = bp->b_edev; 13663 new_bp->b_resid = 0; 13664 13665 /* We need to preserve the B_FAILFAST flag */ 13666 if (bp->b_flags & B_FAILFAST) { 13667 new_bp->b_flags |= B_FAILFAST; 13668 } 13669 13670 /* 13671 * Allocate an xbuf for the shadow bp and copy the contents of the 13672 * original xbuf into it. 13673 */ 13674 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 13675 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 13676 13677 /* Need later to copy data between the shadow buf & original buf! */ 13678 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 13679 13680 /* 13681 * The given bp is automatically saved in the xb_private member 13682 * of the new xbuf. Callers are allowed to depend on this. 13683 */ 13684 new_xp->xb_private = bp; 13685 13686 new_bp->b_private = new_xp; 13687 13688 return (new_bp); 13689 } 13690 13691 /* 13692 * Function: sd_bioclone_free 13693 * 13694 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 13695 * in the larger-than-partition operation. 13696 * 13697 * Context: May be called under interrupt context 13698 */ 13699 13700 static void 13701 sd_bioclone_free(struct buf *bp) 13702 { 13703 struct sd_xbuf *xp; 13704 13705 ASSERT(bp != NULL); 13706 xp = SD_GET_XBUF(bp); 13707 ASSERT(xp != NULL); 13708 13709 /* 13710 * Call bp_mapout() before freeing the buf, in case a lower 13711 * layer or HBA had done a bp_mapin(). We must do this here 13712 * as we are the "originator" of the shadow buf. 13713 */ 13714 bp_mapout(bp); 13715 13716 /* 13717 * Null out b_iodone before freeing the bp, to ensure that the driver 13718 * never gets confused by a stale value in this field.
(Just a little 13719 * extra defensiveness here.) 13720 */ 13721 bp->b_iodone = NULL; 13722 13723 freerbuf(bp); 13724 13725 kmem_free(xp, sizeof (struct sd_xbuf)); 13726 } 13727 13728 /* 13729 * Function: sd_shadow_buf_free 13730 * 13731 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 13732 * 13733 * Context: May be called under interrupt context 13734 */ 13735 13736 static void 13737 sd_shadow_buf_free(struct buf *bp) 13738 { 13739 struct sd_xbuf *xp; 13740 13741 ASSERT(bp != NULL); 13742 xp = SD_GET_XBUF(bp); 13743 ASSERT(xp != NULL); 13744 13745 #if defined(__sparc) 13746 /* 13747 * Call bp_mapout() before freeing the buf, in case a lower 13748 * layer or HBA had done a bp_mapin(). We must do this here 13749 * as we are the "originator" of the shadow buf. 13750 */ 13751 bp_mapout(bp); 13752 #endif 13753 13754 /* 13755 * Null out b_iodone before freeing the bp, to ensure that the driver 13756 * never gets confused by a stale value in this field. (Just a little 13757 * extra defensiveness here.) 13758 */ 13759 bp->b_iodone = NULL; 13760 13761 #if defined(__i386) || defined(__amd64) 13762 kmem_free(bp->b_un.b_addr, bp->b_bcount); 13763 freerbuf(bp); 13764 #else 13765 scsi_free_consistent_buf(bp); 13766 #endif 13767 13768 kmem_free(xp, sizeof (struct sd_xbuf)); 13769 } 13770 13771 13772 /* 13773 * Function: sd_print_transport_rejected_message 13774 * 13775 * Description: This implements the ludicrously complex rules for printing 13776 * a "transport rejected" message. This is to address the 13777 * specific problem of having a flood of this error message 13778 * produced when a failover occurs. 13779 * 13780 * Context: Any. 13781 */ 13782 13783 static void 13784 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 13785 int code) 13786 { 13787 ASSERT(un != NULL); 13788 ASSERT(mutex_owned(SD_MUTEX(un))); 13789 ASSERT(xp != NULL); 13790 13791 /* 13792 * Print the "transport rejected" message under the following 13793 * conditions: 13794 * 13795 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 13796 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 13797 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 13798 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 13799 * scsi_transport(9F) (which indicates that the target might have 13800 * gone off-line). This uses the un->un_tran_fatal_count 13801 * count, which is incremented whenever a TRAN_FATAL_ERROR is 13802 * received, and reset to zero whenever a TRAN_ACCEPT is returned 13803 * from scsi_transport(). 13804 * 13805 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 13806 * the preceding cases in order for the message to be printed. 13807 */ 13808 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 13809 if ((sd_level_mask & SD_LOGMASK_DIAG) || 13810 (code != TRAN_FATAL_ERROR) || 13811 (un->un_tran_fatal_count == 1)) { 13812 switch (code) { 13813 case TRAN_BADPKT: 13814 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13815 "transport rejected bad packet\n"); 13816 break; 13817 case TRAN_FATAL_ERROR: 13818 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13819 "transport rejected fatal error\n"); 13820 break; 13821 default: 13822 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13823 "transport rejected (%d)\n", code); 13824 break; 13825 } 13826 } 13827 } 13828 } 13829 13830 13831 /* 13832 * Function: sd_add_buf_to_waitq 13833 * 13834 * Description: Add the given buf(9S) struct to the wait queue for the 13835 * instance.
If sorting is enabled, then the buf is added 13836 * to the queue via an elevator sort algorithm (a la 13837 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 13838 * If sorting is not enabled, then the buf is just added 13839 * to the end of the wait queue. 13840 * 13841 * Return Code: void 13842 * 13843 * Context: Does not sleep/block, therefore technically can be called 13844 * from any context. However if sorting is enabled then the 13845 * execution time is indeterminate, and may take a long time if 13846 * the wait queue grows large. 13847 */ 13848 13849 static void 13850 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 13851 { 13852 struct buf *ap; 13853 13854 ASSERT(bp != NULL); 13855 ASSERT(un != NULL); 13856 ASSERT(mutex_owned(SD_MUTEX(un))); 13857 13858 /* If the queue is empty, add the buf as the only entry & return. */ 13859 if (un->un_waitq_headp == NULL) { 13860 ASSERT(un->un_waitq_tailp == NULL); 13861 un->un_waitq_headp = un->un_waitq_tailp = bp; 13862 bp->av_forw = NULL; 13863 return; 13864 } 13865 13866 ASSERT(un->un_waitq_tailp != NULL); 13867 13868 /* 13869 * If sorting is disabled, just add the buf to the tail end of 13870 * the wait queue and return. 13871 */ 13872 if (un->un_f_disksort_disabled) { 13873 un->un_waitq_tailp->av_forw = bp; 13874 un->un_waitq_tailp = bp; 13875 bp->av_forw = NULL; 13876 return; 13877 } 13878 13879 /* 13880 * Sort through the list of requests currently on the wait queue 13881 * and add the new buf request at the appropriate position. 13882 * 13883 * The un->un_waitq_headp is an activity chain pointer on which 13884 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 13885 * first queue holds those requests which are positioned after 13886 * the current SD_GET_BLKNO() (in the first request); the second holds 13887 * requests which came in after their SD_GET_BLKNO() number was passed. 13888 * Thus we implement a one way scan, retracting after reaching 13889 * the end of the drive to the first request on the second 13890 * queue, at which time it becomes the first queue. 13891 * A one-way scan is natural because of the way UNIX read-ahead 13892 * blocks are allocated. 13893 * 13894 * If we lie after the first request, then we must locate the 13895 * second request list and add ourselves to it. 13896 */ 13897 ap = un->un_waitq_headp; 13898 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 13899 while (ap->av_forw != NULL) { 13900 /* 13901 * Look for an "inversion" in the (normally 13902 * ascending) block numbers. This indicates 13903 * the start of the second request list. 13904 */ 13905 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 13906 /* 13907 * Search the second request list for the 13908 * first request at a larger block number. 13909 * We go before that; however if there is 13910 * no such request, we go at the end. 13911 */ 13912 do { 13913 if (SD_GET_BLKNO(bp) < 13914 SD_GET_BLKNO(ap->av_forw)) { 13915 goto insert; 13916 } 13917 ap = ap->av_forw; 13918 } while (ap->av_forw != NULL); 13919 goto insert; /* after last */ 13920 } 13921 ap = ap->av_forw; 13922 } 13923 13924 /* 13925 * No inversions... we will go after the last, and 13926 * be the first request in the second request list. 13927 */ 13928 goto insert; 13929 } 13930 13931 /* 13932 * Request is at/after the current request... 13933 * sort in the first request list. 13934 */ 13935 while (ap->av_forw != NULL) { 13936 /* 13937 * We want to go after the current request (1) if 13938 * there is an inversion after it (i.e.
it is the end 13939 * of the first request list), or (2) if the next 13940 * request is a larger block no. than our request. 13941 */ 13942 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) || 13943 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) { 13944 goto insert; 13945 } 13946 ap = ap->av_forw; 13947 } 13948 13949 /* 13950 * Neither a second list nor a larger request, therefore 13951 * we go at the end of the first list (which is the same 13952 * as the end of the whole shebang). 13953 */ 13954 insert: 13955 bp->av_forw = ap->av_forw; 13956 ap->av_forw = bp; 13957 13958 /* 13959 * If we inserted onto the tail end of the waitq, make sure the 13960 * tail pointer is updated. 13961 */ 13962 if (ap == un->un_waitq_tailp) { 13963 un->un_waitq_tailp = bp; 13964 } 13965 } 13966 13967 13968 /* 13969 * Function: sd_start_cmds 13970 * 13971 * Description: Remove and transport cmds from the driver queues. 13972 * 13973 * Arguments: un - pointer to the unit (soft state) struct for the target. 13974 * 13975 * immed_bp - ptr to a buf to be transported immediately. Only 13976 * the immed_bp is transported; bufs on the waitq are not 13977 * processed and the un_retry_bp is not checked. If immed_bp is 13978 * NULL, then normal queue processing is performed. 13979 * 13980 * Context: May be called from kernel thread context, interrupt context, 13981 * or runout callback context. This function may not block or 13982 * call routines that block. 13983 */ 13984 13985 static void 13986 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp) 13987 { 13988 struct sd_xbuf *xp; 13989 struct buf *bp; 13990 void (*statp)(kstat_io_t *); 13991 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13992 void (*saved_statp)(kstat_io_t *); 13993 #endif 13994 int rval; 13995 13996 ASSERT(un != NULL); 13997 ASSERT(mutex_owned(SD_MUTEX(un))); 13998 ASSERT(un->un_ncmds_in_transport >= 0); 13999 ASSERT(un->un_throttle >= 0); 14000 14001 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n"); 14002 14003 do { 14004 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14005 saved_statp = NULL; 14006 #endif 14007 14008 /* 14009 * If we are syncing or dumping, fail the command to 14010 * avoid recursively calling back into scsi_transport(). 14011 * The dump I/O itself uses a separate code path so this 14012 * only prevents non-dump I/O from being sent while dumping. 14013 * File system sync takes place before dumping begins. 14014 * During panic, filesystem I/O is allowed provided 14015 * un_in_callback is <= 1. This is to prevent recursion 14016 * such as sd_start_cmds -> scsi_transport -> sdintr -> 14017 * sd_start_cmds and so on. See panic.c for more information 14018 * about the states the system can be in during panic. 14019 */ 14020 if ((un->un_state == SD_STATE_DUMPING) || 14021 (ddi_in_panic() && (un->un_in_callback > 1))) { 14022 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14023 "sd_start_cmds: panicking\n"); 14024 goto exit; 14025 } 14026 14027 if ((bp = immed_bp) != NULL) { 14028 /* 14029 * We have a bp that must be transported immediately. 14030 * It's OK to transport the immed_bp here without doing 14031 * the throttle limit check because the immed_bp is 14032 * always used in a retry/recovery case. This means 14033 * that we know we are not at the throttle limit by 14034 * virtue of the fact that to get here we must have 14035 * already gotten a command back via sdintr().
This also 14036 * relies on (1) the command on un_retry_bp preventing 14037 * further commands from the waitq from being issued; 14038 * and (2) the code in sd_retry_command checking the 14039 * throttle limit before issuing a delayed or immediate 14040 * retry. This holds even if the throttle limit is 14041 * currently ratcheted down from its maximum value. 14042 */ 14043 statp = kstat_runq_enter; 14044 if (bp == un->un_retry_bp) { 14045 ASSERT((un->un_retry_statp == NULL) || 14046 (un->un_retry_statp == kstat_waitq_enter) || 14047 (un->un_retry_statp == 14048 kstat_runq_back_to_waitq)); 14049 /* 14050 * If the waitq kstat was incremented when 14051 * sd_set_retry_bp() queued this bp for a retry, 14052 * then we must set up statp so that the waitq 14053 * count will get decremented correctly below. 14054 * Also we must clear un->un_retry_statp to 14055 * ensure that we do not act on a stale value 14056 * in this field. 14057 */ 14058 if ((un->un_retry_statp == kstat_waitq_enter) || 14059 (un->un_retry_statp == 14060 kstat_runq_back_to_waitq)) { 14061 statp = kstat_waitq_to_runq; 14062 } 14063 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14064 saved_statp = un->un_retry_statp; 14065 #endif 14066 un->un_retry_statp = NULL; 14067 14068 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14069 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 14070 "un_throttle:%d un_ncmds_in_transport:%d\n", 14071 un, un->un_retry_bp, un->un_throttle, 14072 un->un_ncmds_in_transport); 14073 } else { 14074 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 14075 "processing priority bp:0x%p\n", bp); 14076 } 14077 14078 } else if ((bp = un->un_waitq_headp) != NULL) { 14079 /* 14080 * A command on the waitq is ready to go, but do not 14081 * send it if: 14082 * 14083 * (1) the throttle limit has been reached, or 14084 * (2) a retry is pending, or 14085 * (3) a START_STOP_UNIT callback pending, or 14086 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 14087 * command is pending. 14088 * 14089 * For all of these conditions, IO processing will 14090 * restart after the condition is cleared. 14091 */ 14092 if (un->un_ncmds_in_transport >= un->un_throttle) { 14093 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14094 "sd_start_cmds: exiting, " 14095 "throttle limit reached!\n"); 14096 goto exit; 14097 } 14098 if (un->un_retry_bp != NULL) { 14099 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14100 "sd_start_cmds: exiting, retry pending!\n"); 14101 goto exit; 14102 } 14103 if (un->un_startstop_timeid != NULL) { 14104 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14105 "sd_start_cmds: exiting, " 14106 "START_STOP pending!\n"); 14107 goto exit; 14108 } 14109 if (un->un_direct_priority_timeid != NULL) { 14110 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14111 "sd_start_cmds: exiting, " 14112 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n"); 14113 goto exit; 14114 } 14115 14116 /* Dequeue the command */ 14117 un->un_waitq_headp = bp->av_forw; 14118 if (un->un_waitq_headp == NULL) { 14119 un->un_waitq_tailp = NULL; 14120 } 14121 bp->av_forw = NULL; 14122 statp = kstat_waitq_to_runq; 14123 SD_TRACE(SD_LOG_IO_CORE, un, 14124 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 14125 14126 } else { 14127 /* No work to do so bail out now */ 14128 SD_TRACE(SD_LOG_IO_CORE, un, 14129 "sd_start_cmds: no more work, exiting!\n"); 14130 goto exit; 14131 } 14132 14133 /* 14134 * Reset the state to normal. This is the mechanism by which 14135 * the state transitions from either SD_STATE_RWAIT or 14136 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 
14137 * If state is SD_STATE_PM_CHANGING then this command is 14138 * part of the device power control and the state must 14139 * not be put back to normal. Doing so would 14140 * allow new commands to proceed when they shouldn't; 14141 * the device may be going off. 14142 */ 14143 if ((un->un_state != SD_STATE_SUSPENDED) && 14144 (un->un_state != SD_STATE_PM_CHANGING)) { 14145 New_state(un, SD_STATE_NORMAL); 14146 } 14147 14148 xp = SD_GET_XBUF(bp); 14149 ASSERT(xp != NULL); 14150 14151 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14152 /* 14153 * Allocate the scsi_pkt if we need one, or attach DMA 14154 * resources if we have a scsi_pkt that needs them. The 14155 * latter should only occur for commands that are being 14156 * retried. 14157 */ 14158 if ((xp->xb_pktp == NULL) || 14159 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 14160 #else 14161 if (xp->xb_pktp == NULL) { 14162 #endif 14163 /* 14164 * There is no scsi_pkt allocated for this buf. Call 14165 * the initpkt function to allocate & init one. 14166 * 14167 * The scsi_init_pkt runout callback functionality is 14168 * implemented as follows: 14169 * 14170 * 1) The initpkt function always calls 14171 * scsi_init_pkt(9F) with sdrunout specified as the 14172 * callback routine. 14173 * 2) A successful packet allocation is initialized and 14174 * the I/O is transported. 14175 * 3) The I/O associated with an allocation resource 14176 * failure is left on its queue to be retried via 14177 * runout or the next I/O. 14178 * 4) The I/O associated with a DMA error is removed 14179 * from the queue and failed with EIO. Processing of 14180 * the transport queues is also halted to be 14181 * restarted via runout or the next I/O. 14182 * 5) The I/O associated with a CDB size or packet 14183 * size error is removed from the queue and failed 14184 * with EIO. Processing of the transport queues is 14185 * continued. 14186 * 14187 * Note: there is no interface for canceling a runout 14188 * callback. To prevent the driver from detaching or 14189 * suspending while a runout is pending the driver 14190 * state is set to SD_STATE_RWAIT. 14191 * 14192 * Note: using the scsi_init_pkt callback facility can 14193 * result in an I/O request persisting at the head of 14194 * the list which cannot be satisfied even after 14195 * multiple retries. In the future the driver may 14196 * implement some kind of maximum runout count before 14197 * failing an I/O. 14198 * 14199 * Note: the use of funcp below may seem superfluous, 14200 * but it helps warlock figure out the correct 14201 * initpkt function calls (see [s]sd.wlcmd). 14202 */ 14203 struct scsi_pkt *pktp; 14204 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14205 14206 ASSERT(bp != un->un_rqs_bp); 14207 14208 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14209 switch ((*funcp)(bp, &pktp)) { 14210 case SD_PKT_ALLOC_SUCCESS: 14211 xp->xb_pktp = pktp; 14212 SD_TRACE(SD_LOG_IO_CORE, un, 14213 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14214 pktp); 14215 goto got_pkt; 14216 14217 case SD_PKT_ALLOC_FAILURE: 14218 /* 14219 * Temporary (hopefully) resource depletion. 14220 * Since retries and RQS commands always have a 14221 * scsi_pkt allocated, these cases should never 14222 * get here. So the only cases this needs to 14223 * handle are a bp from the waitq (which we put 14224 * back onto the waitq for sdrunout), or a bp 14225 * sent as an immed_bp (which we just fail).
14226 */ 14227 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14228 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14229 14230 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14231 14232 if (bp == immed_bp) { 14233 /* 14234 * If SD_XB_DMA_FREED is clear, then 14235 * this is a failure to allocate a 14236 * scsi_pkt, and we must fail the 14237 * command. 14238 */ 14239 if ((xp->xb_pkt_flags & 14240 SD_XB_DMA_FREED) == 0) { 14241 break; 14242 } 14243 14244 /* 14245 * If this immediate command is NOT our 14246 * un_retry_bp, then we must fail it. 14247 */ 14248 if (bp != un->un_retry_bp) { 14249 break; 14250 } 14251 14252 /* 14253 * We get here if this cmd is our 14254 * un_retry_bp that was DMAFREED, but 14255 * scsi_init_pkt() failed to reallocate 14256 * DMA resources when we attempted to 14257 * retry it. This can happen when an 14258 * mpxio failover is in progress, but 14259 * we don't want to just fail the 14260 * command in this case. 14261 * 14262 * Use timeout(9F) to restart it after 14263 * a 100ms delay. We don't want to 14264 * let sdrunout() restart it, because 14265 * sdrunout() is just supposed to start 14266 * commands that are sitting on the 14267 * wait queue. The un_retry_bp stays 14268 * set until the command completes, but 14269 * sdrunout can be called many times 14270 * before that happens. Since sdrunout 14271 * cannot tell if the un_retry_bp is 14272 * already in the transport, it could 14273 * end up calling scsi_transport() for 14274 * the un_retry_bp multiple times. 14275 * 14276 * Also: don't schedule the callback 14277 * if some other callback is already 14278 * pending. 14279 */ 14280 if (un->un_retry_statp == NULL) { 14281 /* 14282 * restore the kstat pointer to 14283 * keep kstat counts coherent 14284 * when we do retry the command. 14285 */ 14286 un->un_retry_statp = 14287 saved_statp; 14288 } 14289 14290 if ((un->un_startstop_timeid == NULL) && 14291 (un->un_retry_timeid == NULL) && 14292 (un->un_direct_priority_timeid == 14293 NULL)) { 14294 14295 un->un_retry_timeid = 14296 timeout( 14297 sd_start_retry_command, 14298 un, SD_RESTART_TIMEOUT); 14299 } 14300 goto exit; 14301 } 14302 14303 #else 14304 if (bp == immed_bp) { 14305 break; /* Just fail the command */ 14306 } 14307 #endif 14308 14309 /* Add the buf back to the head of the waitq */ 14310 bp->av_forw = un->un_waitq_headp; 14311 un->un_waitq_headp = bp; 14312 if (un->un_waitq_tailp == NULL) { 14313 un->un_waitq_tailp = bp; 14314 } 14315 goto exit; 14316 14317 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14318 /* 14319 * HBA DMA resource failure. Fail the command 14320 * and continue processing of the queues. 14321 */ 14322 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14323 "sd_start_cmds: " 14324 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14325 break; 14326 14327 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 14328 /* 14329 * Note:x86: Partial DMA mapping not supported 14330 * for USCSI commands, and all the needed DMA 14331 * resources were not allocated. 14332 */ 14333 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14334 "sd_start_cmds: " 14335 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 14336 break; 14337 14338 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 14339 /* 14340 * Note:x86: Request cannot fit into CDB based 14341 * on lba and len. 14342 */ 14343 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14344 "sd_start_cmds: " 14345 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 14346 break; 14347 14348 default: 14349 /* Should NEVER get here! 
*/ 14350 panic("scsi_initpkt error"); 14351 /*NOTREACHED*/ 14352 } 14353 14354 /* 14355 * Fatal error in allocating a scsi_pkt for this buf. 14356 * Update kstats & return the buf with an error code. 14357 * We must use sd_return_failed_command_no_restart() to 14358 * avoid a recursive call back into sd_start_cmds(). 14359 * However this also means that we must keep processing 14360 * the waitq here in order to avoid stalling. 14361 */ 14362 if (statp == kstat_waitq_to_runq) { 14363 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 14364 } 14365 sd_return_failed_command_no_restart(un, bp, EIO); 14366 if (bp == immed_bp) { 14367 /* immed_bp is gone by now, so clear this */ 14368 immed_bp = NULL; 14369 } 14370 continue; 14371 } 14372 got_pkt: 14373 if (bp == immed_bp) { 14374 /* goto the head of the class.... */ 14375 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14376 } 14377 14378 un->un_ncmds_in_transport++; 14379 SD_UPDATE_KSTATS(un, statp, bp); 14380 14381 /* 14382 * Call scsi_transport() to send the command to the target. 14383 * According to SCSA architecture, we must drop the mutex here 14384 * before calling scsi_transport() in order to avoid deadlock. 14385 * Note that the scsi_pkt's completion routine can be executed 14386 * (from interrupt context) even before the call to 14387 * scsi_transport() returns. 14388 */ 14389 SD_TRACE(SD_LOG_IO_CORE, un, 14390 "sd_start_cmds: calling scsi_transport()\n"); 14391 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 14392 14393 mutex_exit(SD_MUTEX(un)); 14394 rval = scsi_transport(xp->xb_pktp); 14395 mutex_enter(SD_MUTEX(un)); 14396 14397 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14398 "sd_start_cmds: scsi_transport() returned %d\n", rval); 14399 14400 switch (rval) { 14401 case TRAN_ACCEPT: 14402 /* Clear this with every pkt accepted by the HBA */ 14403 un->un_tran_fatal_count = 0; 14404 break; /* Success; try the next cmd (if any) */ 14405 14406 case TRAN_BUSY: 14407 un->un_ncmds_in_transport--; 14408 ASSERT(un->un_ncmds_in_transport >= 0); 14409 14410 /* 14411 * Don't retry request sense; the sense data 14412 * is lost when another request is sent. 14413 * Free up the rqs buf and retry 14414 * the original failed cmd. Update kstat. 14415 */ 14416 if (bp == un->un_rqs_bp) { 14417 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14418 bp = sd_mark_rqs_idle(un, xp); 14419 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 14420 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, 14421 kstat_waitq_enter); 14422 goto exit; 14423 } 14424 14425 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14426 /* 14427 * Free the DMA resources for the scsi_pkt. This will 14428 * allow mpxio to select another path the next time 14429 * we call scsi_transport() with this scsi_pkt. 14430 * See sdintr() for the rationale behind this. 14431 */ 14432 if ((un->un_f_is_fibre == TRUE) && 14433 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14434 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 14435 scsi_dmafree(xp->xb_pktp); 14436 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14437 } 14438 #endif 14439 14440 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 14441 /* 14442 * Commands that are SD_PATH_DIRECT_PRIORITY 14443 * are for error recovery situations. These do 14444 * not use the normal command waitq, so if they 14445 * get a TRAN_BUSY we cannot put them back onto 14446 * the waitq for later retry.
One possible 14447 * problem is that there could already be some 14448 * other command on un_retry_bp that is waiting 14449 * for this one to complete, so we would be 14450 * deadlocked if we put this command back onto 14451 * the waitq for later retry (since un_retry_bp 14452 * must complete before the driver gets back to 14453 * commands on the waitq). 14454 * 14455 * To avoid deadlock we must schedule a callback 14456 * that will restart this command after a set 14457 * interval. This should keep retrying for as 14458 * long as the underlying transport keeps 14459 * returning TRAN_BUSY (just like for other 14460 * commands). Use the same timeout interval as 14461 * for the ordinary TRAN_BUSY retry. 14462 */ 14463 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14464 "sd_start_cmds: scsi_transport() returned " 14465 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 14466 14467 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14468 un->un_direct_priority_timeid = 14469 timeout(sd_start_direct_priority_command, 14470 bp, SD_BSY_TIMEOUT / 500); 14471 14472 goto exit; 14473 } 14474 14475 /* 14476 * For TRAN_BUSY, we want to reduce the throttle value, 14477 * unless we are retrying a command. 14478 */ 14479 if (bp != un->un_retry_bp) { 14480 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 14481 } 14482 14483 /* 14484 * Set up the bp to be tried again 10 ms later. 14485 * Note:x86: Is there a timeout value in the sd_lun 14486 * for this condition? 14487 */ 14488 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 14489 kstat_runq_back_to_waitq); 14490 goto exit; 14491 14492 case TRAN_FATAL_ERROR: 14493 un->un_tran_fatal_count++; 14494 /* FALLTHRU */ 14495 14496 case TRAN_BADPKT: 14497 default: 14498 un->un_ncmds_in_transport--; 14499 ASSERT(un->un_ncmds_in_transport >= 0); 14500 14501 /* 14502 * If this is our REQUEST SENSE command with a 14503 * transport error, we must get back the pointers 14504 * to the original buf, and mark the REQUEST 14505 * SENSE command as "available". 14506 */ 14507 if (bp == un->un_rqs_bp) { 14508 bp = sd_mark_rqs_idle(un, xp); 14509 xp = SD_GET_XBUF(bp); 14510 } else { 14511 /* 14512 * Legacy behavior: do not update transport 14513 * error count for request sense commands. 14514 */ 14515 SD_UPDATE_ERRSTATS(un, sd_transerrs); 14516 } 14517 14518 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14519 sd_print_transport_rejected_message(un, xp, rval); 14520 14521 /* 14522 * We must use sd_return_failed_command_no_restart() to 14523 * avoid a recursive call back into sd_start_cmds(). 14524 * However this also means that we must keep processing 14525 * the waitq here in order to avoid stalling. 14526 */ 14527 sd_return_failed_command_no_restart(un, bp, EIO); 14528 14529 /* 14530 * Notify any threads waiting in sd_ddi_suspend() that 14531 * a command completion has occurred. 14532 */ 14533 if (un->un_state == SD_STATE_SUSPENDED) { 14534 cv_broadcast(&un->un_disk_busy_cv); 14535 } 14536 14537 if (bp == immed_bp) { 14538 /* immed_bp is gone by now, so clear this */ 14539 immed_bp = NULL; 14540 } 14541 break; 14542 } 14543 14544 } while (immed_bp == NULL); 14545 14546 exit: 14547 ASSERT(mutex_owned(SD_MUTEX(un))); 14548 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 14549 } 14550 14551 14552 /* 14553 * Function: sd_return_command 14554 * 14555 * Description: Returns a command to its originator (with or without an 14556 * error). Also starts commands waiting to be transported 14557 * to the target. 
14558 * 14559 * Context: May be called from interrupt, kernel, or timeout context 14560 */ 14561 14562 static void 14563 sd_return_command(struct sd_lun *un, struct buf *bp) 14564 { 14565 struct sd_xbuf *xp; 14566 #if defined(__i386) || defined(__amd64) 14567 struct scsi_pkt *pktp; 14568 #endif 14569 14570 ASSERT(bp != NULL); 14571 ASSERT(un != NULL); 14572 ASSERT(mutex_owned(SD_MUTEX(un))); 14573 ASSERT(bp != un->un_rqs_bp); 14574 xp = SD_GET_XBUF(bp); 14575 ASSERT(xp != NULL); 14576 14577 #if defined(__i386) || defined(__amd64) 14578 pktp = SD_GET_PKTP(bp); 14579 #endif 14580 14581 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 14582 14583 #if defined(__i386) || defined(__amd64) 14584 /* 14585 * Note:x86: check for the "sdrestart failed" case. 14586 */ 14587 if (((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 14588 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 14589 (xp->xb_pktp->pkt_resid == 0)) { 14590 14591 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 14592 /* 14593 * Successfully set up next portion of cmd 14594 * transfer, try sending it 14595 */ 14596 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14597 NULL, NULL, 0, (clock_t)0, NULL); 14598 sd_start_cmds(un, NULL); 14599 return; /* Note:x86: need a return here? */ 14600 } 14601 } 14602 #endif 14603 14604 /* 14605 * If this is the failfast bp, clear it from un_failfast_bp. This 14606 * can happen if upon being re-tried the failfast bp either 14607 * succeeded or encountered another error (possibly even a different 14608 * error than the one that precipitated the failfast state, but in 14609 * that case it would have had to exhaust retries as well). Regardless, 14610 * this should not occur whenever the instance is in the active 14611 * failfast state. 14612 */ 14613 if (bp == un->un_failfast_bp) { 14614 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 14615 un->un_failfast_bp = NULL; 14616 } 14617 14618 /* 14619 * Clear the failfast state upon successful completion of ANY cmd. 14620 */ 14621 if (bp->b_error == 0) { 14622 un->un_failfast_state = SD_FAILFAST_INACTIVE; 14623 } 14624 14625 /* 14626 * This is used if the command was retried one or more times. Show that 14627 * we are done with it, and allow processing of the waitq to resume. 14628 */ 14629 if (bp == un->un_retry_bp) { 14630 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14631 "sd_return_command: un:0x%p: " 14632 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 14633 un->un_retry_bp = NULL; 14634 un->un_retry_statp = NULL; 14635 } 14636 14637 SD_UPDATE_RDWR_STATS(un, bp); 14638 SD_UPDATE_PARTITION_STATS(un, bp); 14639 14640 switch (un->un_state) { 14641 case SD_STATE_SUSPENDED: 14642 /* 14643 * Notify any threads waiting in sd_ddi_suspend() that 14644 * a command completion has occurred. 14645 */ 14646 cv_broadcast(&un->un_disk_busy_cv); 14647 break; 14648 default: 14649 sd_start_cmds(un, NULL); 14650 break; 14651 } 14652 14653 /* Return this command up the iodone chain to its originator. */ 14654 mutex_exit(SD_MUTEX(un)); 14655 14656 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 14657 xp->xb_pktp = NULL; 14658 14659 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 14660 14661 ASSERT(!mutex_owned(SD_MUTEX(un))); 14662 mutex_enter(SD_MUTEX(un)); 14663 14664 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 14665 } 14666 14667 14668 /* 14669 * Function: sd_return_failed_command 14670 * 14671 * Description: Command completion when an error occurred. 
14672 * 14673 * Context: May be called from interrupt context 14674 */ 14675 14676 static void 14677 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 14678 { 14679 ASSERT(bp != NULL); 14680 ASSERT(un != NULL); 14681 ASSERT(mutex_owned(SD_MUTEX(un))); 14682 14683 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14684 "sd_return_failed_command: entry\n"); 14685 14686 /* 14687 * b_resid could already be nonzero due to a partial data 14688 * transfer, so do not change it here. 14689 */ 14690 SD_BIOERROR(bp, errcode); 14691 14692 sd_return_command(un, bp); 14693 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14694 "sd_return_failed_command: exit\n"); 14695 } 14696 14697 14698 /* 14699 * Function: sd_return_failed_command_no_restart 14700 * 14701 * Description: Same as sd_return_failed_command, but ensures that no 14702 * call back into sd_start_cmds will be issued. 14703 * 14704 * Context: May be called from interrupt context 14705 */ 14706 14707 static void 14708 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 14709 int errcode) 14710 { 14711 struct sd_xbuf *xp; 14712 14713 ASSERT(bp != NULL); 14714 ASSERT(un != NULL); 14715 ASSERT(mutex_owned(SD_MUTEX(un))); 14716 xp = SD_GET_XBUF(bp); 14717 ASSERT(xp != NULL); 14718 ASSERT(errcode != 0); 14719 14720 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14721 "sd_return_failed_command_no_restart: entry\n"); 14722 14723 /* 14724 * b_resid could already be nonzero due to a partial data 14725 * transfer, so do not change it here. 14726 */ 14727 SD_BIOERROR(bp, errcode); 14728 14729 /* 14730 * If this is the failfast bp, clear it. This can happen if the 14731 * failfast bp encountered a fatal error when we attempted to 14732 * re-try it (such as a scsi_transport(9F) failure). However 14733 * we should NOT be in an active failfast state if the failfast 14734 * bp is not NULL. 14735 */ 14736 if (bp == un->un_failfast_bp) { 14737 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 14738 un->un_failfast_bp = NULL; 14739 } 14740 14741 if (bp == un->un_retry_bp) { 14742 /* 14743 * This command was retried one or more times. Show that we are 14744 * done with it, and allow processing of the waitq to resume. 14745 */ 14746 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14747 "sd_return_failed_command_no_restart: " 14748 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 14749 un->un_retry_bp = NULL; 14750 un->un_retry_statp = NULL; 14751 } 14752 14753 SD_UPDATE_RDWR_STATS(un, bp); 14754 SD_UPDATE_PARTITION_STATS(un, bp); 14755 14756 mutex_exit(SD_MUTEX(un)); 14757 14758 if (xp->xb_pktp != NULL) { 14759 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 14760 xp->xb_pktp = NULL; 14761 } 14762 14763 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 14764 14765 mutex_enter(SD_MUTEX(un)); 14766 14767 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14768 "sd_return_failed_command_no_restart: exit\n"); 14769 } 14770 14771 14772 /* 14773 * Function: sd_retry_command 14774 * 14775 * Description: Queues up a command for retry, or (optionally) fails it 14776 * if retry counts are exhausted. 14777 * 14778 * Arguments: un - Pointer to the sd_lun struct for the target. 14779 * 14780 * bp - Pointer to the buf for the command to be retried. 14781 * 14782 * retry_check_flag - Flag to see which (if any) of the retry 14783 * counts should be decremented/checked. If the indicated 14784 * retry count is exhausted, then the command will not be 14785 * retried; it will be failed instead.
This should use a 14786 * value equal to one of the following: 14787 * 14788 * SD_RETRIES_NOCHECK 14789 * SD_RETRIES_STANDARD 14790 * SD_RETRIES_VICTIM 14791 * 14792 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 14793 * if the check should be made to see if FLAG_ISOLATE is set 14794 * in the pkt. If FLAG_ISOLATE is set, then the command is 14795 * not retried, it is simply failed. 14796 * 14797 * user_funcp - Ptr to function to call before dispatching the 14798 * command. May be NULL if no action needs to be performed. 14799 * (Primarily intended for printing messages.) 14800 * 14801 * user_arg - Optional argument to be passed along to 14802 * the user_funcp call. 14803 * 14804 * failure_code - errno return code to set in the bp if the 14805 * command is going to be failed. 14806 * 14807 * retry_delay - Retry delay interval in (clock_t) units. May 14808 * be zero which indicates that the retry should be attempted 14809 * immediately (ie, without an intervening delay). 14810 * 14811 * statp - Ptr to kstat function to be updated if the command 14812 * is queued for a delayed retry. May be NULL if no kstat 14813 * update is desired. 14814 * 14815 * Context: May be called from interrupt context. 14816 */ 14817 14818 static void 14819 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 14820 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 14821 code), void *user_arg, int failure_code, clock_t retry_delay, 14822 void (*statp)(kstat_io_t *)) 14823 { 14824 struct sd_xbuf *xp; 14825 struct scsi_pkt *pktp; 14826 14827 ASSERT(un != NULL); 14828 ASSERT(mutex_owned(SD_MUTEX(un))); 14829 ASSERT(bp != NULL); 14830 xp = SD_GET_XBUF(bp); 14831 ASSERT(xp != NULL); 14832 pktp = SD_GET_PKTP(bp); 14833 ASSERT(pktp != NULL); 14834 14835 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14836 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 14837 14838 /* 14839 * If we are syncing or dumping, fail the command to avoid 14840 * recursively calling back into scsi_transport(). 14841 */ 14842 if (ddi_in_panic()) { 14843 goto fail_command_no_log; 14844 } 14845 14846 /* 14847 * We should never be retrying a command with FLAG_DIAGNOSE set, so 14848 * log an error and fail the command. 14849 */ 14850 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14851 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 14852 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 14853 sd_dump_memory(un, SD_LOG_IO, "CDB", 14854 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 14855 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 14856 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 14857 goto fail_command; 14858 } 14859 14860 /* 14861 * If we are suspended, then put the command onto the head of the 14862 * wait queue since we don't want to start more commands. 14863 */ 14864 switch (un->un_state) { 14865 case SD_STATE_SUSPENDED: 14866 case SD_STATE_DUMPING: 14867 bp->av_forw = un->un_waitq_headp; 14868 un->un_waitq_headp = bp; 14869 if (un->un_waitq_tailp == NULL) { 14870 un->un_waitq_tailp = bp; 14871 } 14872 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 14873 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 14874 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 14875 return; 14876 default: 14877 break; 14878 } 14879 14880 /* 14881 * If the caller wants us to check FLAG_ISOLATE, then see if that 14882 * is set; if it is then we do not want to retry the command. 14883 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
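 *
 * For example, a caller that wants standard retry accounting,
 * but no retries for isolated (typically USCSI) commands, could
 * invoke (a sketch of the calling convention only; the flag
 * combination, error code, and delay shown are illustrative):
 *
 *	sd_retry_command(un, bp,
 *	    SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE,
 *	    NULL, NULL, EIO, SD_BSY_TIMEOUT, kstat_waitq_enter);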
14884 */ 14885 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 14886 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 14887 goto fail_command; 14888 } 14889 } 14890 14891 14892 /* 14893 * If SD_RETRIES_FAILFAST is set, it indicates that either a 14894 * command timeout or a selection timeout has occurred. This means 14895 * that we were unable to establish any kind of communication with 14896 * the target, and subsequent retries and/or commands are likely 14897 * to encounter similar results and take a long time to complete. 14898 * 14899 * If this is a failfast error condition, we need to update the 14900 * failfast state, even if this bp does not have B_FAILFAST set. 14901 */ 14902 if (retry_check_flag & SD_RETRIES_FAILFAST) { 14903 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 14904 ASSERT(un->un_failfast_bp == NULL); 14905 /* 14906 * If we are already in the active failfast state, and 14907 * another failfast error condition has been detected, 14908 * then fail this command if it has B_FAILFAST set. 14909 * If B_FAILFAST is clear, then maintain the legacy 14910 * behavior of retrying heroically, even though this 14911 * will take a lot more time to fail the command. 14912 */ 14913 if (bp->b_flags & B_FAILFAST) { 14914 goto fail_command; 14915 } 14916 } else { 14917 /* 14918 * We're not in the active failfast state, but we 14919 * have a failfast error condition, so we must begin 14920 * transition to the next state. We do this regardless 14921 * of whether or not this bp has B_FAILFAST set. 14922 */ 14923 if (un->un_failfast_bp == NULL) { 14924 /* 14925 * This is the first bp to meet a failfast 14926 * condition so save it on un_failfast_bp & 14927 * do normal retry processing. Do not enter 14928 * active failfast state yet. This marks 14929 * entry into the "failfast pending" state. 14930 */ 14931 un->un_failfast_bp = bp; 14932 14933 } else if (un->un_failfast_bp == bp) { 14934 /* 14935 * This is the second time *this* bp has 14936 * encountered a failfast error condition, 14937 * so enter active failfast state & flush 14938 * queues as appropriate. 14939 */ 14940 un->un_failfast_state = SD_FAILFAST_ACTIVE; 14941 un->un_failfast_bp = NULL; 14942 sd_failfast_flushq(un); 14943 14944 /* 14945 * Fail this bp now if B_FAILFAST set; 14946 * otherwise continue with retries. (It would 14947 * be pretty ironic if this bp succeeded on a 14948 * subsequent retry after we just flushed all 14949 * the queues). 14950 */ 14951 if (bp->b_flags & B_FAILFAST) { 14952 goto fail_command; 14953 } 14954 14955 #if !defined(lint) && !defined(__lint) 14956 } else { 14957 /* 14958 * If neither of the preceding conditionals 14959 * was true, it means that there is some 14960 * *other* bp that has met an initial failfast 14961 * condition and is currently either being 14962 * retried or is waiting to be retried. In 14963 * that case we should perform normal retry 14964 * processing on *this* bp, since there is a 14965 * chance that the current failfast condition 14966 * is transient and recoverable. If that does 14967 * not turn out to be the case, then retries 14968 * will be cleared when the wait queue is 14969 * flushed anyway.
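 *
 * To summarize, the failfast handling above behaves as a
 * three-state machine (an informal sketch of the transitions
 * implemented in this block):
 *
 *	inactive --(1st failfast error)--> pending
 *	    (un_failfast_bp = bp; state stays SD_FAILFAST_INACTIVE)
 *	pending --(same bp hits a failfast error again)-->
 *	    SD_FAILFAST_ACTIVE (un_failfast_bp cleared; queues
 *	    flushed via sd_failfast_flushq())
 *	any state --(non-failfast error, see the else clause
 *	    below)--> SD_FAILFAST_INACTIVE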
14970 */ 14971 #endif 14972 } 14973 } 14974 } else { 14975 /* 14976 * SD_RETRIES_FAILFAST is clear, which indicates that we 14977 * likely were able to at least establish some level of 14978 * communication with the target and subsequent commands 14979 * and/or retries are likely to get through to the target. 14980 * In this case we want to be aggressive about clearing 14981 * the failfast state. Note that this does not affect 14982 * the "failfast pending" condition. 14983 */ 14984 un->un_failfast_state = SD_FAILFAST_INACTIVE; 14985 } 14986 14987 14988 /* 14989 * Check the specified retry count to see if we can still do 14990 * any retries with this pkt before we should fail it. 14991 */ 14992 switch (retry_check_flag & SD_RETRIES_MASK) { 14993 case SD_RETRIES_VICTIM: 14994 /* 14995 * Check the victim retry count. If exhausted, then fall 14996 * thru & check against the standard retry count. 14997 */ 14998 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 14999 /* Increment count & proceed with the retry */ 15000 xp->xb_victim_retry_count++; 15001 break; 15002 } 15003 /* Victim retries exhausted, fall back to std. retries... */ 15004 /* FALLTHRU */ 15005 15006 case SD_RETRIES_STANDARD: 15007 if (xp->xb_retry_count >= un->un_retry_count) { 15008 /* Retries exhausted, fail the command */ 15009 SD_TRACE(SD_LOG_IO_CORE, un, 15010 "sd_retry_command: retries exhausted!\n"); 15011 /* 15012 * update b_resid for failed SCMD_READ & SCMD_WRITE 15013 * commands with nonzero pkt_resid. 15014 */ 15015 if ((pktp->pkt_reason == CMD_CMPLT) && 15016 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 15017 (pktp->pkt_resid != 0)) { 15018 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 15019 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 15020 SD_UPDATE_B_RESID(bp, pktp); 15021 } 15022 } 15023 goto fail_command; 15024 } 15025 xp->xb_retry_count++; 15026 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15027 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15028 break; 15029 15030 case SD_RETRIES_UA: 15031 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 15032 /* Retries exhausted, fail the command */ 15033 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15034 "Unit Attention retries exhausted. " 15035 "Check the target.\n"); 15036 goto fail_command; 15037 } 15038 xp->xb_ua_retry_count++; 15039 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15040 "sd_retry_command: retry count:%d\n", 15041 xp->xb_ua_retry_count); 15042 break; 15043 15044 case SD_RETRIES_BUSY: 15045 if (xp->xb_retry_count >= un->un_busy_retry_count) { 15046 /* Retries exhausted, fail the command */ 15047 SD_TRACE(SD_LOG_IO_CORE, un, 15048 "sd_retry_command: retries exhausted!\n"); 15049 goto fail_command; 15050 } 15051 xp->xb_retry_count++; 15052 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15053 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15054 break; 15055 15056 case SD_RETRIES_NOCHECK: 15057 default: 15058 /* No retry count to check. Just proceed with the retry */ 15059 break; 15060 } 15061 15062 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15063 15064 /* 15065 * If we were given a zero timeout, we must attempt to retry the 15066 * command immediately (ie, without a delay). 15067 */ 15068 if (retry_delay == 0) { 15069 /* 15070 * Check some limiting conditions to see if we can actually 15071 * do the immediate retry. If we cannot, then we must 15072 * fall back to queueing up a delayed retry.
15073 */ 15074 if (un->un_ncmds_in_transport >= un->un_throttle) { 15075 /* 15076 * We are at the throttle limit for the target, 15077 * fall back to delayed retry. 15078 */ 15079 retry_delay = SD_BSY_TIMEOUT; 15080 statp = kstat_waitq_enter; 15081 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15082 "sd_retry_command: immed. retry hit " 15083 "throttle!\n"); 15084 } else { 15085 /* 15086 * We're clear to proceed with the immediate retry. 15087 * First call the user-provided function (if any) 15088 */ 15089 if (user_funcp != NULL) { 15090 (*user_funcp)(un, bp, user_arg, 15091 SD_IMMEDIATE_RETRY_ISSUED); 15092 } 15093 15094 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15095 "sd_retry_command: issuing immediate retry\n"); 15096 15097 /* 15098 * Call sd_start_cmds() to transport the command to 15099 * the target. 15100 */ 15101 sd_start_cmds(un, bp); 15102 15103 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15104 "sd_retry_command exit\n"); 15105 return; 15106 } 15107 } 15108 15109 /* 15110 * Set up to retry the command after a delay. 15111 * First call the user-provided function (if any) 15112 */ 15113 if (user_funcp != NULL) { 15114 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 15115 } 15116 15117 sd_set_retry_bp(un, bp, retry_delay, statp); 15118 15119 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15120 return; 15121 15122 fail_command: 15123 15124 if (user_funcp != NULL) { 15125 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 15126 } 15127 15128 fail_command_no_log: 15129 15130 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15131 "sd_retry_command: returning failed command\n"); 15132 15133 sd_return_failed_command(un, bp, failure_code); 15134 15135 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15136 } 15137 15138 15139 /* 15140 * Function: sd_set_retry_bp 15141 * 15142 * Description: Set up the given bp for retry. 15143 * 15144 * Arguments: un - ptr to associated softstate 15145 * bp - ptr to buf(9S) for the command 15146 * retry_delay - time interval before issuing retry (may be 0) 15147 * statp - optional pointer to kstat function 15148 * 15149 * Context: May be called under interrupt context 15150 */ 15151 15152 static void 15153 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 15154 void (*statp)(kstat_io_t *)) 15155 { 15156 ASSERT(un != NULL); 15157 ASSERT(mutex_owned(SD_MUTEX(un))); 15158 ASSERT(bp != NULL); 15159 15160 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15161 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 15162 15163 /* 15164 * Indicate that the command is being retried. This will not allow any 15165 * other commands on the wait queue to be transported to the target 15166 * until this command has been completed (success or failure). The 15167 * "retry command" is not transported to the target until the given 15168 * time delay expires, unless the user specified a 0 retry_delay. 15169 * 15170 * Note: the timeout(9F) callback routine is what actually calls 15171 * sd_start_cmds() to transport the command, with the exception of a 15172 * zero retry_delay. The only current implementor of a zero retry delay 15173 * is the case where a START_STOP_UNIT is sent to spin-up a device. 15174 */ 15175 if (un->un_retry_bp == NULL) { 15176 ASSERT(un->un_retry_statp == NULL); 15177 un->un_retry_bp = bp; 15178 15179 /* 15180 * If the user has not specified a delay the command should 15181 * be queued and no timeout should be scheduled. 
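 *
 * Roughly, the three cases handled below are (an informal
 * summary of this routine's logic):
 *
 *	un_retry_bp == NULL: adopt bp as the pending retry; if
 *	    retry_delay is zero just save statp, else fall through
 *	    and schedule the timeout.
 *	un_retry_bp == bp:   save statp and schedule timeout(9F)
 *	    unless a START_STOP_UNIT or direct-priority callback
 *	    is already pending.
 *	otherwise:           some other retry is pending; enqueue
 *	    bp at the head of the waitq (but never ahead of
 *	    un_failfast_bp).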
15182 */ 15183 if (retry_delay == 0) { 15184 /* 15185 * Save the kstat pointer that will be used in the 15186 * call to SD_UPDATE_KSTATS() below, so that 15187 * sd_start_cmds() can correctly decrement the waitq 15188 * count when it is time to transport this command. 15189 */ 15190 un->un_retry_statp = statp; 15191 goto done; 15192 } 15193 } 15194 15195 if (un->un_retry_bp == bp) { 15196 /* 15197 * Save the kstat pointer that will be used in the call to 15198 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 15199 * correctly decrement the waitq count when it is time to 15200 * transport this command. 15201 */ 15202 un->un_retry_statp = statp; 15203 15204 /* 15205 * Schedule a timeout if: 15206 * 1) The user has specified a delay. 15207 * 2) There is not a START_STOP_UNIT callback pending. 15208 * 15209 * If no delay has been specified, then it is up to the caller 15210 * to ensure that IO processing continues without stalling. 15211 * Effectively, this means that the caller will issue the 15212 * required call to sd_start_cmds(). The START_STOP_UNIT 15213 * callback does this after the START STOP UNIT command has 15214 * completed. In either of these cases we should not schedule 15215 * a timeout callback here. Also don't schedule the timeout if 15216 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 15217 */ 15218 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 15219 (un->un_direct_priority_timeid == NULL)) { 15220 un->un_retry_timeid = 15221 timeout(sd_start_retry_command, un, retry_delay); 15222 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15223 "sd_set_retry_bp: setting timeout: un: 0x%p" 15224 " bp:0x%p un_retry_timeid:0x%p\n", 15225 un, bp, un->un_retry_timeid); 15226 } 15227 } else { 15228 /* 15229 * We only get in here if there is already another command 15230 * waiting to be retried. In this case, we just put the 15231 * given command onto the wait queue, so it can be transported 15232 * after the current retry command has completed. 15233 * 15234 * Also we have to make sure that if the command at the head 15235 * of the wait queue is the un_failfast_bp, that we do not 15236 * put ahead of it any other commands that are to be retried. 15237 */ 15238 if ((un->un_failfast_bp != NULL) && 15239 (un->un_failfast_bp == un->un_waitq_headp)) { 15240 /* 15241 * Enqueue this command AFTER the first command on 15242 * the wait queue (which is also un_failfast_bp). 15243 */ 15244 bp->av_forw = un->un_waitq_headp->av_forw; 15245 un->un_waitq_headp->av_forw = bp; 15246 if (un->un_waitq_headp == un->un_waitq_tailp) { 15247 un->un_waitq_tailp = bp; 15248 } 15249 } else { 15250 /* Enqueue this command at the head of the waitq. */ 15251 bp->av_forw = un->un_waitq_headp; 15252 un->un_waitq_headp = bp; 15253 if (un->un_waitq_tailp == NULL) { 15254 un->un_waitq_tailp = bp; 15255 } 15256 } 15257 15258 if (statp == NULL) { 15259 statp = kstat_waitq_enter; 15260 } 15261 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15262 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 15263 } 15264 15265 done: 15266 if (statp != NULL) { 15267 SD_UPDATE_KSTATS(un, statp, bp); 15268 } 15269 15270 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15271 "sd_set_retry_bp: exit un:0x%p\n", un); 15272 } 15273 15274 15275 /* 15276 * Function: sd_start_retry_command 15277 * 15278 * Description: Start the command that has been waiting on the target's 15279 * retry queue. Called from timeout(9F) context after the 15280 * retry delay interval has expired. 
15281 * 15282 * Arguments: arg - pointer to associated softstate for the device. 15283 * 15284 * Context: timeout(9F) thread context. May not sleep. 15285 */ 15286 15287 static void 15288 sd_start_retry_command(void *arg) 15289 { 15290 struct sd_lun *un = arg; 15291 15292 ASSERT(un != NULL); 15293 ASSERT(!mutex_owned(SD_MUTEX(un))); 15294 15295 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15296 "sd_start_retry_command: entry\n"); 15297 15298 mutex_enter(SD_MUTEX(un)); 15299 15300 un->un_retry_timeid = NULL; 15301 15302 if (un->un_retry_bp != NULL) { 15303 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15304 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 15305 un, un->un_retry_bp); 15306 sd_start_cmds(un, un->un_retry_bp); 15307 } 15308 15309 mutex_exit(SD_MUTEX(un)); 15310 15311 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15312 "sd_start_retry_command: exit\n"); 15313 } 15314 15315 15316 /* 15317 * Function: sd_start_direct_priority_command 15318 * 15319 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 15320 * received TRAN_BUSY when we called scsi_transport() to send it 15321 * to the underlying HBA. This function is called from timeout(9F) 15322 * context after the delay interval has expired. 15323 * 15324 * Arguments: arg - pointer to associated buf(9S) to be restarted. 15325 * 15326 * Context: timeout(9F) thread context. May not sleep. 15327 */ 15328 15329 static void 15330 sd_start_direct_priority_command(void *arg) 15331 { 15332 struct buf *priority_bp = arg; 15333 struct sd_lun *un; 15334 15335 ASSERT(priority_bp != NULL); 15336 un = SD_GET_UN(priority_bp); 15337 ASSERT(un != NULL); 15338 ASSERT(!mutex_owned(SD_MUTEX(un))); 15339 15340 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15341 "sd_start_direct_priority_command: entry\n"); 15342 15343 mutex_enter(SD_MUTEX(un)); 15344 un->un_direct_priority_timeid = NULL; 15345 sd_start_cmds(un, priority_bp); 15346 mutex_exit(SD_MUTEX(un)); 15347 15348 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15349 "sd_start_direct_priority_command: exit\n"); 15350 } 15351 15352 15353 /* 15354 * Function: sd_send_request_sense_command 15355 * 15356 * Description: Sends a REQUEST SENSE command to the target 15357 * 15358 * Context: May be called from interrupt context. 15359 */ 15360 15361 static void 15362 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 15363 struct scsi_pkt *pktp) 15364 { 15365 ASSERT(bp != NULL); 15366 ASSERT(un != NULL); 15367 ASSERT(mutex_owned(SD_MUTEX(un))); 15368 15369 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 15370 "entry: buf:0x%p\n", bp); 15371 15372 /* 15373 * If we are syncing or dumping, then fail the command to avoid a 15374 * recursive callback into scsi_transport(). Also fail the command 15375 * if we are suspended (legacy behavior). 15376 */ 15377 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 15378 (un->un_state == SD_STATE_DUMPING)) { 15379 sd_return_failed_command(un, bp, EIO); 15380 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15381 "sd_send_request_sense_command: syncing/dumping, exit\n"); 15382 return; 15383 } 15384 15385 /* 15386 * Retry the failed command and don't issue the request sense if: 15387 * 1) the sense buf is busy 15388 * 2) we have 1 or more outstanding commands on the target 15389 * (the sense data will be cleared or invalidated anyway) 15390 * 15391 * Note: There could be an issue with not checking a retry limit here; 15392 * the problem is determining which retry limit to check.
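 *
 * For orientation, the non-ARQ sense round trip implemented by
 * the routines below is roughly:
 *
 *	1. sd_mark_rqs_busy() saves the failed bp in xb_sense_bp
 *	    and sets FLAG_SENSING in the failed command's pkt.
 *	2. un_rqs_bp (already flagged FLAG_SENSING | FLAG_HEAD) is
 *	    transported via sd_start_cmds().
 *	3. sdintr() sees FLAG_SENSING in the completing pkt and
 *	    dispatches to sd_handle_request_sense().
 *	4. sd_mark_rqs_idle() clears FLAG_SENSING and hands back
 *	    the original bp for completion processing.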
15393 */ 15394 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 15395 /* Don't retry if the command is flagged as non-retryable */ 15396 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15397 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 15398 NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter); 15399 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15400 "sd_send_request_sense_command: " 15401 "at full throttle, retrying exit\n"); 15402 } else { 15403 sd_return_failed_command(un, bp, EIO); 15404 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15405 "sd_send_request_sense_command: " 15406 "at full throttle, non-retryable exit\n"); 15407 } 15408 return; 15409 } 15410 15411 sd_mark_rqs_busy(un, bp); 15412 sd_start_cmds(un, un->un_rqs_bp); 15413 15414 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15415 "sd_send_request_sense_command: exit\n"); 15416 } 15417 15418 15419 /* 15420 * Function: sd_mark_rqs_busy 15421 * 15422 * Description: Indicate that the request sense bp for this instance is 15423 * in use. 15424 * 15425 * Context: May be called under interrupt context 15426 */ 15427 15428 static void 15429 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 15430 { 15431 struct sd_xbuf *sense_xp; 15432 15433 ASSERT(un != NULL); 15434 ASSERT(bp != NULL); 15435 ASSERT(mutex_owned(SD_MUTEX(un))); 15436 ASSERT(un->un_sense_isbusy == 0); 15437 15438 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 15439 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 15440 15441 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 15442 ASSERT(sense_xp != NULL); 15443 15444 SD_INFO(SD_LOG_IO, un, 15445 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 15446 15447 ASSERT(sense_xp->xb_pktp != NULL); 15448 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 15449 == (FLAG_SENSING | FLAG_HEAD)); 15450 15451 un->un_sense_isbusy = 1; 15452 un->un_rqs_bp->b_resid = 0; 15453 sense_xp->xb_pktp->pkt_resid = 0; 15454 sense_xp->xb_pktp->pkt_reason = 0; 15455 15456 /* So we can get back the bp at interrupt time! */ 15457 sense_xp->xb_sense_bp = bp; 15458 15459 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 15460 15461 /* 15462 * Mark this buf as awaiting sense data. (This is already set in 15463 * the pkt_flags for the RQS packet.) 15464 */ 15465 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 15466 15467 sense_xp->xb_retry_count = 0; 15468 sense_xp->xb_victim_retry_count = 0; 15469 sense_xp->xb_ua_retry_count = 0; 15470 sense_xp->xb_dma_resid = 0; 15471 15472 /* Clean up the fields for auto-request sense */ 15473 sense_xp->xb_sense_status = 0; 15474 sense_xp->xb_sense_state = 0; 15475 sense_xp->xb_sense_resid = 0; 15476 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 15477 15478 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 15479 } 15480 15481 15482 /* 15483 * Function: sd_mark_rqs_idle 15484 * 15485 * Description: SD_MUTEX must be held continuously through this routine 15486 * to prevent reuse of the rqs struct before the caller can 15487 * complete its processing.
15488 * 15489 * Return Code: Pointer to the RQS buf 15490 * 15491 * Context: May be called under interrupt context 15492 */ 15493 15494 static struct buf * 15495 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 15496 { 15497 struct buf *bp; 15498 ASSERT(un != NULL); 15499 ASSERT(sense_xp != NULL); 15500 ASSERT(mutex_owned(SD_MUTEX(un))); 15501 ASSERT(un->un_sense_isbusy != 0); 15502 15503 un->un_sense_isbusy = 0; 15504 bp = sense_xp->xb_sense_bp; 15505 sense_xp->xb_sense_bp = NULL; 15506 15507 /* This pkt is no longer interested in getting sense data */ 15508 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 15509 15510 return (bp); 15511 } 15512 15513 15514 15515 /* 15516 * Function: sd_alloc_rqs 15517 * 15518 * Description: Set up the unit to receive auto request sense data 15519 * 15520 * Return Code: DDI_SUCCESS or DDI_FAILURE 15521 * 15522 * Context: Called under attach(9E) context 15523 */ 15524 15525 static int 15526 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 15527 { 15528 struct sd_xbuf *xp; 15529 15530 ASSERT(un != NULL); 15531 ASSERT(!mutex_owned(SD_MUTEX(un))); 15532 ASSERT(un->un_rqs_bp == NULL); 15533 ASSERT(un->un_rqs_pktp == NULL); 15534 15535 /* 15536 * First allocate the required buf and scsi_pkt structs, then set up 15537 * the CDB in the scsi_pkt for a REQUEST SENSE command. 15538 */ 15539 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 15540 SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 15541 if (un->un_rqs_bp == NULL) { 15542 return (DDI_FAILURE); 15543 } 15544 15545 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 15546 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 15547 15548 if (un->un_rqs_pktp == NULL) { 15549 sd_free_rqs(un); 15550 return (DDI_FAILURE); 15551 } 15552 15553 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 15554 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 15555 SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0); 15556 15557 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 15558 15559 /* Set up the other needed members in the ARQ scsi_pkt. */ 15560 un->un_rqs_pktp->pkt_comp = sdintr; 15561 un->un_rqs_pktp->pkt_time = sd_io_time; 15562 un->un_rqs_pktp->pkt_flags |= 15563 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 15564 15565 /* 15566 * Allocate & init the sd_xbuf struct for the RQS command. Do not 15567 * provide any initpkt, destroypkt routines as we take care of 15568 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 15569 */ 15570 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 15571 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 15572 xp->xb_pktp = un->un_rqs_pktp; 15573 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15574 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 15575 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 15576 15577 /* 15578 * Save the pointer to the request sense private bp so it can 15579 * be retrieved in sdintr. 15580 */ 15581 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 15582 ASSERT(un->un_rqs_bp->b_private == xp); 15583 15584 /* 15585 * See if the HBA supports auto-request sense for the specified 15586 * target/lun. If it does, then try to enable it (if not already 15587 * enabled). 15588 * 15589 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 15590 * failure, while for other HBAs (pln) scsi_ifsetcap will always 15591 * return success. However, in both of these cases ARQ is always 15592 * enabled and scsi_ifgetcap will always return true.
The best approach 15593 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 15594 * 15595 * The 3rd case is an HBA (adp) that always returns enabled on 15596 * scsi_ifgetcap even when ARQ is not enabled; the best approach 15597 * is to issue a scsi_ifsetcap() and then a scsi_ifgetcap(). 15598 * Note: this case is to circumvent the Adaptec bug. (x86 only) 15599 */ 15600 15601 if (un->un_f_is_fibre == TRUE) { 15602 un->un_f_arq_enabled = TRUE; 15603 } else { 15604 #if defined(__i386) || defined(__amd64) 15605 /* 15606 * Circumvent the Adaptec bug, remove this code when 15607 * the bug is fixed 15608 */ 15609 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 15610 #endif 15611 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 15612 case 0: 15613 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15614 "sd_alloc_rqs: HBA supports ARQ\n"); 15615 /* 15616 * ARQ is supported by this HBA but currently is not 15617 * enabled. Attempt to enable it and if successful then 15618 * mark this instance as ARQ enabled. 15619 */ 15620 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 15621 == 1) { 15622 /* Successfully enabled ARQ in the HBA */ 15623 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15624 "sd_alloc_rqs: ARQ enabled\n"); 15625 un->un_f_arq_enabled = TRUE; 15626 } else { 15627 /* Could not enable ARQ in the HBA */ 15628 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15629 "sd_alloc_rqs: failed ARQ enable\n"); 15630 un->un_f_arq_enabled = FALSE; 15631 } 15632 break; 15633 case 1: 15634 /* 15635 * ARQ is supported by this HBA and is already enabled. 15636 * Just mark ARQ as enabled for this instance. 15637 */ 15638 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15639 "sd_alloc_rqs: ARQ already enabled\n"); 15640 un->un_f_arq_enabled = TRUE; 15641 break; 15642 default: 15643 /* 15644 * ARQ is not supported by this HBA; disable it for this 15645 * instance. 15646 */ 15647 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15648 "sd_alloc_rqs: HBA does not support ARQ\n"); 15649 un->un_f_arq_enabled = FALSE; 15650 break; 15651 } 15652 } 15653 15654 return (DDI_SUCCESS); 15655 } 15656 15657 15658 /* 15659 * Function: sd_free_rqs 15660 * 15661 * Description: Cleanup for the per-instance RQS command. 15662 * 15663 * Context: Kernel thread context 15664 */ 15665 15666 static void 15667 sd_free_rqs(struct sd_lun *un) 15668 { 15669 ASSERT(un != NULL); 15670 15671 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 15672 15673 /* 15674 * If consistent memory is bound to a scsi_pkt, the pkt 15675 * has to be destroyed *before* freeing the consistent memory. 15676 * Don't change the sequence of these operations. 15677 * scsi_destroy_pkt() might access memory, which isn't allowed, 15678 * after it was freed in scsi_free_consistent_buf(). 15679 */ 15680 if (un->un_rqs_pktp != NULL) { 15681 scsi_destroy_pkt(un->un_rqs_pktp); 15682 un->un_rqs_pktp = NULL; 15683 } 15684 15685 if (un->un_rqs_bp != NULL) { 15686 kmem_free(SD_GET_XBUF(un->un_rqs_bp), sizeof (struct sd_xbuf)); 15687 scsi_free_consistent_buf(un->un_rqs_bp); 15688 un->un_rqs_bp = NULL; 15689 } 15690 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 15691 } 15692 15693 15694 15695 /* 15696 * Function: sd_reduce_throttle 15697 * 15698 * Description: Reduces the maximum # of outstanding commands on a 15699 * target to the current number of outstanding commands. 15700 * Queues a timeout(9F) callback to restore the limit 15701 * after a specified interval has elapsed. 15702 * Typically used when we get a TRAN_BUSY return code 15703 * back from scsi_transport().
15704 * 15705 * Arguments: un - ptr to the sd_lun softstate struct 15706 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 15707 * 15708 * Context: May be called from interrupt context 15709 */ 15710 15711 static void 15712 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 15713 { 15714 ASSERT(un != NULL); 15715 ASSERT(mutex_owned(SD_MUTEX(un))); 15716 ASSERT(un->un_ncmds_in_transport >= 0); 15717 15718 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 15719 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 15720 un, un->un_throttle, un->un_ncmds_in_transport); 15721 15722 if (un->un_throttle > 1) { 15723 if (un->un_f_use_adaptive_throttle == TRUE) { 15724 switch (throttle_type) { 15725 case SD_THROTTLE_TRAN_BUSY: 15726 if (un->un_busy_throttle == 0) { 15727 un->un_busy_throttle = un->un_throttle; 15728 } 15729 break; 15730 case SD_THROTTLE_QFULL: 15731 un->un_busy_throttle = 0; 15732 break; 15733 default: 15734 ASSERT(FALSE); 15735 } 15736 15737 if (un->un_ncmds_in_transport > 0) { 15738 un->un_throttle = un->un_ncmds_in_transport; 15739 } 15740 15741 } else { 15742 if (un->un_ncmds_in_transport == 0) { 15743 un->un_throttle = 1; 15744 } else { 15745 un->un_throttle = un->un_ncmds_in_transport; 15746 } 15747 } 15748 } 15749 15750 /* Reschedule the timeout if none is currently active */ 15751 if (un->un_reset_throttle_timeid == NULL) { 15752 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 15753 un, SD_THROTTLE_RESET_INTERVAL); 15754 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15755 "sd_reduce_throttle: timeout scheduled!\n"); 15756 } 15757 15758 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 15759 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 15760 } 15761 15762 15763 15764 /* 15765 * Function: sd_restore_throttle 15766 * 15767 * Description: Callback function for timeout(9F). Resets the current 15768 * value of un->un_throttle to its default. 15769 * 15770 * Arguments: arg - pointer to associated softstate for the device. 15771 * 15772 * Context: May be called from interrupt context 15773 */ 15774 15775 static void 15776 sd_restore_throttle(void *arg) 15777 { 15778 struct sd_lun *un = arg; 15779 15780 ASSERT(un != NULL); 15781 ASSERT(!mutex_owned(SD_MUTEX(un))); 15782 15783 mutex_enter(SD_MUTEX(un)); 15784 15785 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 15786 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 15787 15788 un->un_reset_throttle_timeid = NULL; 15789 15790 if (un->un_f_use_adaptive_throttle == TRUE) { 15791 /* 15792 * If un_busy_throttle is nonzero, then it contains the 15793 * value that un_throttle was when we got a TRAN_BUSY back 15794 * from scsi_transport(). We want to revert to this 15795 * value. 15796 * 15797 * In the QFULL case, the throttle limit will incrementally 15798 * increase until it reaches max throttle. 15799 */ 15800 if (un->un_busy_throttle > 0) { 15801 un->un_throttle = un->un_busy_throttle; 15802 un->un_busy_throttle = 0; 15803 } else { 15804 /* 15805 * Increase the throttle by 10% to open the gate 15806 * slowly; schedule another restore if the saved 15807 * throttle has not been reached. 15808 */ 15809 short throttle; 15810 if (sd_qfull_throttle_enable) { 15811 throttle = un->un_throttle + 15812 max((un->un_throttle / 10), 1); 15813 un->un_throttle = 15814 (throttle < un->un_saved_throttle) ?
throttle : un->un_saved_throttle; 15816 if (un->un_throttle < un->un_saved_throttle) { 15817 un->un_reset_throttle_timeid = 15818 timeout(sd_restore_throttle, 15819 un, SD_QFULL_THROTTLE_RESET_INTERVAL); 15820 } 15821 } 15822 } 15823 15824 /* 15825 * If un_throttle has fallen below the low-water mark, we 15826 * restore the maximum value here (and allow it to ratchet 15827 * down again if necessary). 15828 */ 15829 if (un->un_throttle < un->un_min_throttle) { 15830 un->un_throttle = un->un_saved_throttle; 15831 } 15832 } else { 15833 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 15834 "restoring limit from 0x%x to 0x%x\n", 15835 un->un_throttle, un->un_saved_throttle); 15836 un->un_throttle = un->un_saved_throttle; 15837 } 15838 15839 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15840 "sd_restore_throttle: calling sd_start_cmds!\n"); 15841 15842 sd_start_cmds(un, NULL); 15843 15844 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15845 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 15846 un, un->un_throttle); 15847 15848 mutex_exit(SD_MUTEX(un)); 15849 15850 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 15851 } 15852 15853 /* 15854 * Function: sdrunout 15855 * 15856 * Description: Callback routine for scsi_init_pkt when a resource allocation 15857 * fails. 15858 * 15859 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 15860 * soft state instance. 15861 * 15862 * Return Code: The scsi_init_pkt routine allows for the callback function to 15863 * return a 0 indicating the callback should be rescheduled or a 1 15864 * indicating not to reschedule. This routine always returns 1 15865 * because the driver always provides a callback function to 15866 * scsi_init_pkt. This results in a callback always being scheduled 15867 * (via the scsi_init_pkt callback implementation) if a resource 15868 * failure occurs. 15869 * 15870 * Context: This callback function may not block or call routines that block. 15871 * 15872 * Note: Using the scsi_init_pkt callback facility can result in an I/O 15873 * request persisting at the head of the list which cannot be 15874 * satisfied even after multiple retries. In the future the driver 15875 * may implement some kind of maximum runout count before failing 15876 * an I/O. 15877 */ 15878 15879 static int 15880 sdrunout(caddr_t arg) 15881 { 15882 struct sd_lun *un = (struct sd_lun *)arg; 15883 15884 ASSERT(un != NULL); 15885 ASSERT(!mutex_owned(SD_MUTEX(un))); 15886 15887 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 15888 15889 mutex_enter(SD_MUTEX(un)); 15890 sd_start_cmds(un, NULL); 15891 mutex_exit(SD_MUTEX(un)); 15892 /* 15893 * This callback routine always returns 1 (i.e. do not reschedule) 15894 * because we always specify sdrunout as the callback handler for 15895 * scsi_init_pkt inside the call to sd_start_cmds. 15896 */ 15897 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 15898 return (1); 15899 } 15900 15901 15902 /* 15903 * Function: sdintr 15904 * 15905 * Description: Completion callback routine for scsi_pkt(9S) structs 15906 * sent to the HBA driver via scsi_transport(9F).
15907 * 15908 * Context: Interrupt context 15909 */ 15910 15911 static void 15912 sdintr(struct scsi_pkt *pktp) 15913 { 15914 struct buf *bp; 15915 struct sd_xbuf *xp; 15916 struct sd_lun *un; 15917 15918 ASSERT(pktp != NULL); 15919 bp = (struct buf *)pktp->pkt_private; 15920 ASSERT(bp != NULL); 15921 xp = SD_GET_XBUF(bp); 15922 ASSERT(xp != NULL); 15923 ASSERT(xp->xb_pktp != NULL); 15924 un = SD_GET_UN(bp); 15925 ASSERT(un != NULL); 15926 ASSERT(!mutex_owned(SD_MUTEX(un))); 15927 15928 #ifdef SD_FAULT_INJECTION 15929 15930 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 15931 /* SD FaultInjection */ 15932 sd_faultinjection(pktp); 15933 15934 #endif /* SD_FAULT_INJECTION */ 15935 15936 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 15937 " xp:0x%p, un:0x%p\n", bp, xp, un); 15938 15939 mutex_enter(SD_MUTEX(un)); 15940 15941 /* Reduce the count of the #commands currently in transport */ 15942 un->un_ncmds_in_transport--; 15943 ASSERT(un->un_ncmds_in_transport >= 0); 15944 15945 /* Increment counter to indicate that the callback routine is active */ 15946 un->un_in_callback++; 15947 15948 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15949 15950 #ifdef SDDEBUG 15951 if (bp == un->un_retry_bp) { 15952 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 15953 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 15954 un, un->un_retry_bp, un->un_ncmds_in_transport); 15955 } 15956 #endif 15957 15958 /* 15959 * If pkt_reason is CMD_DEV_GONE, just fail the command 15960 */ 15961 if (pktp->pkt_reason == CMD_DEV_GONE) { 15962 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15963 "Device is gone\n"); 15964 sd_return_failed_command(un, bp, EIO); 15965 goto exit; 15966 } 15967 15968 /* 15969 * First see if the pkt has auto-request sense data with it.... 15970 * Look at the packet state first so we don't take a performance 15971 * hit looking at the arq enabled flag unless absolutely necessary. 15972 */ 15973 if ((pktp->pkt_state & STATE_ARQ_DONE) && 15974 (un->un_f_arq_enabled == TRUE)) { 15975 /* 15976 * The HBA did an auto request sense for this command so check 15977 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15978 * driver command that should not be retried. 15979 */ 15980 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15981 /* 15982 * Save the relevant sense info into the xp for the 15983 * original cmd. 15984 */ 15985 struct scsi_arq_status *asp; 15986 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15987 xp->xb_sense_status = 15988 *((uchar_t *)(&(asp->sts_rqpkt_status))); 15989 xp->xb_sense_state = asp->sts_rqpkt_state; 15990 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15991 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15992 min(sizeof (struct scsi_extended_sense), 15993 SENSE_LENGTH)); 15994 15995 /* fail the command */ 15996 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15997 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 15998 sd_return_failed_command(un, bp, EIO); 15999 goto exit; 16000 } 16001 16002 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16003 /* 16004 * We want to either retry or fail this command, so free 16005 * the DMA resources here. If we retry the command then 16006 * the DMA resources will be reallocated in sd_start_cmds(). 16007 * Note that when PKT_DMA_PARTIAL is used, this reallocation 16008 * causes the *entire* transfer to start over again from the 16009 * beginning of the request, even for PARTIAL chunks that 16010 * have already transferred successfully. 
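 *
 * The handshake with sd_start_cmds() is, roughly (a sketch of
 * the mechanism, pieced together from the two routines): after
 * scsi_dmafree(9F) releases the binding, SD_XB_DMA_FREED is set
 * in xb_pkt_flags; on retry, sd_start_cmds() notices
 *
 *	(xp->xb_pktp == NULL) ||
 *	    ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)
 *
 * and calls back into the initpkt routine to reattach DMA
 * resources, at which point mpxio is free to choose another
 * path.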
16011 */ 16012 if ((un->un_f_is_fibre == TRUE) && 16013 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16014 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16015 scsi_dmafree(pktp); 16016 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16017 } 16018 #endif 16019 16020 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16021 "sdintr: arq done, sd_handle_auto_request_sense\n"); 16022 16023 sd_handle_auto_request_sense(un, bp, xp, pktp); 16024 goto exit; 16025 } 16026 16027 /* Next see if this is the REQUEST SENSE pkt for the instance */ 16028 if (pktp->pkt_flags & FLAG_SENSING) { 16029 /* This pktp is from the unit's REQUEST_SENSE command */ 16030 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16031 "sdintr: sd_handle_request_sense\n"); 16032 sd_handle_request_sense(un, bp, xp, pktp); 16033 goto exit; 16034 } 16035 16036 /* 16037 * Check to see if the command successfully completed as requested; 16038 * this is the most common case (and also the hot performance path). 16039 * 16040 * Requirements for successful completion are: 16041 * pkt_reason is CMD_CMPLT and packet status is status good. 16042 * In addition: 16043 * - A residual of zero indicates successful completion no matter what 16044 * the command is. 16045 * - If the residual is not zero and the command is not a read or 16046 * write, then it's still defined as successful completion. In other 16047 * words, if the command is a read or write the residual must be 16048 * zero for successful completion. 16049 * - If the residual is not zero and the command is a read or 16050 * write, and it's a USCSICMD, then it's still defined as 16051 * successful completion. 16052 */ 16053 if ((pktp->pkt_reason == CMD_CMPLT) && 16054 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 16055 16056 /* 16057 * Since this command is returned with a good status, we 16058 * can reset the count for Sonoma failover. 16059 */ 16060 un->un_sonoma_failure_count = 0; 16061 16062 /* 16063 * Return all USCSI commands on good status 16064 */ 16065 if (pktp->pkt_resid == 0) { 16066 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16067 "sdintr: returning command for resid == 0\n"); 16068 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 16069 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 16070 SD_UPDATE_B_RESID(bp, pktp); 16071 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16072 "sdintr: returning command for resid != 0\n"); 16073 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16074 SD_UPDATE_B_RESID(bp, pktp); 16075 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16076 "sdintr: returning uscsi command\n"); 16077 } else { 16078 goto not_successful; 16079 } 16080 sd_return_command(un, bp); 16081 16082 /* 16083 * Decrement counter to indicate that the callback routine 16084 * is done. 16085 */ 16086 un->un_in_callback--; 16087 ASSERT(un->un_in_callback >= 0); 16088 mutex_exit(SD_MUTEX(un)); 16089 16090 return; 16091 } 16092 16093 not_successful: 16094 16095 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16096 /* 16097 * The following is based upon knowledge of the underlying transport 16098 * and its use of DMA resources. This code should be removed when 16099 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 16100 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 16101 * and sd_start_cmds(). 16102 * 16103 * Free any DMA resources associated with this command if there 16104 * is a chance it could be retried or enqueued for later retry. 
16105 * If we keep the DMA binding then mpxio cannot reissue the 16106 * command on another path whenever a path failure occurs. 16107 * 16108 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 16109 * causes the *entire* transfer to start over again from the 16110 * beginning of the request, even for PARTIAL chunks that 16111 * have already transferred successfully. 16112 * 16113 * This is only done for non-uscsi commands (and also skipped for the 16114 * driver's internal RQS command). Also just do this for Fibre Channel 16115 * devices as these are the only ones that support mpxio. 16116 */ 16117 if ((un->un_f_is_fibre == TRUE) && 16118 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16119 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16120 scsi_dmafree(pktp); 16121 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16122 } 16123 #endif 16124 16125 /* 16126 * The command did not successfully complete as requested so check 16127 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16128 * driver command that should not be retried so just return. If 16129 * FLAG_DIAGNOSE is not set the error will be processed below. 16130 */ 16131 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16132 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16133 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 16134 /* 16135 * Issue a request sense if a check condition caused the error 16136 * (we handle the auto request sense case above), otherwise 16137 * just fail the command. 16138 */ 16139 if ((pktp->pkt_reason == CMD_CMPLT) && 16140 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 16141 sd_send_request_sense_command(un, bp, pktp); 16142 } else { 16143 sd_return_failed_command(un, bp, EIO); 16144 } 16145 goto exit; 16146 } 16147 16148 /* 16149 * The command did not successfully complete as requested so process 16150 * the error, retry, and/or attempt recovery. 
16151 */ 16152 switch (pktp->pkt_reason) { 16153 case CMD_CMPLT: 16154 switch (SD_GET_PKT_STATUS(pktp)) { 16155 case STATUS_GOOD: 16156 /* 16157 * The command completed successfully with a non-zero 16158 * residual 16159 */ 16160 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16161 "sdintr: STATUS_GOOD \n"); 16162 sd_pkt_status_good(un, bp, xp, pktp); 16163 break; 16164 16165 case STATUS_CHECK: 16166 case STATUS_TERMINATED: 16167 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16168 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 16169 sd_pkt_status_check_condition(un, bp, xp, pktp); 16170 break; 16171 16172 case STATUS_BUSY: 16173 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16174 "sdintr: STATUS_BUSY\n"); 16175 sd_pkt_status_busy(un, bp, xp, pktp); 16176 break; 16177 16178 case STATUS_RESERVATION_CONFLICT: 16179 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16180 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 16181 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16182 break; 16183 16184 case STATUS_QFULL: 16185 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16186 "sdintr: STATUS_QFULL\n"); 16187 sd_pkt_status_qfull(un, bp, xp, pktp); 16188 break; 16189 16190 case STATUS_MET: 16191 case STATUS_INTERMEDIATE: 16192 case STATUS_SCSI2: 16193 case STATUS_INTERMEDIATE_MET: 16194 case STATUS_ACA_ACTIVE: 16195 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 16196 "Unexpected SCSI status received: 0x%x\n", 16197 SD_GET_PKT_STATUS(pktp)); 16198 sd_return_failed_command(un, bp, EIO); 16199 break; 16200 16201 default: 16202 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 16203 "Invalid SCSI status received: 0x%x\n", 16204 SD_GET_PKT_STATUS(pktp)); 16205 sd_return_failed_command(un, bp, EIO); 16206 break; 16207 16208 } 16209 break; 16210 16211 case CMD_INCOMPLETE: 16212 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16213 "sdintr: CMD_INCOMPLETE\n"); 16214 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 16215 break; 16216 case CMD_TRAN_ERR: 16217 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16218 "sdintr: CMD_TRAN_ERR\n"); 16219 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 16220 break; 16221 case CMD_RESET: 16222 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16223 "sdintr: CMD_RESET \n"); 16224 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 16225 break; 16226 case CMD_ABORTED: 16227 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16228 "sdintr: CMD_ABORTED \n"); 16229 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 16230 break; 16231 case CMD_TIMEOUT: 16232 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16233 "sdintr: CMD_TIMEOUT\n"); 16234 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 16235 break; 16236 case CMD_UNX_BUS_FREE: 16237 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16238 "sdintr: CMD_UNX_BUS_FREE \n"); 16239 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 16240 break; 16241 case CMD_TAG_REJECT: 16242 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16243 "sdintr: CMD_TAG_REJECT\n"); 16244 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 16245 break; 16246 default: 16247 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16248 "sdintr: default\n"); 16249 sd_pkt_reason_default(un, bp, xp, pktp); 16250 break; 16251 } 16252 16253 exit: 16254 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 16255 16256 /* Decrement counter to indicate that the callback routine is done. */ 16257 un->un_in_callback--; 16258 ASSERT(un->un_in_callback >= 0); 16259 16260 /* 16261 * At this point, the pkt has been dispatched, ie, it is either 16262 * being re-tried or has been returned to its caller and should 16263 * not be referenced. 
16264  */
16265
16266 	mutex_exit(SD_MUTEX(un));
16267 }
16268
16269
16270 /*
16271  * Function: sd_print_incomplete_msg
16272  *
16273  * Description: Prints the error message for a CMD_INCOMPLETE error.
16274  *
16275  * Arguments: un - ptr to associated softstate for the device.
16276  * bp - ptr to the buf(9S) for the command.
16277  * arg - message string ptr
16278  * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
16279  * or SD_NO_RETRY_ISSUED.
16280  *
16281  * Context: May be called under interrupt context
16282  */
16283
16284 static void
16285 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
16286 {
16287 	struct scsi_pkt *pktp;
16288 	char *msgp;
16289 	char *cmdp = arg;
16290
16291 	ASSERT(un != NULL);
16292 	ASSERT(mutex_owned(SD_MUTEX(un)));
16293 	ASSERT(bp != NULL);
16294 	ASSERT(arg != NULL);
16295 	pktp = SD_GET_PKTP(bp);
16296 	ASSERT(pktp != NULL);
16297
16298 	switch (code) {
16299 	case SD_DELAYED_RETRY_ISSUED:
16300 	case SD_IMMEDIATE_RETRY_ISSUED:
16301 		msgp = "retrying";
16302 		break;
16303 	case SD_NO_RETRY_ISSUED:
16304 	default:
16305 		msgp = "giving up";
16306 		break;
16307 	}
16308
16309 	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
16310 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16311 		    "incomplete %s- %s\n", cmdp, msgp);
16312 	}
16313 }
16314
16315
16316
16317 /*
16318  * Function: sd_pkt_status_good
16319  *
16320  * Description: Processing for a STATUS_GOOD code in pkt_status.
16321  *
16322  * Context: May be called under interrupt context
16323  */
16324
16325 static void
16326 sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
16327 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
16328 {
16329 	char *cmdp;
16330
16331 	ASSERT(un != NULL);
16332 	ASSERT(mutex_owned(SD_MUTEX(un)));
16333 	ASSERT(bp != NULL);
16334 	ASSERT(xp != NULL);
16335 	ASSERT(pktp != NULL);
16336 	ASSERT(pktp->pkt_reason == CMD_CMPLT);
16337 	ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
16338 	ASSERT(pktp->pkt_resid != 0);
16339
16340 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");
16341
16342 	SD_UPDATE_ERRSTATS(un, sd_harderrs);
16343 	switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
16344 	case SCMD_READ:
16345 		cmdp = "read";
16346 		break;
16347 	case SCMD_WRITE:
16348 		cmdp = "write";
16349 		break;
16350 	default:
16351 		SD_UPDATE_B_RESID(bp, pktp);
16352 		sd_return_command(un, bp);
16353 		SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
16354 		return;
16355 	}
16356
16357 	/*
16358 	 * See if we can retry the read/write, preferably immediately.
16359 	 * If retries are exhausted, then sd_retry_command() will update
16360 	 * the b_resid count.
16361 	 */
16362 	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
16363 	    cmdp, EIO, (clock_t)0, NULL);
16364
16365 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
16366 }
16367
16368
16369
16370
16371
16372 /*
16373  * Function: sd_handle_request_sense
16374  *
16375  * Description: Processing for non-auto Request Sense command.
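 *
 * Editorial aside, not part of the original source: the recovery of
 * the original command from the RQS completion hinges on the sense
 * xbuf holding a back-pointer to the original buf. A minimal sketch
 * of that linkage follows; the example_* names are hypothetical
 * stand-ins, not driver types.
 */

#if 0	/* illustrative sketch only, never compiled */
struct example_buf;

struct example_xbuf {
	struct example_buf	*xb_sense_bp;	/* original command's buf */
};

/*
 * Mirrors the cmd_bp = sense_xp->xb_sense_bp step performed by
 * sd_handle_request_sense() below.
 */
static struct example_buf *
example_original_command(struct example_xbuf *sense_xp)
{
	return (sense_xp->xb_sense_bp);
}
#endif

/*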
16376 * 16377 * Arguments: un - ptr to associated softstate 16378 * sense_bp - ptr to buf(9S) for the RQS command 16379 * sense_xp - ptr to the sd_xbuf for the RQS command 16380 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 16381 * 16382 * Context: May be called under interrupt context 16383 */ 16384 16385 static void 16386 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 16387 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 16388 { 16389 struct buf *cmd_bp; /* buf for the original command */ 16390 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 16391 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 16392 16393 ASSERT(un != NULL); 16394 ASSERT(mutex_owned(SD_MUTEX(un))); 16395 ASSERT(sense_bp != NULL); 16396 ASSERT(sense_xp != NULL); 16397 ASSERT(sense_pktp != NULL); 16398 16399 /* 16400 * Note the sense_bp, sense_xp, and sense_pktp here are for the 16401 * RQS command and not the original command. 16402 */ 16403 ASSERT(sense_pktp == un->un_rqs_pktp); 16404 ASSERT(sense_bp == un->un_rqs_bp); 16405 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 16406 (FLAG_SENSING | FLAG_HEAD)); 16407 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 16408 FLAG_SENSING) == FLAG_SENSING); 16409 16410 /* These are the bp, xp, and pktp for the original command */ 16411 cmd_bp = sense_xp->xb_sense_bp; 16412 cmd_xp = SD_GET_XBUF(cmd_bp); 16413 cmd_pktp = SD_GET_PKTP(cmd_bp); 16414 16415 if (sense_pktp->pkt_reason != CMD_CMPLT) { 16416 /* 16417 * The REQUEST SENSE command failed. Release the REQUEST 16418 * SENSE command for re-use, get back the bp for the original 16419 * command, and attempt to re-try the original command if 16420 * FLAG_DIAGNOSE is not set in the original packet. 16421 */ 16422 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16423 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16424 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 16425 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 16426 NULL, NULL, EIO, (clock_t)0, NULL); 16427 return; 16428 } 16429 } 16430 16431 /* 16432 * Save the relevant sense info into the xp for the original cmd. 16433 * 16434 * Note: if the request sense failed the state info will be zero 16435 * as set in sd_mark_rqs_busy() 16436 */ 16437 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 16438 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 16439 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 16440 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, SENSE_LENGTH); 16441 16442 /* 16443 * Free up the RQS command.... 16444 * NOTE: 16445 * Must do this BEFORE calling sd_validate_sense_data! 16446 * sd_validate_sense_data may return the original command in 16447 * which case the pkt will be freed and the flags can no 16448 * longer be touched. 16449 * SD_MUTEX is held through this process until the command 16450 * is dispatched based upon the sense data, so there are 16451 * no race conditions. 16452 */ 16453 (void) sd_mark_rqs_idle(un, sense_xp); 16454 16455 /* 16456 * For a retryable command see if we have valid sense data, if so then 16457 * turn it over to sd_decode_sense() to figure out the right course of 16458 * action. Just fail a non-retryable command. 
16459 */ 16460 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16461 if (sd_validate_sense_data(un, cmd_bp, cmd_xp) == 16462 SD_SENSE_DATA_IS_VALID) { 16463 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 16464 } 16465 } else { 16466 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 16467 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 16468 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 16469 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 16470 sd_return_failed_command(un, cmd_bp, EIO); 16471 } 16472 } 16473 16474 16475 16476 16477 /* 16478 * Function: sd_handle_auto_request_sense 16479 * 16480 * Description: Processing for auto-request sense information. 16481 * 16482 * Arguments: un - ptr to associated softstate 16483 * bp - ptr to buf(9S) for the command 16484 * xp - ptr to the sd_xbuf for the command 16485 * pktp - ptr to the scsi_pkt(9S) for the command 16486 * 16487 * Context: May be called under interrupt context 16488 */ 16489 16490 static void 16491 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 16492 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16493 { 16494 struct scsi_arq_status *asp; 16495 16496 ASSERT(un != NULL); 16497 ASSERT(mutex_owned(SD_MUTEX(un))); 16498 ASSERT(bp != NULL); 16499 ASSERT(xp != NULL); 16500 ASSERT(pktp != NULL); 16501 ASSERT(pktp != un->un_rqs_pktp); 16502 ASSERT(bp != un->un_rqs_bp); 16503 16504 /* 16505 * For auto-request sense, we get a scsi_arq_status back from 16506 * the HBA, with the sense data in the sts_sensedata member. 16507 * The pkt_scbp of the packet points to this scsi_arq_status. 16508 */ 16509 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16510 16511 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 16512 /* 16513 * The auto REQUEST SENSE failed; see if we can re-try 16514 * the original command. 16515 */ 16516 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16517 "auto request sense failed (reason=%s)\n", 16518 scsi_rname(asp->sts_rqpkt_reason)); 16519 16520 sd_reset_target(un, pktp); 16521 16522 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16523 NULL, NULL, EIO, (clock_t)0, NULL); 16524 return; 16525 } 16526 16527 /* Save the relevant sense info into the xp for the original cmd. */ 16528 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 16529 xp->xb_sense_state = asp->sts_rqpkt_state; 16530 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16531 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16532 min(sizeof (struct scsi_extended_sense), SENSE_LENGTH)); 16533 16534 /* 16535 * See if we have valid sense data, if so then turn it over to 16536 * sd_decode_sense() to figure out the right course of action. 16537 */ 16538 if (sd_validate_sense_data(un, bp, xp) == SD_SENSE_DATA_IS_VALID) { 16539 sd_decode_sense(un, bp, xp, pktp); 16540 } 16541 } 16542 16543 16544 /* 16545 * Function: sd_print_sense_failed_msg 16546 * 16547 * Description: Print log message when RQS has failed. 
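 *
 * Editorial aside, not part of the original source: this function is
 * one of several message callbacks handed to sd_retry_command(), which
 * invokes the callback with a code describing the retry disposition.
 * A hypothetical callback following the same contract might look like
 * the sketch below (example_quiet_msg is not a driver function).
 */

#if 0	/* illustrative sketch only, never compiled */
static void
example_quiet_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	/* Log only when the driver has given up on the command. */
	if ((code == SD_NO_RETRY_ISSUED) && (arg != NULL)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, (char *)arg);
	}
}
#endif

/*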
16548 * 16549 * Arguments: un - ptr to associated softstate 16550 * bp - ptr to buf(9S) for the command 16551 * arg - generic message string ptr 16552 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16553 * or SD_NO_RETRY_ISSUED 16554 * 16555 * Context: May be called from interrupt context 16556 */ 16557 16558 static void 16559 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 16560 int code) 16561 { 16562 char *msgp = arg; 16563 16564 ASSERT(un != NULL); 16565 ASSERT(mutex_owned(SD_MUTEX(un))); 16566 ASSERT(bp != NULL); 16567 16568 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 16569 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 16570 } 16571 } 16572 16573 16574 /* 16575 * Function: sd_validate_sense_data 16576 * 16577 * Description: Check the given sense data for validity. 16578 * If the sense data is not valid, the command will 16579 * be either failed or retried! 16580 * 16581 * Return Code: SD_SENSE_DATA_IS_INVALID 16582 * SD_SENSE_DATA_IS_VALID 16583 * 16584 * Context: May be called from interrupt context 16585 */ 16586 16587 static int 16588 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp) 16589 { 16590 struct scsi_extended_sense *esp; 16591 struct scsi_pkt *pktp; 16592 size_t actual_len; 16593 char *msgp = NULL; 16594 16595 ASSERT(un != NULL); 16596 ASSERT(mutex_owned(SD_MUTEX(un))); 16597 ASSERT(bp != NULL); 16598 ASSERT(bp != un->un_rqs_bp); 16599 ASSERT(xp != NULL); 16600 16601 pktp = SD_GET_PKTP(bp); 16602 ASSERT(pktp != NULL); 16603 16604 /* 16605 * Check the status of the RQS command (auto or manual). 16606 */ 16607 switch (xp->xb_sense_status & STATUS_MASK) { 16608 case STATUS_GOOD: 16609 break; 16610 16611 case STATUS_RESERVATION_CONFLICT: 16612 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16613 return (SD_SENSE_DATA_IS_INVALID); 16614 16615 case STATUS_BUSY: 16616 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16617 "Busy Status on REQUEST SENSE\n"); 16618 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 16619 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 16620 return (SD_SENSE_DATA_IS_INVALID); 16621 16622 case STATUS_QFULL: 16623 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16624 "QFULL Status on REQUEST SENSE\n"); 16625 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 16626 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 16627 return (SD_SENSE_DATA_IS_INVALID); 16628 16629 case STATUS_CHECK: 16630 case STATUS_TERMINATED: 16631 msgp = "Check Condition on REQUEST SENSE\n"; 16632 goto sense_failed; 16633 16634 default: 16635 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 16636 goto sense_failed; 16637 } 16638 16639 /* 16640 * See if we got the minimum required amount of sense data. 16641 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 16642 * or less. 
16643  */
16644 	actual_len = (int)(SENSE_LENGTH - xp->xb_sense_resid);
16645 	if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
16646 	    (actual_len == 0)) {
16647 		msgp = "Request Sense couldn't get sense data\n";
16648 		goto sense_failed;
16649 	}
16650
16651 	if (actual_len < SUN_MIN_SENSE_LENGTH) {
16652 		msgp = "Not enough sense information\n";
16653 		goto sense_failed;
16654 	}
16655
16656 	/*
16657 	 * We require the extended sense data
16658 	 */
16659 	esp = (struct scsi_extended_sense *)xp->xb_sense_data;
16660 	if (esp->es_class != CLASS_EXTENDED_SENSE) {
16661 		if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
16662 			static char tmp[8];
16663 			static char buf[148];
16664 			char *p = (char *)(xp->xb_sense_data);
16665 			int i;
16666
16667 			mutex_enter(&sd_sense_mutex);
16668 			(void) strcpy(buf, "undecodable sense information:");
16669 			for (i = 0; i < actual_len; i++) {
16670 				(void) sprintf(tmp, " 0x%x", *(p++)&0xff);
16671 				(void) strcpy(&buf[strlen(buf)], tmp);
16672 			}
16673 			i = strlen(buf);
16674 			(void) strcpy(&buf[i], "-(assumed fatal)\n");
16675 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf);
16676 			mutex_exit(&sd_sense_mutex);
16677 		}
16678 		/* Note: Legacy behavior, fail the command with no retry */
16679 		sd_return_failed_command(un, bp, EIO);
16680 		return (SD_SENSE_DATA_IS_INVALID);
16681 	}
16682
16683 	/*
16684 	 * Check that es_code is valid (es_class concatenated with es_code
16685 	 * makes up the "response code" field). es_class will always be 7, so
16686 	 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the
16687 	 * format.
16688 	 */
16689 	if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
16690 	    (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
16691 	    (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
16692 	    (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
16693 	    (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
16694 		goto sense_failed;
16695 	}
16696
16697 	return (SD_SENSE_DATA_IS_VALID);
16698
16699 sense_failed:
16700 	/*
16701 	 * If the request sense failed (for whatever reason), attempt
16702 	 * to retry the original command.
16703 	 */
16704 #if defined(__i386) || defined(__amd64)
16705 	/*
16706 	 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in
16707 	 * sddef.h for the sparc platform, while x86 uses one binary
16708 	 * for both SCSI and FC.
16709 	 * The SD_RETRY_DELAY value needs to be adjusted here
16710 	 * whenever SD_RETRY_DELAY changes in sddef.h.
16711 	 */
16712 	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
16713 	    sd_print_sense_failed_msg, msgp, EIO,
16714 	    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL);
16715 #else
16716 	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
16717 	    sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
16718 #endif
16719
16720 	return (SD_SENSE_DATA_IS_INVALID);
16721 }
16722
16723
16724
16725 /*
16726  * Function: sd_decode_sense
16727  *
16728  * Description: Take recovery action(s) when SCSI Sense Data is received.
16729  *
16730  * Context: Interrupt context.
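 *
 * Editorial aside, not part of the original source: the format
 * dispatch below keys off es_code, the low nibble of the sense
 * "response code" byte. For reference, a reduced sketch of the same
 * key/asc/ascq extraction working directly on a raw sense buffer
 * (example_sense_fields is hypothetical):
 */

#if 0	/* illustrative sketch only, never compiled */
static void
example_sense_fields(const uint8_t *sb, uint8_t *key, uint8_t *asc,
    uint8_t *ascq)
{
	uint8_t rc = sb[0] & 0x7f;	/* response code, sans valid bit */

	if ((rc == 0x72) || (rc == 0x73)) {
		/* descriptor format: key/asc/ascq live in bytes 1-3 */
		*key = sb[1] & 0x0f;
		*asc = sb[2];
		*ascq = sb[3];
	} else {
		/* fixed format (0x70/0x71): bytes 2, 12 and 13 */
		*key = sb[2] & 0x0f;
		*asc = sb[12];
		*ascq = sb[13];
	}
}
#endif

/*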
16731  */
16732
16733 static void
16734 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
16735 	struct scsi_pkt *pktp)
16736 {
16737 	struct scsi_extended_sense *esp;
16738 	struct scsi_descr_sense_hdr *sdsp;
16739 	uint8_t asc, ascq, sense_key;
16740
16741 	ASSERT(un != NULL);
16742 	ASSERT(mutex_owned(SD_MUTEX(un)));
16743 	ASSERT(bp != NULL);
16744 	ASSERT(bp != un->un_rqs_bp);
16745 	ASSERT(xp != NULL);
16746 	ASSERT(pktp != NULL);
16747
16748 	esp = (struct scsi_extended_sense *)xp->xb_sense_data;
16749
16750 	switch (esp->es_code) {
16751 	case CODE_FMT_DESCR_CURRENT:
16752 	case CODE_FMT_DESCR_DEFERRED:
16753 		sdsp = (struct scsi_descr_sense_hdr *)xp->xb_sense_data;
16754 		sense_key = sdsp->ds_key;
16755 		asc = sdsp->ds_add_code;
16756 		ascq = sdsp->ds_qual_code;
16757 		break;
16758 	case CODE_FMT_VENDOR_SPECIFIC:
16759 	case CODE_FMT_FIXED_CURRENT:
16760 	case CODE_FMT_FIXED_DEFERRED:
16761 	default:
16762 		sense_key = esp->es_key;
16763 		asc = esp->es_add_code;
16764 		ascq = esp->es_qual_code;
16765 		break;
16766 	}
16767
16768 	switch (sense_key) {
16769 	case KEY_NO_SENSE:
16770 		sd_sense_key_no_sense(un, bp, xp, pktp);
16771 		break;
16772 	case KEY_RECOVERABLE_ERROR:
16773 		sd_sense_key_recoverable_error(un, asc, bp, xp, pktp);
16774 		break;
16775 	case KEY_NOT_READY:
16776 		sd_sense_key_not_ready(un, asc, ascq, bp, xp, pktp);
16777 		break;
16778 	case KEY_MEDIUM_ERROR:
16779 	case KEY_HARDWARE_ERROR:
16780 		sd_sense_key_medium_or_hardware_error(un,
16781 		    sense_key, asc, bp, xp, pktp);
16782 		break;
16783 	case KEY_ILLEGAL_REQUEST:
16784 		sd_sense_key_illegal_request(un, bp, xp, pktp);
16785 		break;
16786 	case KEY_UNIT_ATTENTION:
16787 		sd_sense_key_unit_attention(un, asc, bp, xp, pktp);
16788 		break;
16789 	case KEY_WRITE_PROTECT:
16790 	case KEY_VOLUME_OVERFLOW:
16791 	case KEY_MISCOMPARE:
16792 		sd_sense_key_fail_command(un, bp, xp, pktp);
16793 		break;
16794 	case KEY_BLANK_CHECK:
16795 		sd_sense_key_blank_check(un, bp, xp, pktp);
16796 		break;
16797 	case KEY_ABORTED_COMMAND:
16798 		sd_sense_key_aborted_command(un, bp, xp, pktp);
16799 		break;
16800 	case KEY_VENDOR_UNIQUE:
16801 	case KEY_COPY_ABORTED:
16802 	case KEY_EQUAL:
16803 	case KEY_RESERVED:
16804 	default:
16805 		sd_sense_key_default(un, sense_key, bp, xp, pktp);
16806 		break;
16807 	}
16808 }
16809
16810
16811 /*
16812  * Function: sd_dump_memory
16813  *
16814  * Description: Debug logging routine to print the contents of a user-provided
16815  * buffer. The output of the buffer is broken up into 256-byte
16816  * segments due to a size constraint of the scsi_log
16817  * implementation.
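 *
 * Editorial aside, not part of the original source: the chunking
 * arithmetic appears further down as (buf - strlen(title) - 3) /
 * entry_len. Worked example under the hex format: the entry " 0x%02x"
 * renders in 5 characters, so a title such as "Sense Data" (10 chars)
 * yields (256 - 10 - 3) / 5 = 48 bytes per scsi_log() call. A
 * stand-alone sketch of that computation (example_* names are
 * hypothetical):
 */

#if 0	/* illustrative sketch only, never compiled */
static int
example_entries_per_line(size_t bufsize, const char *title, size_t entry_len)
{
	/* reserve room for the ':', the newline and the terminator */
	return ((int)((bufsize - strlen(title) - 3) / entry_len));
}
#endif

/*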
16818  *
16819  * Arguments: un - ptr to softstate
16820  * comp - component mask
16821  * title - "title" string to precede data when printed
16822  * data - ptr to data block to be printed
16823  * len - size of data block to be printed
16824  * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
16825  *
16826  * Context: May be called from interrupt context
16827  */
16828
16829 #define SD_DUMP_MEMORY_BUF_SIZE 256
16830
16831 static char *sd_dump_format_string[] = {
16832 		" 0x%02x",
16833 		" %c"
16834 };
16835
16836 static void
16837 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
16838 	int len, int fmt)
16839 {
16840 	int i, j;
16841 	int avail_count;
16842 	int start_offset;
16843 	int end_offset;
16844 	size_t entry_len;
16845 	char *bufp;
16846 	char *local_buf;
16847 	char *format_string;
16848
16849 	ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));
16850
16851 	/*
16852 	 * In the debug version of the driver, this function is called from a
16853 	 * number of places which are NOPs in the release driver.
16854 	 * The debug driver therefore has additional methods of filtering
16855 	 * debug output.
16856 	 */
16857 #ifdef SDDEBUG
16858 	/*
16859 	 * In the debug version of the driver we can reduce the amount of debug
16860 	 * messages by setting sd_error_level to something other than
16861 	 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
16862 	 * sd_component_mask.
16863 	 */
16864 	if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
16865 	    (sd_error_level != SCSI_ERR_ALL)) {
16866 		return;
16867 	}
16868 	if (((sd_component_mask & comp) == 0) ||
16869 	    (sd_error_level != SCSI_ERR_ALL)) {
16870 		return;
16871 	}
16872 #else
16873 	if (sd_error_level != SCSI_ERR_ALL) {
16874 		return;
16875 	}
16876 #endif
16877
16878 	local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
16879 	bufp = local_buf;
16880 	/*
16881 	 * Available length is the length of local_buf[], minus the
16882 	 * length of the title string, minus one for the ":", minus
16883 	 * one for the newline, minus one for the NULL terminator.
16884 	 * This gives the #bytes available for holding the printed
16885 	 * values from the given data buffer.
16886 	 */
16887 	if (fmt == SD_LOG_HEX) {
16888 		format_string = sd_dump_format_string[0];
16889 	} else /* SD_LOG_CHAR */ {
16890 		format_string = sd_dump_format_string[1];
16891 	}
16892 	/*
16893 	 * Available count is the number of elements from the given
16894 	 * data buffer that we can fit into the available length.
16895 	 * This is based upon the size of the format string used.
16896 	 * Make one entry and find its size.
16897 	 */
16898 	(void) sprintf(bufp, format_string, data[0]);
16899 	entry_len = strlen(bufp);
16900 	avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;
16901
16902 	j = 0;
16903 	while (j < len) {
16904 		bufp = local_buf;
16905 		bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
16906 		start_offset = j;
16907
16908 		end_offset = start_offset + avail_count;
16909
16910 		(void) sprintf(bufp, "%s:", title);
16911 		bufp += strlen(bufp);
16912 		for (i = start_offset; ((i < end_offset) && (j < len));
16913 		    i++, j++) {
16914 			(void) sprintf(bufp, format_string, data[i]);
16915 			bufp += entry_len;
16916 		}
16917 		(void) sprintf(bufp, "\n");
16918
16919 		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
16920 	}
16921 	kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
16922 }
16923
16924 /*
16925  * Function: sd_print_sense_msg
16926  *
16927  * Description: Log a message based upon the given sense data.
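 *
 * Editorial aside, not part of the original source: for fixed-format
 * sense the error block number is assembled big-endian from the four
 * information bytes (es_info_1..es_info_4, i.e. sense bytes 3-6), as
 * done in the body below. A reduced sketch on a raw buffer, where
 * example_fixed_info_lba is hypothetical: info bytes 00 12 34 56
 * yield block 0x123456.
 */

#if 0	/* illustrative sketch only, never compiled */
static uint32_t
example_fixed_info_lba(const uint8_t *sb)
{
	return (((uint32_t)sb[3] << 24) | ((uint32_t)sb[4] << 16) |
	    ((uint32_t)sb[5] << 8) | (uint32_t)sb[6]);
}
#endif

/*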
16928  *
16929  * Arguments: un - ptr to associated softstate
16930  * bp - ptr to buf(9S) for the command
16931  * arg - ptr to associated sd_sense_info struct
16932  * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
16933  * or SD_NO_RETRY_ISSUED
16934  *
16935  * Context: May be called from interrupt context
16936  */
16937
16938 static void
16939 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
16940 {
16941 	struct sd_xbuf *xp;
16942 	struct scsi_pkt *pktp;
16943 	struct scsi_extended_sense *sensep;
16944 	daddr_t request_blkno;
16945 	diskaddr_t err_blkno;
16946 	int severity;
16947 	int pfa_flag;
16948 	int fixed_format = TRUE;
16949 	extern struct scsi_key_strings scsi_cmds[];
16950
16951 	ASSERT(un != NULL);
16952 	ASSERT(mutex_owned(SD_MUTEX(un)));
16953 	ASSERT(bp != NULL);
16954 	xp = SD_GET_XBUF(bp);
16955 	ASSERT(xp != NULL);
16956 	pktp = SD_GET_PKTP(bp);
16957 	ASSERT(pktp != NULL);
16958 	ASSERT(arg != NULL);
16959
16960 	severity = ((struct sd_sense_info *)(arg))->ssi_severity;
16961 	pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;
16962
16963 	if ((code == SD_DELAYED_RETRY_ISSUED) ||
16964 	    (code == SD_IMMEDIATE_RETRY_ISSUED)) {
16965 		severity = SCSI_ERR_RETRYABLE;
16966 	}
16967
16968 	/* Use absolute block number for the request block number */
16969 	request_blkno = xp->xb_blkno;
16970
16971 	/*
16972 	 * Now try to get the error block number from the sense data
16973 	 */
16974 	sensep = (struct scsi_extended_sense *)xp->xb_sense_data;
16975 	switch (sensep->es_code) {
16976 	case CODE_FMT_DESCR_CURRENT:
16977 	case CODE_FMT_DESCR_DEFERRED:
16978 		err_blkno =
16979 		    sd_extract_sense_info_descr(
16980 			(struct scsi_descr_sense_hdr *)sensep);
16981 		fixed_format = FALSE;
16982 		break;
16983 	case CODE_FMT_FIXED_CURRENT:
16984 	case CODE_FMT_FIXED_DEFERRED:
16985 	case CODE_FMT_VENDOR_SPECIFIC:
16986 	default:
16987 		/*
16988 		 * With the es_valid bit set, we assume that the error
16989 		 * blkno is in the sense data. Also, if xp->xb_blkno is
16990 		 * greater than 0xffffffff then the target *should* have used
16991 		 * a descriptor sense format (or it shouldn't have set
16992 		 * the es_valid bit), and we may as well ignore the
16993 		 * 32-bit value.
16994 		 */
16995 		if ((sensep->es_valid != 0) && (xp->xb_blkno <= 0xffffffff)) {
16996 			err_blkno = (diskaddr_t)
16997 			    ((sensep->es_info_1 << 24) |
16998 			    (sensep->es_info_2 << 16) |
16999 			    (sensep->es_info_3 << 8) |
17000 			    (sensep->es_info_4));
17001 		} else {
17002 			err_blkno = (diskaddr_t)-1;
17003 		}
17004 		break;
17005 	}
17006
17007 	if (err_blkno == (diskaddr_t)-1) {
17008 		/*
17009 		 * Without the es_valid bit set (for fixed format) or an
17010 		 * information descriptor (for descriptor format) we cannot
17011 		 * be certain of the error blkno, so just use the
17012 		 * request_blkno.
17013 		 */
17014 		err_blkno = (diskaddr_t)request_blkno;
17015 	} else {
17016 		/*
17017 		 * We retrieved the error block number from the information
17018 		 * portion of the sense data.
17019 		 *
17020 		 * For USCSI commands we are better off using the error
17021 		 * block no. as the requested block no. (This is the best
17022 		 * we can estimate.)
17023 		 */
17024 		if ((SD_IS_BUFIO(xp) == FALSE) &&
17025 		    ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
17026 			request_blkno = err_blkno;
17027 		}
17028 	}
17029
17030 	/*
17031 	 * The following will log the buffer contents for the release driver
17032 	 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
17033 	 * level is set to verbose.
17034 	 */
17035 	sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
17036 	    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
17037 	sd_dump_memory(un, SD_LOG_IO, "Sense Data",
17038 	    (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);
17039
17040 	if (pfa_flag == FALSE) {
17041 		/* This is normally only set for USCSI */
17042 		if ((pktp->pkt_flags & FLAG_SILENT) != 0) {
17043 			return;
17044 		}
17045
17046 		if ((SD_IS_BUFIO(xp) == TRUE) &&
17047 		    (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
17048 		    (severity < sd_error_level))) {
17049 			return;
17050 		}
17051 	}
17052
17053 	/*
17054 	 * If the data is fixed format then check for Sonoma Failover,
17055 	 * and keep a count of how many failed I/O's. We should not have
17056 	 * to worry about Sonoma returning descriptor format sense data,
17057 	 * and asc/ascq are in a different location in descriptor format.
17058 	 */
17059 	if (fixed_format &&
17060 	    (SD_IS_LSI(un)) && (sensep->es_key == KEY_ILLEGAL_REQUEST) &&
17061 	    (sensep->es_add_code == 0x94) && (sensep->es_qual_code == 0x01)) {
17062 		un->un_sonoma_failure_count++;
17063 		if (un->un_sonoma_failure_count > 1) {
17064 			return;
17065 		}
17066 	}
17067
17068 	scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity,
17069 	    request_blkno, err_blkno, scsi_cmds, sensep,
17070 	    un->un_additional_codes, NULL);
17071 }
17072
17073 /*
17074  * Function: sd_extract_sense_info_descr
17075  *
17076  * Description: Retrieve "information" field from descriptor format
17077  * sense data. Iterates through each sense descriptor
17078  * looking for the information descriptor and returns
17079  * the information field from that descriptor.
17080  *
17081  * Context: May be called from interrupt context
17082  */
17083
17084 static diskaddr_t
17085 sd_extract_sense_info_descr(struct scsi_descr_sense_hdr *sdsp)
17086 {
17087 	diskaddr_t result;
17088 	uint8_t *descr_offset;
17089 	int valid_sense_length;
17090 	struct scsi_information_sense_descr *isd;
17091
17092 	/*
17093 	 * Initialize result to -1 indicating there is no information
17094 	 * descriptor
17095 	 */
17096 	result = (diskaddr_t)-1;
17097
17098 	/*
17099 	 * The first descriptor will immediately follow the header
17100 	 */
17101 	descr_offset = (uint8_t *)(sdsp+1); /* Pointer arithmetic */
17102
17103 	/*
17104 	 * Calculate the amount of valid sense data
17105 	 */
17106 	valid_sense_length =
17107 	    min((sizeof (struct scsi_descr_sense_hdr) +
17108 	    sdsp->ds_addl_sense_length),
17109 	    SENSE_LENGTH);
17110
17111 	/*
17112 	 * Iterate through the list of descriptors, stopping when we
17113 	 * run out of sense data
17114 	 */
17115 	while ((descr_offset + sizeof (struct scsi_information_sense_descr)) <=
17116 	    (uint8_t *)sdsp + valid_sense_length) {
17117 		/*
17118 		 * Check if this is an information descriptor. We can
17119 		 * use the scsi_information_sense_descr structure as a
17120 		 * template since the first two fields are always the
17121 		 * same.
17122 		 */
17123 		isd = (struct scsi_information_sense_descr *)descr_offset;
17124 		if (isd->isd_descr_type == DESCR_INFORMATION) {
17125 			/*
17126 			 * Found an information descriptor. Copy the
17127 			 * information field. There will only be one
17128 			 * information descriptor so we can stop looking.
17129 */ 17130 result = 17131 (((diskaddr_t)isd->isd_information[0] << 56) | 17132 ((diskaddr_t)isd->isd_information[1] << 48) | 17133 ((diskaddr_t)isd->isd_information[2] << 40) | 17134 ((diskaddr_t)isd->isd_information[3] << 32) | 17135 ((diskaddr_t)isd->isd_information[4] << 24) | 17136 ((diskaddr_t)isd->isd_information[5] << 16) | 17137 ((diskaddr_t)isd->isd_information[6] << 8) | 17138 ((diskaddr_t)isd->isd_information[7])); 17139 break; 17140 } 17141 17142 /* 17143 * Get pointer to the next descriptor. The "additional 17144 * length" field holds the length of the descriptor except 17145 * for the "type" and "additional length" fields, so 17146 * we need to add 2 to get the total length. 17147 */ 17148 descr_offset += (isd->isd_addl_length + 2); 17149 } 17150 17151 return (result); 17152 } 17153 17154 /* 17155 * Function: sd_sense_key_no_sense 17156 * 17157 * Description: Recovery action when sense data was not received. 17158 * 17159 * Context: May be called from interrupt context 17160 */ 17161 17162 static void 17163 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 17164 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17165 { 17166 struct sd_sense_info si; 17167 17168 ASSERT(un != NULL); 17169 ASSERT(mutex_owned(SD_MUTEX(un))); 17170 ASSERT(bp != NULL); 17171 ASSERT(xp != NULL); 17172 ASSERT(pktp != NULL); 17173 17174 si.ssi_severity = SCSI_ERR_FATAL; 17175 si.ssi_pfa_flag = FALSE; 17176 17177 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17178 17179 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17180 &si, EIO, (clock_t)0, NULL); 17181 } 17182 17183 17184 /* 17185 * Function: sd_sense_key_recoverable_error 17186 * 17187 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 17188 * 17189 * Context: May be called from interrupt context 17190 */ 17191 17192 static void 17193 sd_sense_key_recoverable_error(struct sd_lun *un, 17194 uint8_t asc, 17195 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17196 { 17197 struct sd_sense_info si; 17198 17199 ASSERT(un != NULL); 17200 ASSERT(mutex_owned(SD_MUTEX(un))); 17201 ASSERT(bp != NULL); 17202 ASSERT(xp != NULL); 17203 ASSERT(pktp != NULL); 17204 17205 /* 17206 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 17207 */ 17208 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 17209 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17210 si.ssi_severity = SCSI_ERR_INFO; 17211 si.ssi_pfa_flag = TRUE; 17212 } else { 17213 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17214 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 17215 si.ssi_severity = SCSI_ERR_RECOVERED; 17216 si.ssi_pfa_flag = FALSE; 17217 } 17218 17219 if (pktp->pkt_resid == 0) { 17220 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17221 sd_return_command(un, bp); 17222 return; 17223 } 17224 17225 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17226 &si, EIO, (clock_t)0, NULL); 17227 } 17228 17229 17230 17231 17232 /* 17233 * Function: sd_sense_key_not_ready 17234 * 17235 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
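 *
 * Editorial aside, not part of the original source: for ASC 0x04
 * (LOGICAL UNIT NOT READY) the routine effectively applies a small
 * per-ASCQ policy, condensed below as a sketch (the table is
 * hypothetical documentation, not driver data):
 */

#if 0	/* illustrative sketch only, never compiled */
static const struct {
	uint8_t		ascq;
	const char	*action;
} example_lun_not_ready_policy[] = {
	{ 0x00, "reset target, schedule START STOP UNIT, retry" },
	{ 0x01, "becoming ready: retry with delay" },
	{ 0x02, "initializing command required: START STOP UNIT, retry" },
	{ 0x03, "manual intervention required: fail immediately" },
	{ 0x88, "T3/T4 path problem: fail immediately" },
};
#endif

/*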
17236  *
17237  * Context: May be called from interrupt context
17238  */
17239
17240 static void
17241 sd_sense_key_not_ready(struct sd_lun *un,
17242 	uint8_t asc, uint8_t ascq,
17243 	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
17244 {
17245 	struct sd_sense_info si;
17246
17247 	ASSERT(un != NULL);
17248 	ASSERT(mutex_owned(SD_MUTEX(un)));
17249 	ASSERT(bp != NULL);
17250 	ASSERT(xp != NULL);
17251 	ASSERT(pktp != NULL);
17252
17253 	si.ssi_severity = SCSI_ERR_FATAL;
17254 	si.ssi_pfa_flag = FALSE;
17255
17256 	/*
17257 	 * Update error stats after first NOT READY error. Disks may have
17258 	 * been powered down and may need to be restarted. For CDROMs,
17259 	 * report NOT READY errors only if media is present.
17260 	 */
17261 	if ((ISCD(un) && (un->un_f_geometry_is_valid == TRUE)) ||
17262 	    (xp->xb_retry_count > 0)) {
17263 		SD_UPDATE_ERRSTATS(un, sd_harderrs);
17264 		SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
17265 	}
17266
17267 	/*
17268 	 * Just fail if the "not ready" retry limit has been reached.
17269 	 */
17270 	if (xp->xb_retry_count >= un->un_notready_retry_count) {
17271 		/* Special check for error message printing for removables. */
17272 		if ((ISREMOVABLE(un)) && (asc == 0x04) &&
17273 		    (ascq >= 0x04)) {
17274 			si.ssi_severity = SCSI_ERR_ALL;
17275 		}
17276 		goto fail_command;
17277 	}
17278
17279 	/*
17280 	 * Check the ASC and ASCQ in the sense data as needed, to determine
17281 	 * what to do.
17282 	 */
17283 	switch (asc) {
17284 	case 0x04:	/* LOGICAL UNIT NOT READY */
17285 		/*
17286 		 * Disk drives that don't spin up result in a very long delay
17287 		 * in format without warning messages. We will log a message
17288 		 * if the error level is set to verbose.
17289 		 */
17290 		if (sd_error_level < SCSI_ERR_RETRYABLE) {
17291 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17292 			    "logical unit not ready, resetting disk\n");
17293 		}
17294
17295 		/*
17296 		 * There are different requirements for CDROMs and disks for
17297 		 * the number of retries. If a CD-ROM is giving this, it is
17298 		 * probably reading TOC and is in the process of getting
17299 		 * ready, so we should keep on trying for a long time to make
17300 		 * sure that all types of media are taken into account (for
17301 		 * some media the drive takes a long time to read TOC). For
17302 		 * disks we do not want to retry this too many times as this
17303 		 * can cause a long hang in format when the drive refuses to
17304 		 * spin up (a very common failure).
17305 		 */
17306 		switch (ascq) {
17307 		case 0x00:	/* LUN NOT READY, CAUSE NOT REPORTABLE */
17308 			/*
17309 			 * Disk drives frequently refuse to spin up which
17310 			 * results in a very long hang in format without
17311 			 * warning messages.
17312 			 *
17313 			 * Note: This code preserves the legacy behavior of
17314 			 * comparing xb_retry_count against zero for fibre
17315 			 * channel targets instead of comparing against the
17316 			 * un_reset_retry_count value. The reason for this
17317 			 * discrepancy has been so utterly lost beneath the
17318 			 * Sands of Time that even Indiana Jones could not
17319 			 * find it.
17320 			 */
17321 			if (un->un_f_is_fibre == TRUE) {
17322 				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
17323 				    (xp->xb_retry_count > 0)) &&
17324 				    (un->un_startstop_timeid == NULL)) {
17325 					scsi_log(SD_DEVINFO(un), sd_label,
17326 					    CE_WARN, "logical unit not ready, "
17327 					    "resetting disk\n");
17328 					sd_reset_target(un, pktp);
17329 				}
17330 			} else {
17331 				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
17332 				    (xp->xb_retry_count >
17333 				    un->un_reset_retry_count)) &&
17334 				    (un->un_startstop_timeid == NULL)) {
17335 					scsi_log(SD_DEVINFO(un), sd_label,
17336 					    CE_WARN, "logical unit not ready, "
17337 					    "resetting disk\n");
17338 					sd_reset_target(un, pktp);
17339 				}
17340 			}
17341 			break;
17342
17343 		case 0x01:	/* LUN IS IN PROCESS OF BECOMING READY */
17344 			/*
17345 			 * If the target is in the process of becoming
17346 			 * ready, just proceed with the retry. This can
17347 			 * happen with CD-ROMs that take a long time to
17348 			 * read TOC after a power cycle or reset.
17349 			 */
17350 			goto do_retry;
17351
17352 		case 0x02:	/* LUN NOT READY, INITIALIZING CMD REQUIRED */
17353 			break;
17354
17355 		case 0x03:	/* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
17356 			/*
17357 			 * Retries cannot help here so just fail right away.
17358 			 */
17359 			goto fail_command;
17360
17361 		case 0x88:
17362 			/*
17363 			 * Vendor-unique code for T3/T4: it indicates a
17364 			 * path problem in a multipathed config, but as far as
17365 			 * the target driver is concerned it equates to a fatal
17366 			 * error, so we should just fail the command right away
17367 			 * (without printing anything to the console). If this
17368 			 * is not a T3/T4, fall thru to the default recovery
17369 			 * action.
17370 			 * T3/T4 is FC-only, so there is no need to check is_fibre.
17371 			 */
17372 			if (SD_IS_T3(un) || SD_IS_T4(un)) {
17373 				sd_return_failed_command(un, bp, EIO);
17374 				return;
17375 			}
17376 			/* FALLTHRU */
17377
17378 		case 0x04:	/* LUN NOT READY, FORMAT IN PROGRESS */
17379 		case 0x05:	/* LUN NOT READY, REBUILD IN PROGRESS */
17380 		case 0x06:	/* LUN NOT READY, RECALCULATION IN PROGRESS */
17381 		case 0x07:	/* LUN NOT READY, OPERATION IN PROGRESS */
17382 		case 0x08:	/* LUN NOT READY, LONG WRITE IN PROGRESS */
17383 		default:	/* Possible future codes in SCSI spec? */
17384 			/*
17385 			 * For removable-media devices, do not retry if
17386 			 * ASCQ > 2 as these result mostly from USCSI commands
17387 			 * on MMC devices issued to check status of an
17388 			 * operation initiated in immediate mode. Also for
17389 			 * ASCQ >= 4 do not print console messages as these
17390 			 * mainly represent a user-initiated operation
17391 			 * instead of a system failure.
17392 			 */
17393 			if (ISREMOVABLE(un)) {
17394 				si.ssi_severity = SCSI_ERR_ALL;
17395 				goto fail_command;
17396 			}
17397 			break;
17398 		}
17399
17400 		/*
17401 		 * As part of our recovery attempt for the NOT READY
17402 		 * condition, we issue a START STOP UNIT command. However
17403 		 * we want to wait for a short delay before attempting this
17404 		 * as there may still be more commands coming back from the
17405 		 * target with the check condition. To do this we use
17406 		 * timeout(9F) to call sd_start_stop_unit_callback() after
17407 		 * the delay interval expires. (sd_start_stop_unit_callback()
17408 		 * dispatches sd_start_stop_unit_task(), which will issue
17409 		 * the actual START STOP UNIT command.) The delay interval
17410 		 * is one-half of the delay that we will use to retry the
17411 		 * command that generated the NOT READY condition.
17412 		 *
17413 		 * Note that we could just dispatch sd_start_stop_unit_task()
17414 		 * from here and allow it to sleep for the delay interval,
17415 		 * but then we would be tying up the taskq thread
17416 		 * unnecessarily for the duration of the delay.
17417 		 *
17418 		 * Do not issue the START STOP UNIT if the current command
17419 		 * is already a START STOP UNIT.
17420 		 */
17421 		if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
17422 			break;
17423 		}
17424
17425 		/*
17426 		 * Do not schedule the timeout if one is already pending.
17427 		 */
17428 		if (un->un_startstop_timeid != NULL) {
17429 			SD_INFO(SD_LOG_ERROR, un,
17430 			    "sd_sense_key_not_ready: restart already issued to"
17431 			    " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
17432 			    ddi_get_instance(SD_DEVINFO(un)));
17433 			break;
17434 		}
17435
17436 		/*
17437 		 * Schedule the START STOP UNIT command, then queue the command
17438 		 * for a retry.
17439 		 *
17440 		 * Note: A timeout is not scheduled for this retry because we
17441 		 * want the retry to be serial with the START_STOP_UNIT. The
17442 		 * retry will be started when the START_STOP_UNIT is completed
17443 		 * in sd_start_stop_unit_task.
17444 		 */
17445 		un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
17446 		    un, SD_BSY_TIMEOUT / 2);
17447 		xp->xb_retry_count++;
17448 		sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
17449 		return;
17450
17451 	case 0x05:	/* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
17452 		if (sd_error_level < SCSI_ERR_RETRYABLE) {
17453 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17454 			    "unit does not respond to selection\n");
17455 		}
17456 		break;
17457
17458 	case 0x3A:	/* MEDIUM NOT PRESENT */
17459 		if (sd_error_level >= SCSI_ERR_FATAL) {
17460 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17461 			    "Caddy not inserted in drive\n");
17462 		}
17463
17464 		sr_ejected(un);
17465 		un->un_mediastate = DKIO_EJECTED;
17466 		/* The state has changed, inform the media watch routines */
17467 		cv_broadcast(&un->un_state_cv);
17468 		/* Just fail if no media is present in the drive. */
17469 		goto fail_command;
17470
17471 	default:
17472 		if (sd_error_level < SCSI_ERR_RETRYABLE) {
17473 			scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
17474 			    "Unit not Ready. Additional sense code 0x%x\n",
17475 			    asc);
17476 		}
17477 		break;
17478 	}
17479
17480 do_retry:
17481
17482 	/*
17483 	 * Retry the command, as some targets may report NOT READY for
17484 	 * several seconds after being reset.
17485 	 */
17486 	xp->xb_retry_count++;
17487 	si.ssi_severity = SCSI_ERR_RETRYABLE;
17488 	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
17489 	    &si, EIO, SD_BSY_TIMEOUT, NULL);
17490
17491 	return;
17492
17493 fail_command:
17494 	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
17495 	sd_return_failed_command(un, bp, EIO);
17496 }
17497
17498
17499
17500 /*
17501  * Function: sd_sense_key_medium_or_hardware_error
17502  *
17503  * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
17504  * sense key.
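 *
 * Editorial aside, not part of the original source: when the reset
 * retry threshold is hit, the body below escalates from a LUN reset
 * to a target reset. The pattern, distilled into a sketch
 * (example_escalate_reset is hypothetical):
 */

#if 0	/* illustrative sketch only, never compiled */
static void
example_escalate_reset(struct scsi_address *ap, int lun_reset_enabled)
{
	int rv = 0;

	if (lun_reset_enabled) {
		rv = scsi_reset(ap, RESET_LUN);
	}
	if (rv == 0) {
		/* LUN reset unsupported or failed; widen the reset scope */
		(void) scsi_reset(ap, RESET_TARGET);
	}
}
#endif

/*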
17505 * 17506 * Context: May be called from interrupt context 17507 */ 17508 17509 static void 17510 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 17511 int sense_key, uint8_t asc, 17512 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17513 { 17514 struct sd_sense_info si; 17515 17516 ASSERT(un != NULL); 17517 ASSERT(mutex_owned(SD_MUTEX(un))); 17518 ASSERT(bp != NULL); 17519 ASSERT(xp != NULL); 17520 ASSERT(pktp != NULL); 17521 17522 si.ssi_severity = SCSI_ERR_FATAL; 17523 si.ssi_pfa_flag = FALSE; 17524 17525 if (sense_key == KEY_MEDIUM_ERROR) { 17526 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 17527 } 17528 17529 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17530 17531 if ((un->un_reset_retry_count != 0) && 17532 (xp->xb_retry_count == un->un_reset_retry_count)) { 17533 mutex_exit(SD_MUTEX(un)); 17534 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 17535 if (un->un_f_allow_bus_device_reset == TRUE) { 17536 17537 boolean_t try_resetting_target = B_TRUE; 17538 17539 /* 17540 * We need to be able to handle specific ASC when we are 17541 * handling a KEY_HARDWARE_ERROR. In particular 17542 * taking the default action of resetting the target may 17543 * not be the appropriate way to attempt recovery. 17544 * Resetting a target because of a single LUN failure 17545 * victimizes all LUNs on that target. 17546 * 17547 * This is true for the LSI arrays, if an LSI 17548 * array controller returns an ASC of 0x84 (LUN Dead) we 17549 * should trust it. 17550 */ 17551 17552 if (sense_key == KEY_HARDWARE_ERROR) { 17553 switch (asc) { 17554 case 0x84: 17555 if (SD_IS_LSI(un)) { 17556 try_resetting_target = B_FALSE; 17557 } 17558 break; 17559 default: 17560 break; 17561 } 17562 } 17563 17564 if (try_resetting_target == B_TRUE) { 17565 int reset_retval = 0; 17566 if (un->un_f_lun_reset_enabled == TRUE) { 17567 SD_TRACE(SD_LOG_IO_CORE, un, 17568 "sd_sense_key_medium_or_hardware_" 17569 "error: issuing RESET_LUN\n"); 17570 reset_retval = 17571 scsi_reset(SD_ADDRESS(un), 17572 RESET_LUN); 17573 } 17574 if (reset_retval == 0) { 17575 SD_TRACE(SD_LOG_IO_CORE, un, 17576 "sd_sense_key_medium_or_hardware_" 17577 "error: issuing RESET_TARGET\n"); 17578 (void) scsi_reset(SD_ADDRESS(un), 17579 RESET_TARGET); 17580 } 17581 } 17582 } 17583 mutex_enter(SD_MUTEX(un)); 17584 } 17585 17586 /* 17587 * This really ought to be a fatal error, but we will retry anyway 17588 * as some drives report this as a spurious error. 17589 */ 17590 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17591 &si, EIO, (clock_t)0, NULL); 17592 } 17593 17594 17595 17596 /* 17597 * Function: sd_sense_key_illegal_request 17598 * 17599 * Description: Recovery actions for a SCSI "Illegal Request" sense key. 
17600 * 17601 * Context: May be called from interrupt context 17602 */ 17603 17604 static void 17605 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 17606 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17607 { 17608 struct sd_sense_info si; 17609 17610 ASSERT(un != NULL); 17611 ASSERT(mutex_owned(SD_MUTEX(un))); 17612 ASSERT(bp != NULL); 17613 ASSERT(xp != NULL); 17614 ASSERT(pktp != NULL); 17615 17616 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17617 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 17618 17619 si.ssi_severity = SCSI_ERR_INFO; 17620 si.ssi_pfa_flag = FALSE; 17621 17622 /* Pointless to retry if the target thinks it's an illegal request */ 17623 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17624 sd_return_failed_command(un, bp, EIO); 17625 } 17626 17627 17628 17629 17630 /* 17631 * Function: sd_sense_key_unit_attention 17632 * 17633 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 17634 * 17635 * Context: May be called from interrupt context 17636 */ 17637 17638 static void 17639 sd_sense_key_unit_attention(struct sd_lun *un, 17640 uint8_t asc, 17641 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17642 { 17643 /* 17644 * For UNIT ATTENTION we allow retries for one minute. Devices 17645 * like Sonoma can return UNIT ATTENTION close to a minute 17646 * under certain conditions. 17647 */ 17648 int retry_check_flag = SD_RETRIES_UA; 17649 struct sd_sense_info si; 17650 17651 ASSERT(un != NULL); 17652 ASSERT(mutex_owned(SD_MUTEX(un))); 17653 ASSERT(bp != NULL); 17654 ASSERT(xp != NULL); 17655 ASSERT(pktp != NULL); 17656 17657 si.ssi_severity = SCSI_ERR_INFO; 17658 si.ssi_pfa_flag = FALSE; 17659 17660 17661 switch (asc) { 17662 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 17663 if (sd_report_pfa != 0) { 17664 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17665 si.ssi_pfa_flag = TRUE; 17666 retry_check_flag = SD_RETRIES_STANDARD; 17667 goto do_retry; 17668 } 17669 break; 17670 17671 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 17672 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 17673 un->un_resvd_status |= 17674 (SD_LOST_RESERVE | SD_WANT_RESERVE); 17675 } 17676 /* FALLTHRU */ 17677 17678 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 17679 if (!ISREMOVABLE(un)) { 17680 break; 17681 } 17682 17683 /* 17684 * When we get a unit attention from a removable-media device, 17685 * it may be in a state that will take a long time to recover 17686 * (e.g., from a reset). Since we are executing in interrupt 17687 * context here, we cannot wait around for the device to come 17688 * back. So hand this command off to sd_media_change_task() 17689 * for deferred processing under taskq thread context. (Note 17690 * that the command still may be failed if a problem is 17691 * encountered at a later time.) 17692 */ 17693 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 17694 KM_NOSLEEP) == 0) { 17695 /* 17696 * Cannot dispatch the request so fail the command. 17697 */ 17698 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17699 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17700 si.ssi_severity = SCSI_ERR_FATAL; 17701 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17702 sd_return_failed_command(un, bp, EIO); 17703 } 17704 /* 17705 * Either the command has been successfully dispatched to a 17706 * task Q for retrying, or the dispatch failed. In either case 17707 * do NOT retry again by calling sd_retry_command. 
This sets up
17708  * two retries of the same command, and when one completes and
17709  * frees the resources, the other will access freed memory, which
17710  * is a bad thing.
17711 	 */
17712 	return;
17713
17714 	default:
17715 		break;
17716 	}
17717
17718 	if (!ISREMOVABLE(un)) {
17719 		/*
17720 		 * Do not update these here for removables. For removables
17721 		 * these stats are updated (1) above if we failed to dispatch
17722 		 * sd_media_change_task(), or (2) sd_media_change_task() may
17723 		 * update these later if it encounters an error.
17724 		 */
17725 		SD_UPDATE_ERRSTATS(un, sd_harderrs);
17726 		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
17727 	}
17728
17729 do_retry:
17730 	sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
17731 	    EIO, SD_UA_RETRY_DELAY, NULL);
17732 }
17733
17734
17735
17736 /*
17737  * Function: sd_sense_key_fail_command
17738  *
17739  * Description: Used to fail a command when we don't like the sense key that
17740  * was returned.
17741  *
17742  * Context: May be called from interrupt context
17743  */
17744
17745 static void
17746 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
17747 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
17748 {
17749 	struct sd_sense_info si;
17750
17751 	ASSERT(un != NULL);
17752 	ASSERT(mutex_owned(SD_MUTEX(un)));
17753 	ASSERT(bp != NULL);
17754 	ASSERT(xp != NULL);
17755 	ASSERT(pktp != NULL);
17756
17757 	si.ssi_severity = SCSI_ERR_FATAL;
17758 	si.ssi_pfa_flag = FALSE;
17759
17760 	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
17761 	sd_return_failed_command(un, bp, EIO);
17762 }
17763
17764
17765
17766 /*
17767  * Function: sd_sense_key_blank_check
17768  *
17769  * Description: Recovery actions for a SCSI "Blank Check" sense key.
17770  * Has no monetary connotation.
17771  *
17772  * Context: May be called from interrupt context
17773  */
17774
17775 static void
17776 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
17777 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
17778 {
17779 	struct sd_sense_info si;
17780
17781 	ASSERT(un != NULL);
17782 	ASSERT(mutex_owned(SD_MUTEX(un)));
17783 	ASSERT(bp != NULL);
17784 	ASSERT(xp != NULL);
17785 	ASSERT(pktp != NULL);
17786
17787 	/*
17788 	 * Blank check is not fatal for removable devices, therefore
17789 	 * it does not require a console message.
17790 	 */
17791 	si.ssi_severity = (ISREMOVABLE(un)) ? SCSI_ERR_ALL : SCSI_ERR_FATAL;
17792 	si.ssi_pfa_flag = FALSE;
17793
17794 	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
17795 	sd_return_failed_command(un, bp, EIO);
17796 }
17797
17798
17799
17800
17801 /*
17802  * Function: sd_sense_key_aborted_command
17803  *
17804  * Description: Recovery actions for a SCSI "Aborted Command" sense key.
17805  *
17806  * Context: May be called from interrupt context
17807  */
17808
17809 static void
17810 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
17811 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
17812 {
17813 	struct sd_sense_info si;
17814
17815 	ASSERT(un != NULL);
17816 	ASSERT(mutex_owned(SD_MUTEX(un)));
17817 	ASSERT(bp != NULL);
17818 	ASSERT(xp != NULL);
17819 	ASSERT(pktp != NULL);
17820
17821 	si.ssi_severity = SCSI_ERR_FATAL;
17822 	si.ssi_pfa_flag = FALSE;
17823
17824 	SD_UPDATE_ERRSTATS(un, sd_harderrs);
17825
17826 	/*
17827 	 * This really ought to be a fatal error, but we will retry anyway
17828 	 * as some drives report this as a spurious error.
17829 */ 17830 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17831 &si, EIO, (clock_t)0, NULL); 17832 } 17833 17834 17835 17836 /* 17837 * Function: sd_sense_key_default 17838 * 17839 * Description: Default recovery action for several SCSI sense keys (basically 17840 * attempts a retry). 17841 * 17842 * Context: May be called from interrupt context 17843 */ 17844 17845 static void 17846 sd_sense_key_default(struct sd_lun *un, 17847 int sense_key, 17848 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17849 { 17850 struct sd_sense_info si; 17851 17852 ASSERT(un != NULL); 17853 ASSERT(mutex_owned(SD_MUTEX(un))); 17854 ASSERT(bp != NULL); 17855 ASSERT(xp != NULL); 17856 ASSERT(pktp != NULL); 17857 17858 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17859 17860 /* 17861 * Undecoded sense key. Attempt retries and hope that will fix 17862 * the problem. Otherwise, we're dead. 17863 */ 17864 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17865 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17866 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 17867 } 17868 17869 si.ssi_severity = SCSI_ERR_FATAL; 17870 si.ssi_pfa_flag = FALSE; 17871 17872 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17873 &si, EIO, (clock_t)0, NULL); 17874 } 17875 17876 17877 17878 /* 17879 * Function: sd_print_retry_msg 17880 * 17881 * Description: Print a message indicating the retry action being taken. 17882 * 17883 * Arguments: un - ptr to associated softstate 17884 * bp - ptr to buf(9S) for the command 17885 * arg - not used. 17886 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17887 * or SD_NO_RETRY_ISSUED 17888 * 17889 * Context: May be called from interrupt context 17890 */ 17891 /* ARGSUSED */ 17892 static void 17893 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 17894 { 17895 struct sd_xbuf *xp; 17896 struct scsi_pkt *pktp; 17897 char *reasonp; 17898 char *msgp; 17899 17900 ASSERT(un != NULL); 17901 ASSERT(mutex_owned(SD_MUTEX(un))); 17902 ASSERT(bp != NULL); 17903 pktp = SD_GET_PKTP(bp); 17904 ASSERT(pktp != NULL); 17905 xp = SD_GET_XBUF(bp); 17906 ASSERT(xp != NULL); 17907 17908 ASSERT(!mutex_owned(&un->un_pm_mutex)); 17909 mutex_enter(&un->un_pm_mutex); 17910 if ((un->un_state == SD_STATE_SUSPENDED) || 17911 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 17912 (pktp->pkt_flags & FLAG_SILENT)) { 17913 mutex_exit(&un->un_pm_mutex); 17914 goto update_pkt_reason; 17915 } 17916 mutex_exit(&un->un_pm_mutex); 17917 17918 /* 17919 * Suppress messages if they are all the same pkt_reason; with 17920 * TQ, many (up to 256) are returned with the same pkt_reason. 17921 * If we are in panic, then suppress the retry messages. 17922 */ 17923 switch (flag) { 17924 case SD_NO_RETRY_ISSUED: 17925 msgp = "giving up"; 17926 break; 17927 case SD_IMMEDIATE_RETRY_ISSUED: 17928 case SD_DELAYED_RETRY_ISSUED: 17929 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 17930 ((pktp->pkt_reason == un->un_last_pkt_reason) && 17931 (sd_error_level != SCSI_ERR_ALL))) { 17932 return; 17933 } 17934 msgp = "retrying command"; 17935 break; 17936 default: 17937 goto update_pkt_reason; 17938 } 17939 17940 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 17941 scsi_rname(pktp->pkt_reason)); 17942 17943 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17944 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 17945 17946 update_pkt_reason: 17947 /* 17948 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 
17949 * This is to prevent multiple console messages for the same failure 17950 * condition. Note that un->un_last_pkt_reason is NOT restored if & 17951 * when the command is retried successfully because there still may be 17952 * more commands coming back with the same value of pktp->pkt_reason. 17953 */ 17954 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 17955 un->un_last_pkt_reason = pktp->pkt_reason; 17956 } 17957 } 17958 17959 17960 /* 17961 * Function: sd_print_cmd_incomplete_msg 17962 * 17963 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 17964 * 17965 * Arguments: un - ptr to associated softstate 17966 * bp - ptr to buf(9S) for the command 17967 * arg - passed to sd_print_retry_msg() 17968 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17969 * or SD_NO_RETRY_ISSUED 17970 * 17971 * Context: May be called from interrupt context 17972 */ 17973 17974 static void 17975 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 17976 int code) 17977 { 17978 dev_info_t *dip; 17979 17980 ASSERT(un != NULL); 17981 ASSERT(mutex_owned(SD_MUTEX(un))); 17982 ASSERT(bp != NULL); 17983 17984 switch (code) { 17985 case SD_NO_RETRY_ISSUED: 17986 /* Command was failed. Someone turned off this target? */ 17987 if (un->un_state != SD_STATE_OFFLINE) { 17988 /* 17989 * Suppress message if we are detaching and 17990 * device has been disconnected 17991 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 17992 * private interface and not part of the DDI 17993 */ 17994 dip = un->un_sd->sd_dev; 17995 if (!(DEVI_IS_DETACHING(dip) && 17996 DEVI_IS_DEVICE_REMOVED(dip))) { 17997 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17998 "disk not responding to selection\n"); 17999 } 18000 New_state(un, SD_STATE_OFFLINE); 18001 } 18002 break; 18003 18004 case SD_DELAYED_RETRY_ISSUED: 18005 case SD_IMMEDIATE_RETRY_ISSUED: 18006 default: 18007 /* Command was successfully queued for retry */ 18008 sd_print_retry_msg(un, bp, arg, code); 18009 break; 18010 } 18011 } 18012 18013 18014 /* 18015 * Function: sd_pkt_reason_cmd_incomplete 18016 * 18017 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 18018 * 18019 * Context: May be called from interrupt context 18020 */ 18021 18022 static void 18023 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 18024 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18025 { 18026 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 18027 18028 ASSERT(un != NULL); 18029 ASSERT(mutex_owned(SD_MUTEX(un))); 18030 ASSERT(bp != NULL); 18031 ASSERT(xp != NULL); 18032 ASSERT(pktp != NULL); 18033 18034 /* Do not do a reset if selection did not complete */ 18035 /* Note: Should this not just check the bit? */ 18036 if (pktp->pkt_state != STATE_GOT_BUS) { 18037 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18038 sd_reset_target(un, pktp); 18039 } 18040 18041 /* 18042 * If the target was not successfully selected, then set 18043 * SD_RETRIES_FAILFAST to indicate that we lost communication 18044 * with the target, and further retries and/or commands are 18045 * likely to take a long time. 
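 * (Once the driver's failfast state is later entered, commands
 * flagged this way can be failed promptly rather than each riding
 * out its own full retry sequence against an unreachable target.)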
18046 */ 18047 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 18048 flag |= SD_RETRIES_FAILFAST; 18049 } 18050 18051 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18052 18053 sd_retry_command(un, bp, flag, 18054 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18055 } 18056 18057 18058 18059 /* 18060 * Function: sd_pkt_reason_cmd_tran_err 18061 * 18062 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 18063 * 18064 * Context: May be called from interrupt context 18065 */ 18066 18067 static void 18068 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 18069 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18070 { 18071 ASSERT(un != NULL); 18072 ASSERT(mutex_owned(SD_MUTEX(un))); 18073 ASSERT(bp != NULL); 18074 ASSERT(xp != NULL); 18075 ASSERT(pktp != NULL); 18076 18077 /* 18078 * Do not reset if we got a parity error, or if 18079 * selection did not complete. 18080 */ 18081 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18082 /* Note: Should this not just check the bit for pkt_state? */ 18083 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 18084 (pktp->pkt_state != STATE_GOT_BUS)) { 18085 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18086 sd_reset_target(un, pktp); 18087 } 18088 18089 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18090 18091 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18092 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18093 } 18094 18095 18096 18097 /* 18098 * Function: sd_pkt_reason_cmd_reset 18099 * 18100 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 18101 * 18102 * Context: May be called from interrupt context 18103 */ 18104 18105 static void 18106 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 18107 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18108 { 18109 ASSERT(un != NULL); 18110 ASSERT(mutex_owned(SD_MUTEX(un))); 18111 ASSERT(bp != NULL); 18112 ASSERT(xp != NULL); 18113 ASSERT(pktp != NULL); 18114 18115 /* The target may still be running the command, so try to reset. */ 18116 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18117 sd_reset_target(un, pktp); 18118 18119 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18120 18121 /* 18122 * If pkt_reason is CMD_RESET chances are that this pkt got 18123 * reset because another target on this bus caused it. The target 18124 * that caused it should get CMD_TIMEOUT with pkt_statistics 18125 * of STAT_TIMEOUT/STAT_DEV_RESET. 18126 */ 18127 18128 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18129 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18130 } 18131 18132 18133 18134 18135 /* 18136 * Function: sd_pkt_reason_cmd_aborted 18137 * 18138 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 18139 * 18140 * Context: May be called from interrupt context 18141 */ 18142 18143 static void 18144 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 18145 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18146 { 18147 ASSERT(un != NULL); 18148 ASSERT(mutex_owned(SD_MUTEX(un))); 18149 ASSERT(bp != NULL); 18150 ASSERT(xp != NULL); 18151 ASSERT(pktp != NULL); 18152 18153 /* The target may still be running the command, so try to reset. */ 18154 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18155 sd_reset_target(un, pktp); 18156 18157 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18158 18159 /* 18160 * If pkt_reason is CMD_ABORTED chances are that this pkt got 18161 * aborted because another target on this bus caused it. The target 18162 * that caused it should get CMD_TIMEOUT with pkt_statistics 18163 * of STAT_TIMEOUT/STAT_DEV_RESET. 
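 * The SD_RETRIES_VICTIM flag below retries this command as a
 * "victim" of that other target's failure; victim retries are
 * checked against their own retry count rather than the standard
 * retry limit.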
18164 */ 18165 18166 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18167 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18168 } 18169 18170 18171 18172 /* 18173 * Function: sd_pkt_reason_cmd_timeout 18174 * 18175 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 18176 * 18177 * Context: May be called from interrupt context 18178 */ 18179 18180 static void 18181 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 18182 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18183 { 18184 ASSERT(un != NULL); 18185 ASSERT(mutex_owned(SD_MUTEX(un))); 18186 ASSERT(bp != NULL); 18187 ASSERT(xp != NULL); 18188 ASSERT(pktp != NULL); 18189 18190 18191 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18192 sd_reset_target(un, pktp); 18193 18194 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18195 18196 /* 18197 * A command timeout indicates that we could not establish 18198 * communication with the target, so set SD_RETRIES_FAILFAST 18199 * as further retries/commands are likely to take a long time. 18200 */ 18201 sd_retry_command(un, bp, 18202 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 18203 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18204 } 18205 18206 18207 18208 /* 18209 * Function: sd_pkt_reason_cmd_unx_bus_free 18210 * 18211 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 18212 * 18213 * Context: May be called from interrupt context 18214 */ 18215 18216 static void 18217 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 18218 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18219 { 18220 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 18221 18222 ASSERT(un != NULL); 18223 ASSERT(mutex_owned(SD_MUTEX(un))); 18224 ASSERT(bp != NULL); 18225 ASSERT(xp != NULL); 18226 ASSERT(pktp != NULL); 18227 18228 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18229 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18230 18231 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 18232 sd_print_retry_msg : NULL; 18233 18234 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18235 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18236 } 18237 18238 18239 /* 18240 * Function: sd_pkt_reason_cmd_tag_reject 18241 * 18242 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 18243 * 18244 * Context: May be called from interrupt context 18245 */ 18246 18247 static void 18248 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 18249 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18250 { 18251 ASSERT(un != NULL); 18252 ASSERT(mutex_owned(SD_MUTEX(un))); 18253 ASSERT(bp != NULL); 18254 ASSERT(xp != NULL); 18255 ASSERT(pktp != NULL); 18256 18257 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18258 pktp->pkt_flags = 0; 18259 un->un_tagflags = 0; 18260 if (un->un_f_opt_queueing == TRUE) { 18261 un->un_throttle = min(un->un_throttle, 3); 18262 } else { 18263 un->un_throttle = 1; 18264 } 18265 mutex_exit(SD_MUTEX(un)); 18266 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 18267 mutex_enter(SD_MUTEX(un)); 18268 18269 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18270 18271 /* Legacy behavior not to check retry counts here. */ 18272 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 18273 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18274 } 18275 18276 18277 /* 18278 * Function: sd_pkt_reason_default 18279 * 18280 * Description: Default recovery actions for SCSA pkt_reason values that 18281 * do not have more explicit recovery actions. 
18282 * 18283 * Context: May be called from interrupt context 18284 */ 18285 18286 static void 18287 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 18288 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18289 { 18290 ASSERT(un != NULL); 18291 ASSERT(mutex_owned(SD_MUTEX(un))); 18292 ASSERT(bp != NULL); 18293 ASSERT(xp != NULL); 18294 ASSERT(pktp != NULL); 18295 18296 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18297 sd_reset_target(un, pktp); 18298 18299 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18300 18301 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18302 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18303 } 18304 18305 18306 18307 /* 18308 * Function: sd_pkt_status_check_condition 18309 * 18310 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 18311 * 18312 * Context: May be called from interrupt context 18313 */ 18314 18315 static void 18316 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 18317 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18318 { 18319 ASSERT(un != NULL); 18320 ASSERT(mutex_owned(SD_MUTEX(un))); 18321 ASSERT(bp != NULL); 18322 ASSERT(xp != NULL); 18323 ASSERT(pktp != NULL); 18324 18325 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 18326 "entry: buf:0x%p xp:0x%p\n", bp, xp); 18327 18328 /* 18329 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 18330 * command will be retried after the request sense). Otherwise, retry 18331 * the command. Note: we are issuing the request sense even though the 18332 * retry limit may have been reached for the failed command. 18333 */ 18334 if (un->un_f_arq_enabled == FALSE) { 18335 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 18336 "no ARQ, sending request sense command\n"); 18337 sd_send_request_sense_command(un, bp, pktp); 18338 } else { 18339 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 18340 "ARQ, retrying request sense command\n"); 18341 #if defined(__i386) || defined(__amd64) 18342 /* 18343 * The SD_RETRY_DELAY value needs to be adjusted here 18344 * whenever SD_RETRY_DELAY changes in sddef.h 18345 */ 18346 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 0, 18347 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, 18348 NULL); 18349 #else 18350 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 18351 0, SD_RETRY_DELAY, NULL); 18352 #endif 18353 } 18354 18355 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 18356 } 18357 18358 18359 /* 18360 * Function: sd_pkt_status_busy 18361 * 18362 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 18363 * 18364 * Context: May be called from interrupt context 18365 */ 18366 18367 static void 18368 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 18369 struct scsi_pkt *pktp) 18370 { 18371 ASSERT(un != NULL); 18372 ASSERT(mutex_owned(SD_MUTEX(un))); 18373 ASSERT(bp != NULL); 18374 ASSERT(xp != NULL); 18375 ASSERT(pktp != NULL); 18376 18377 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18378 "sd_pkt_status_busy: entry\n"); 18379 18380 /* If retries are exhausted, just fail the command. */ 18381 if (xp->xb_retry_count >= un->un_busy_retry_count) { 18382 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18383 "device busy too long\n"); 18384 sd_return_failed_command(un, bp, EIO); 18385 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18386 "sd_pkt_status_busy: exit\n"); 18387 return; 18388 } 18389 xp->xb_retry_count++; 18390 18391 /* 18392 * Try to reset the target. 
However, we do not want to perform 18393 * more than one reset if the device continues to fail. The reset 18394 * will be performed when the retry count reaches the reset 18395 * threshold. This threshold should be set such that at least 18396 * one retry is issued before the reset is performed. 18397 */ 18398 if (xp->xb_retry_count == 18399 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 18400 int rval = 0; 18401 mutex_exit(SD_MUTEX(un)); 18402 if (un->un_f_allow_bus_device_reset == TRUE) { 18403 /* 18404 * First try to reset the LUN; if we cannot then 18405 * try to reset the target. 18406 */ 18407 if (un->un_f_lun_reset_enabled == TRUE) { 18408 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18409 "sd_pkt_status_busy: RESET_LUN\n"); 18410 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18411 } 18412 if (rval == 0) { 18413 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18414 "sd_pkt_status_busy: RESET_TARGET\n"); 18415 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18416 } 18417 } 18418 if (rval == 0) { 18419 /* 18420 * If the RESET_LUN and/or RESET_TARGET failed, 18421 * try RESET_ALL 18422 */ 18423 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18424 "sd_pkt_status_busy: RESET_ALL\n"); 18425 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 18426 } 18427 mutex_enter(SD_MUTEX(un)); 18428 if (rval == 0) { 18429 /* 18430 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 18431 * At this point we give up & fail the command. 18432 */ 18433 sd_return_failed_command(un, bp, EIO); 18434 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18435 "sd_pkt_status_busy: exit (failed cmd)\n"); 18436 return; 18437 } 18438 } 18439 18440 /* 18441 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 18442 * we have already checked the retry counts above. 18443 */ 18444 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 18445 EIO, SD_BSY_TIMEOUT, NULL); 18446 18447 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18448 "sd_pkt_status_busy: exit\n"); 18449 } 18450 18451 18452 /* 18453 * Function: sd_pkt_status_reservation_conflict 18454 * 18455 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 18456 * command status. 18457 * 18458 * Context: May be called from interrupt context 18459 */ 18460 18461 static void 18462 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 18463 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18464 { 18465 ASSERT(un != NULL); 18466 ASSERT(mutex_owned(SD_MUTEX(un))); 18467 ASSERT(bp != NULL); 18468 ASSERT(xp != NULL); 18469 ASSERT(pktp != NULL); 18470 18471 /* 18472 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 18473 * conflict could be due to various reasons like incorrect keys, not 18474 * registered or not reserved etc. So, we return EACCES to the caller. 18475 */ 18476 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 18477 int cmd = SD_GET_PKT_OPCODE(pktp); 18478 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 18479 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 18480 sd_return_failed_command(un, bp, EACCES); 18481 return; 18482 } 18483 } 18484 18485 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 18486 18487 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 18488 if (sd_failfast_enable != 0) { 18489 /* By definition, we must panic here.... 
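 * In a clustered (HA) configuration a reservation conflict here
 * means another node has taken ownership of the disk; continuing
 * to run with stale state risks corrupting shared data, so the
 * node is taken down immediately rather than just failing the I/O.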
*/ 18490 panic("Reservation Conflict"); 18491 /*NOTREACHED*/ 18492 } 18493 SD_ERROR(SD_LOG_IO, un, 18494 "sd_handle_resv_conflict: Disk Reserved\n"); 18495 sd_return_failed_command(un, bp, EACCES); 18496 return; 18497 } 18498 18499 /* 18500 * 1147670: retry only if sd_retry_on_reservation_conflict 18501 * property is set (default is 1). Retries will not succeed 18502 * on a disk reserved by another initiator. HA systems 18503 * may reset this via sd.conf to avoid these retries. 18504 * 18505 * Note: The legacy return code for this failure is EIO; however, EACCES 18506 * seems more appropriate for a reservation conflict. 18507 */ 18508 if (sd_retry_on_reservation_conflict == 0) { 18509 SD_ERROR(SD_LOG_IO, un, 18510 "sd_handle_resv_conflict: Device Reserved\n"); 18511 sd_return_failed_command(un, bp, EIO); 18512 return; 18513 } 18514 18515 /* 18516 * Retry the command if we can. 18517 * 18518 * Note: The legacy return code for this failure is EIO; however, EACCES 18519 * seems more appropriate for a reservation conflict. 18520 */ 18521 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 18522 (clock_t)2, NULL); 18523 } 18524 18525 18526 18527 /* 18528 * Function: sd_pkt_status_qfull 18529 * 18530 * Description: Handle a QUEUE FULL condition from the target. This can 18531 * occur if the HBA does not handle the queue full condition. 18532 * (Basically this means third-party HBAs, as Sun HBAs will 18533 * handle the queue full condition.) Note that if there are 18534 * some commands already in the transport, then the queue full 18535 * has occurred because the queue for this nexus is actually 18536 * full. If there are no commands in the transport, then the 18537 * queue full results from some other initiator or lun 18538 * consuming all the resources at the target. 18539 * 18540 * Context: May be called from interrupt context 18541 */ 18542 18543 static void 18544 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 18545 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18546 { 18547 ASSERT(un != NULL); 18548 ASSERT(mutex_owned(SD_MUTEX(un))); 18549 ASSERT(bp != NULL); 18550 ASSERT(xp != NULL); 18551 ASSERT(pktp != NULL); 18552 18553 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18554 "sd_pkt_status_qfull: entry\n"); 18555 18556 /* 18557 * Just lower the QFULL throttle and retry the command. Note that 18558 * we do not limit the number of retries here. 18559 */ 18560 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 18561 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 18562 SD_RESTART_TIMEOUT, NULL); 18563 18564 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18565 "sd_pkt_status_qfull: exit\n"); 18566 } 18567 18568 18569 /* 18570 * Function: sd_reset_target 18571 * 18572 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 18573 * RESET_TARGET, or RESET_ALL. 18574 * 18575 * Context: May be called under interrupt context. 18576 */ 18577 18578 static void 18579 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 18580 { 18581 int rval = 0; 18582 18583 ASSERT(un != NULL); 18584 ASSERT(mutex_owned(SD_MUTEX(un))); 18585 ASSERT(pktp != NULL); 18586 18587 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 18588 18589 /* 18590 * No need to reset if the transport layer has already done so. 
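 * The pkt_statistics bits tested below (STAT_BUS_RESET,
 * STAT_DEV_RESET, STAT_ABORTED) indicate that the HBA has already
 * reset the bus or device, or aborted the command, on our behalf,
 * so issuing another reset here would only add disruption.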
18591 */ 18592 if ((pktp->pkt_statistics & 18593 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 18594 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18595 "sd_reset_target: no reset\n"); 18596 return; 18597 } 18598 18599 mutex_exit(SD_MUTEX(un)); 18600 18601 if (un->un_f_allow_bus_device_reset == TRUE) { 18602 if (un->un_f_lun_reset_enabled == TRUE) { 18603 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18604 "sd_reset_target: RESET_LUN\n"); 18605 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18606 } 18607 if (rval == 0) { 18608 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18609 "sd_reset_target: RESET_TARGET\n"); 18610 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18611 } 18612 } 18613 18614 if (rval == 0) { 18615 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18616 "sd_reset_target: RESET_ALL\n"); 18617 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 18618 } 18619 18620 mutex_enter(SD_MUTEX(un)); 18621 18622 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 18623 } 18624 18625 18626 /* 18627 * Function: sd_media_change_task 18628 * 18629 * Description: Recovery action for a CDROM to become available. 18630 * 18631 * Context: Executes in a taskq() thread context 18632 */ 18633 18634 static void 18635 sd_media_change_task(void *arg) 18636 { 18637 struct scsi_pkt *pktp = arg; 18638 struct sd_lun *un; 18639 struct buf *bp; 18640 struct sd_xbuf *xp; 18641 int err = 0; 18642 int retry_count = 0; 18643 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 18644 struct sd_sense_info si; 18645 18646 ASSERT(pktp != NULL); 18647 bp = (struct buf *)pktp->pkt_private; 18648 ASSERT(bp != NULL); 18649 xp = SD_GET_XBUF(bp); 18650 ASSERT(xp != NULL); 18651 un = SD_GET_UN(bp); 18652 ASSERT(un != NULL); 18653 ASSERT(!mutex_owned(SD_MUTEX(un))); 18654 ASSERT(ISREMOVABLE(un)); 18655 18656 si.ssi_severity = SCSI_ERR_INFO; 18657 si.ssi_pfa_flag = FALSE; 18658 18659 /* 18660 * When a reset is issued on a CDROM, it takes a long time to 18661 * recover. The first few attempts to read capacity and other things 18662 * related to handling unit attention fail (with an ASC 0x4 and 18663 * ASCQ 0x1). In that case we want to do enough retries, while 18664 * limiting the retries in other cases of genuine failure, like 18665 * no media in the drive. 18666 */ 18667 while (retry_count++ < retry_limit) { 18668 if ((err = sd_handle_mchange(un)) == 0) { 18669 break; 18670 } 18671 if (err == EAGAIN) { 18672 retry_limit = SD_UNIT_ATTENTION_RETRY; 18673 } 18674 /* Sleep for 0.5 sec. & try again */ 18675 delay(drv_usectohz(500000)); 18676 } 18677 18678 /* 18679 * Dispatch (retry or fail) the original command here, 18680 * along with appropriate console messages.... 18681 * 18682 * Must grab the mutex before calling sd_retry_command, 18683 * sd_print_sense_msg and sd_return_failed_command. 18684 */ 18685 mutex_enter(SD_MUTEX(un)); 18686 if (err != SD_CMD_SUCCESS) { 18687 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18688 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 18689 si.ssi_severity = SCSI_ERR_FATAL; 18690 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18691 sd_return_failed_command(un, bp, EIO); 18692 } else { 18693 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 18694 &si, EIO, (clock_t)0, NULL); 18695 } 18696 mutex_exit(SD_MUTEX(un)); 18697 } 18698 18699 18700 18701 /* 18702 * Function: sd_handle_mchange 18703 * 18704 * Description: Perform geometry validation & other recovery when a CDROM 18705 * has been removed from the drive. 
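 *		It re-reads the capacity, updates the block info and
 *		the capacity kstat, revalidates the geometry, and then
 *		attempts to re-lock the door; all commands are issued
 *		with SD_PATH_DIRECT_PRIORITY so that this recovery
 *		bypasses any queued or retried I/O.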
18706 * 18707 * Return Code: 0 for success 18708 * errno-type return code of either sd_send_scsi_DOORLOCK() or 18709 * sd_send_scsi_READ_CAPACITY() 18710 * 18711 * Context: Executes in a taskq() thread context 18712 */ 18713 18714 static int 18715 sd_handle_mchange(struct sd_lun *un) 18716 { 18717 uint64_t capacity; 18718 uint32_t lbasize; 18719 int rval; 18720 18721 ASSERT(!mutex_owned(SD_MUTEX(un))); 18722 ASSERT(ISREMOVABLE(un)); 18723 18724 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 18725 SD_PATH_DIRECT_PRIORITY)) != 0) { 18726 return (rval); 18727 } 18728 18729 mutex_enter(SD_MUTEX(un)); 18730 sd_update_block_info(un, lbasize, capacity); 18731 18732 if (un->un_errstats != NULL) { 18733 struct sd_errstats *stp = 18734 (struct sd_errstats *)un->un_errstats->ks_data; 18735 stp->sd_capacity.value.ui64 = (uint64_t) 18736 ((uint64_t)un->un_blockcount * 18737 (uint64_t)un->un_tgt_blocksize); 18738 } 18739 18740 /* 18741 * Note: Maybe let the strategy/partitioning chain worry about getting 18742 * valid geometry. 18743 */ 18744 un->un_f_geometry_is_valid = FALSE; 18745 (void) sd_validate_geometry(un, SD_PATH_DIRECT_PRIORITY); 18746 if (un->un_f_geometry_is_valid == FALSE) { 18747 mutex_exit(SD_MUTEX(un)); 18748 return (EIO); 18749 } 18750 18751 mutex_exit(SD_MUTEX(un)); 18752 18753 /* 18754 * Try to lock the door 18755 */ 18756 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 18757 SD_PATH_DIRECT_PRIORITY)); 18758 } 18759 18760 18761 /* 18762 * Function: sd_send_scsi_DOORLOCK 18763 * 18764 * Description: Issue the scsi DOOR LOCK command 18765 * 18766 * Arguments: un - pointer to driver soft state (unit) structure for 18767 * this target. 18768 * flag - SD_REMOVAL_ALLOW 18769 * SD_REMOVAL_PREVENT 18770 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18771 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18772 * to use the USCSI "direct" chain and bypass the normal 18773 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18774 * command is issued as part of an error recovery action. 18775 * 18776 * Return Code: 0 - Success 18777 * errno return code from sd_send_scsi_cmd() 18778 * 18779 * Context: Can sleep. 
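 *
 * The CDB built below is the SCSI-2 PREVENT/ALLOW MEDIUM REMOVAL
 * command (opcode 0x1E): CDB byte 4 carries the flag value, so
 * SD_REMOVAL_PREVENT locks the door and SD_REMOVAL_ALLOW unlocks it.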
18780 */ 18781 18782 static int 18783 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 18784 { 18785 union scsi_cdb cdb; 18786 struct uscsi_cmd ucmd_buf; 18787 struct scsi_extended_sense sense_buf; 18788 int status; 18789 18790 ASSERT(un != NULL); 18791 ASSERT(!mutex_owned(SD_MUTEX(un))); 18792 18793 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 18794 18795 /* already determined doorlock is not supported, fake success */ 18796 if (un->un_f_doorlock_supported == FALSE) { 18797 return (0); 18798 } 18799 18800 bzero(&cdb, sizeof (cdb)); 18801 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18802 18803 cdb.scc_cmd = SCMD_DOORLOCK; 18804 cdb.cdb_opaque[4] = (uchar_t)flag; 18805 18806 ucmd_buf.uscsi_cdb = (char *)&cdb; 18807 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18808 ucmd_buf.uscsi_bufaddr = NULL; 18809 ucmd_buf.uscsi_buflen = 0; 18810 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18811 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 18812 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18813 ucmd_buf.uscsi_timeout = 15; 18814 18815 SD_TRACE(SD_LOG_IO, un, 18816 "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n"); 18817 18818 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 18819 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 18820 18821 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 18822 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18823 (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) { 18824 /* fake success and skip subsequent doorlock commands */ 18825 un->un_f_doorlock_supported = FALSE; 18826 return (0); 18827 } 18828 18829 return (status); 18830 } 18831 18832 18833 /* 18834 * Function: sd_send_scsi_READ_CAPACITY 18835 * 18836 * Description: This routine uses the scsi READ CAPACITY command to determine 18837 * the device capacity in number of blocks and the device native 18838 * block size. If this function returns a failure, then the 18839 * values in *capp and *lbap are undefined. If the capacity 18840 * returned is 0xffffffff then the lun is too large for a 18841 * normal READ CAPACITY command and the results of a 18842 * READ CAPACITY 16 will be used instead. 18843 * 18844 * Arguments: un - ptr to soft state struct for the target 18845 * capp - ptr to unsigned 64-bit variable to receive the 18846 * capacity value from the command. 18847 * lbap - ptr to unsigned 32-bit variable to receive the 18848 * block size value from the command 18849 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18850 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18851 * to use the USCSI "direct" chain and bypass the normal 18852 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18853 * command is issued as part of an error recovery action. 18854 * 18855 * Return Code: 0 - Success 18856 * EIO - IO error 18857 * EACCES - Reservation conflict detected 18858 * EAGAIN - Device is becoming ready 18859 * errno return code from sd_send_scsi_cmd() 18860 * 18861 * Context: Can sleep. Blocks until command completes. 
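 *
 * A typical invocation looks like the sketch below; this example is
 * illustrative only (error recovery paths in this file pass
 * SD_PATH_DIRECT_PRIORITY rather than SD_PATH_DIRECT):
 *
 *	uint64_t capacity;
 *	uint32_t lbasize;
 *
 *	if (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize,
 *	    SD_PATH_DIRECT) == 0) {
 *		(capacity comes back scaled to un_sys_blocksize
 *		units, and both values are known to be nonzero)
 *	}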
18862 */ 18863 18864 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 18865 18866 static int 18867 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap, 18868 int path_flag) 18869 { 18870 struct scsi_extended_sense sense_buf; 18871 struct uscsi_cmd ucmd_buf; 18872 union scsi_cdb cdb; 18873 uint32_t *capacity_buf; 18874 uint64_t capacity; 18875 uint32_t lbasize; 18876 int status; 18877 18878 ASSERT(un != NULL); 18879 ASSERT(!mutex_owned(SD_MUTEX(un))); 18880 ASSERT(capp != NULL); 18881 ASSERT(lbap != NULL); 18882 18883 SD_TRACE(SD_LOG_IO, un, 18884 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 18885 18886 /* 18887 * First send a READ_CAPACITY command to the target. 18888 * (This command is mandatory under SCSI-2.) 18889 * 18890 * Set up the CDB for the READ_CAPACITY command. The Partial 18891 * Medium Indicator bit is cleared. The address field must be 18892 * zero if the PMI bit is zero. 18893 */ 18894 bzero(&cdb, sizeof (cdb)); 18895 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18896 18897 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 18898 18899 cdb.scc_cmd = SCMD_READ_CAPACITY; 18900 18901 ucmd_buf.uscsi_cdb = (char *)&cdb; 18902 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18903 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 18904 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 18905 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18906 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 18907 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18908 ucmd_buf.uscsi_timeout = 60; 18909 18910 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 18911 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 18912 18913 switch (status) { 18914 case 0: 18915 /* Return failure if we did not get valid capacity data. */ 18916 if (ucmd_buf.uscsi_resid != 0) { 18917 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18918 return (EIO); 18919 } 18920 18921 /* 18922 * Read capacity and block size from the READ CAPACITY 10 data. 18923 * This data may be adjusted later due to device specific 18924 * issues. 18925 * 18926 * According to the SCSI spec, the READ CAPACITY 10 18927 * command returns the following: 18928 * 18929 * bytes 0-3: Maximum logical block address available. 18930 * (MSB in byte:0 & LSB in byte:3) 18931 * 18932 * bytes 4-7: Block length in bytes 18933 * (MSB in byte:4 & LSB in byte:7) 18934 * 18935 */ 18936 capacity = BE_32(capacity_buf[0]); 18937 lbasize = BE_32(capacity_buf[1]); 18938 18939 /* 18940 * Done with capacity_buf 18941 */ 18942 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18943 18944 /* 18945 * if the reported capacity is set to all 0xf's, then 18946 * this disk is too large and requires SBC-2 commands. 18947 * Reissue the request using READ CAPACITY 16. 18948 */ 18949 if (capacity == 0xffffffff) { 18950 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 18951 &lbasize, path_flag); 18952 if (status != 0) { 18953 return (status); 18954 } 18955 } 18956 break; /* Success! 
*/ 18957 case EIO: 18958 switch (ucmd_buf.uscsi_status) { 18959 case STATUS_RESERVATION_CONFLICT: 18960 status = EACCES; 18961 break; 18962 case STATUS_CHECK: 18963 /* 18964 * Check condition; look for ASC/ASCQ of 0x04/0x01 18965 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 18966 */ 18967 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18968 (sense_buf.es_add_code == 0x04) && 18969 (sense_buf.es_qual_code == 0x01)) { 18970 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18971 return (EAGAIN); 18972 } 18973 break; 18974 default: 18975 break; 18976 } 18977 /* FALLTHRU */ 18978 default: 18979 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18980 return (status); 18981 } 18982 18983 /* 18984 * Some ATAPI CD-ROM drives report inaccurate LBA size values 18985 * (2352 and 0 are common), so for these devices always force the value 18986 * to 2048 as required by the ATAPI specs. 18987 */ 18988 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 18989 lbasize = 2048; 18990 } 18991 18992 /* 18993 * Get the maximum LBA value from the READ CAPACITY data. 18994 * Here we assume that the Partial Medium Indicator (PMI) bit 18995 * was cleared when issuing the command. This means that the LBA 18996 * returned from the device is the LBA of the last logical block 18997 * on the logical unit. The actual logical block count will be 18998 * this value plus one. 18999 * 19000 * Currently the capacity is saved in terms of un->un_sys_blocksize, 19001 * so scale the capacity value to reflect this. 19002 */ 19003 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize); 19004 19005 #if defined(__i386) || defined(__amd64) 19006 /* 19007 * On x86, compensate for off-by-1 error (number of sectors on 19008 * media) (1175930) 19009 */ 19010 if (!ISREMOVABLE(un) && (lbasize == un->un_sys_blocksize)) { 19011 capacity -= 1; 19012 } 19013 #endif 19014 19015 /* 19016 * Copy the values from the READ CAPACITY command into the space 19017 * provided by the caller. 19018 */ 19019 *capp = capacity; 19020 *lbap = lbasize; 19021 19022 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 19023 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 19024 19025 /* 19026 * Both the lbasize and capacity from the device must be nonzero; 19027 * otherwise we assume that the values are not valid and return 19028 * failure to the caller. (4203735) 19029 */ 19030 if ((capacity == 0) || (lbasize == 0)) { 19031 return (EIO); 19032 } 19033 19034 return (0); 19035 } 19036 19037 /* 19038 * Function: sd_send_scsi_READ_CAPACITY_16 19039 * 19040 * Description: This routine uses the scsi READ CAPACITY 16 command to 19041 * determine the device capacity in number of blocks and the 19042 * device native block size. If this function returns a failure, 19043 * then the values in *capp and *lbap are undefined. 19044 * This routine should only be called by 19045 * sd_send_scsi_READ_CAPACITY, which will apply any device 19046 * specific adjustments to capacity and lbasize. 19047 * 19048 * Arguments: un - ptr to soft state struct for the target 19049 * capp - ptr to unsigned 64-bit variable to receive the 19050 * capacity value from the command. 19051 * lbap - ptr to unsigned 32-bit variable to receive the 19052 * block size value from the command 19053 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19054 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19055 * to use the USCSI "direct" chain and bypass the normal 19056 * command waitq. 
SD_PATH_DIRECT_PRIORITY is used when 19057 * this command is issued as part of an error recovery 19058 * action. 19059 * 19060 * Return Code: 0 - Success 19061 * EIO - IO error 19062 * EACCES - Reservation conflict detected 19063 * EAGAIN - Device is becoming ready 19064 * errno return code from sd_send_scsi_cmd() 19065 * 19066 * Context: Can sleep. Blocks until command completes. 19067 */ 19068 19069 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 19070 19071 static int 19072 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp, 19073 uint32_t *lbap, int path_flag) 19074 { 19075 struct scsi_extended_sense sense_buf; 19076 struct uscsi_cmd ucmd_buf; 19077 union scsi_cdb cdb; 19078 uint64_t *capacity16_buf; 19079 uint64_t capacity; 19080 uint32_t lbasize; 19081 int status; 19082 19083 ASSERT(un != NULL); 19084 ASSERT(!mutex_owned(SD_MUTEX(un))); 19085 ASSERT(capp != NULL); 19086 ASSERT(lbap != NULL); 19087 19088 SD_TRACE(SD_LOG_IO, un, 19089 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un); 19090 19091 /* 19092 * First send a READ_CAPACITY_16 command to the target. 19093 * 19094 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 19095 * Medium Indicator bit is cleared. The address field must be 19096 * zero if the PMI bit is zero. 19097 */ 19098 bzero(&cdb, sizeof (cdb)); 19099 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19100 19101 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 19102 19103 ucmd_buf.uscsi_cdb = (char *)&cdb; 19104 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 19105 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 19106 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 19107 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19108 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19109 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19110 ucmd_buf.uscsi_timeout = 60; 19111 19112 /* 19113 * Read Capacity (16) is a Service Action In command. One 19114 * command byte (0x9E) is overloaded for multiple operations, 19115 * with the second CDB byte specifying the desired operation. 19116 */ 19117 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 19118 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 19119 19120 /* 19121 * Fill in allocation length field 19122 */ 19123 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 19124 19125 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19126 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 19127 19128 switch (status) { 19129 case 0: 19130 /* Return failure if we did not get valid capacity data. */ 19131 if (ucmd_buf.uscsi_resid > 20) { 19132 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19133 return (EIO); 19134 } 19135 19136 /* 19137 * Read capacity and block size from the READ CAPACITY 16 data. 19138 * This data may be adjusted later due to device specific 19139 * issues. 19140 * 19141 * According to the SCSI spec, the READ CAPACITY 16 19142 * command returns the following: 19143 * 19144 * bytes 0-7: Maximum logical block address available. 19145 * (MSB in byte:0 & LSB in byte:7) 19146 * 19147 * bytes 8-11: Block length in bytes 19148 * (MSB in byte:8 & LSB in byte:11) 19149 * 19150 */ 19151 capacity = BE_64(capacity16_buf[0]); 19152 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 19153 19154 /* 19155 * Done with capacity16_buf 19156 */ 19157 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19158 19159 /* 19160 * if the reported capacity is set to all 0xf's, then 19161 * this disk is too large. 
This could only happen with 19162 * a device that supports LBAs larger than 64 bits which 19163 * are not defined by any current T10 standards. 19164 */ 19165 if (capacity == 0xffffffffffffffff) { 19166 return (EIO); 19167 } 19168 break; /* Success! */ 19169 case EIO: 19170 switch (ucmd_buf.uscsi_status) { 19171 case STATUS_RESERVATION_CONFLICT: 19172 status = EACCES; 19173 break; 19174 case STATUS_CHECK: 19175 /* 19176 * Check condition; look for ASC/ASCQ of 0x04/0x01 19177 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 19178 */ 19179 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19180 (sense_buf.es_add_code == 0x04) && 19181 (sense_buf.es_qual_code == 0x01)) { 19182 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19183 return (EAGAIN); 19184 } 19185 break; 19186 default: 19187 break; 19188 } 19189 /* FALLTHRU */ 19190 default: 19191 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19192 return (status); 19193 } 19194 19195 *capp = capacity; 19196 *lbap = lbasize; 19197 19198 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 19199 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 19200 19201 return (0); 19202 } 19203 19204 19205 /* 19206 * Function: sd_send_scsi_START_STOP_UNIT 19207 * 19208 * Description: Issue a scsi START STOP UNIT command to the target. 19209 * 19210 * Arguments: un - pointer to driver soft state (unit) structure for 19211 * this target. 19212 * flag - SD_TARGET_START 19213 * SD_TARGET_STOP 19214 * SD_TARGET_EJECT 19215 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19216 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19217 * to use the USCSI "direct" chain and bypass the normal 19218 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19219 * command is issued as part of an error recovery action. 19220 * 19221 * Return Code: 0 - Success 19222 * EIO - IO error 19223 * EACCES - Reservation conflict detected 19224 * ENXIO - Not Ready, medium not present 19225 * errno return code from sd_send_scsi_cmd() 19226 * 19227 * Context: Can sleep. 19228 */ 19229 19230 static int 19231 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 19232 { 19233 struct scsi_extended_sense sense_buf; 19234 union scsi_cdb cdb; 19235 struct uscsi_cmd ucmd_buf; 19236 int status; 19237 19238 ASSERT(un != NULL); 19239 ASSERT(!mutex_owned(SD_MUTEX(un))); 19240 19241 SD_TRACE(SD_LOG_IO, un, 19242 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 19243 19244 if (ISREMOVABLE(un) && 19245 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 19246 (un->un_f_start_stop_supported != TRUE)) { 19247 return (0); 19248 } 19249 19250 bzero(&cdb, sizeof (cdb)); 19251 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19252 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19253 19254 cdb.scc_cmd = SCMD_START_STOP; 19255 cdb.cdb_opaque[4] = (uchar_t)flag; 19256 19257 ucmd_buf.uscsi_cdb = (char *)&cdb; 19258 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19259 ucmd_buf.uscsi_bufaddr = NULL; 19260 ucmd_buf.uscsi_buflen = 0; 19261 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19262 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19263 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19264 ucmd_buf.uscsi_timeout = 200; 19265 19266 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19267 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 19268 19269 switch (status) { 19270 case 0: 19271 break; /* Success! 
*/ 19272 case EIO: 19273 switch (ucmd_buf.uscsi_status) { 19274 case STATUS_RESERVATION_CONFLICT: 19275 status = EACCES; 19276 break; 19277 case STATUS_CHECK: 19278 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 19279 switch (sense_buf.es_key) { 19280 case KEY_ILLEGAL_REQUEST: 19281 status = ENOTSUP; 19282 break; 19283 case KEY_NOT_READY: 19284 if (sense_buf.es_add_code == 0x3A) { 19285 status = ENXIO; 19286 } 19287 break; 19288 default: 19289 break; 19290 } 19291 } 19292 break; 19293 default: 19294 break; 19295 } 19296 break; 19297 default: 19298 break; 19299 } 19300 19301 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 19302 19303 return (status); 19304 } 19305 19306 19307 /* 19308 * Function: sd_start_stop_unit_callback 19309 * 19310 * Description: timeout(9F) callback to begin the recovery process for a 19311 * device that has spun down. 19312 * 19313 * Arguments: arg - pointer to associated softstate struct. 19314 * 19315 * Context: Executes in a timeout(9F) thread context 19316 */ 19317 19318 static void 19319 sd_start_stop_unit_callback(void *arg) 19320 { 19321 struct sd_lun *un = arg; 19322 ASSERT(un != NULL); 19323 ASSERT(!mutex_owned(SD_MUTEX(un))); 19324 19325 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 19326 19327 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 19328 } 19329 19330 19331 /* 19332 * Function: sd_start_stop_unit_task 19333 * 19334 * Description: Recovery procedure when a drive is spun down. 19335 * 19336 * Arguments: arg - pointer to associated softstate struct. 19337 * 19338 * Context: Executes in a taskq() thread context 19339 */ 19340 19341 static void 19342 sd_start_stop_unit_task(void *arg) 19343 { 19344 struct sd_lun *un = arg; 19345 19346 ASSERT(un != NULL); 19347 ASSERT(!mutex_owned(SD_MUTEX(un))); 19348 19349 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 19350 19351 /* 19352 * Some unformatted drives report a not ready error; no need to 19353 * restart if a format has been initiated. 19354 */ 19355 mutex_enter(SD_MUTEX(un)); 19356 if (un->un_f_format_in_progress == TRUE) { 19357 mutex_exit(SD_MUTEX(un)); 19358 return; 19359 } 19360 mutex_exit(SD_MUTEX(un)); 19361 19362 /* 19363 * When a START STOP command is issued from here, it is part of a 19364 * failure recovery operation and must be issued before any other 19365 * commands, including any pending retries. Thus it must be sent 19366 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 19367 * succeeds or not; we will start I/O after the attempt. 19368 */ 19369 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 19370 SD_PATH_DIRECT_PRIORITY); 19371 19372 /* 19373 * The above call blocks until the START_STOP_UNIT command completes. 19374 * Now that it has completed, we must re-try the original IO that 19375 * received the NOT READY condition in the first place. There are 19376 * three possible conditions here: 19377 * 19378 * (1) The original IO is on un_retry_bp. 19379 * (2) The original IO is on the regular wait queue, and un_retry_bp 19380 * is NULL. 19381 * (3) The original IO is on the regular wait queue, and un_retry_bp 19382 * points to some other, unrelated bp. 19383 * 19384 * For each case, we must call sd_start_cmds() with un_retry_bp 19385 * as the argument. If un_retry_bp is NULL, this will initiate 19386 * processing of the regular wait queue. If un_retry_bp is not NULL, 19387 * then this will process the bp on un_retry_bp. 
That may or may not 19388 * be the original IO, but that does not matter: the important thing 19389 * is to keep the IO processing going at this point. 19390 * 19391 * Note: This is a very specific error recovery sequence associated 19392 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 19393 * serialize the I/O with completion of the spin-up. 19394 */ 19395 mutex_enter(SD_MUTEX(un)); 19396 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19397 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 19398 un, un->un_retry_bp); 19399 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 19400 sd_start_cmds(un, un->un_retry_bp); 19401 mutex_exit(SD_MUTEX(un)); 19402 19403 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 19404 } 19405 19406 19407 /* 19408 * Function: sd_send_scsi_INQUIRY 19409 * 19410 * Description: Issue the scsi INQUIRY command. 19411 * 19412 * Arguments: un 19413 * bufaddr 19414 * buflen 19415 * evpd 19416 * page_code 19417 * residp 19418 * 19419 * Return Code: 0 - Success 19420 * errno return code from sd_send_scsi_cmd() 19421 * 19422 * Context: Can sleep. Does not return until command is completed. 19423 */ 19424 19425 static int 19426 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen, 19427 uchar_t evpd, uchar_t page_code, size_t *residp) 19428 { 19429 union scsi_cdb cdb; 19430 struct uscsi_cmd ucmd_buf; 19431 int status; 19432 19433 ASSERT(un != NULL); 19434 ASSERT(!mutex_owned(SD_MUTEX(un))); 19435 ASSERT(bufaddr != NULL); 19436 19437 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 19438 19439 bzero(&cdb, sizeof (cdb)); 19440 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19441 bzero(bufaddr, buflen); 19442 19443 cdb.scc_cmd = SCMD_INQUIRY; 19444 cdb.cdb_opaque[1] = evpd; 19445 cdb.cdb_opaque[2] = page_code; 19446 FORMG0COUNT(&cdb, buflen); 19447 19448 ucmd_buf.uscsi_cdb = (char *)&cdb; 19449 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19450 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19451 ucmd_buf.uscsi_buflen = buflen; 19452 ucmd_buf.uscsi_rqbuf = NULL; 19453 ucmd_buf.uscsi_rqlen = 0; 19454 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 19455 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 19456 19457 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19458 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_DIRECT); 19459 19460 if ((status == 0) && (residp != NULL)) { 19461 *residp = ucmd_buf.uscsi_resid; 19462 } 19463 19464 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 19465 19466 return (status); 19467 } 19468 19469 19470 /* 19471 * Function: sd_send_scsi_TEST_UNIT_READY 19472 * 19473 * Description: Issue the scsi TEST UNIT READY command. 19474 * This routine can be told to set the flag USCSI_DIAGNOSE to 19475 * prevent retrying failed commands. Use this when the intent 19476 * is either to check for device readiness, to clear a Unit 19477 * Attention, or to clear any outstanding sense data. 19478 * However, under specific conditions the expected behavior 19479 * is for retries to bring a device ready, so use the flag 19480 * with caution. 19481 * 19482 * Arguments: un 19483 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 19484 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 19485 * 0: don't check for media present, do retries on cmd. 
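 *		Flag values may be OR'ed together; SD_BYPASS_PM also
 *		routes the command via SD_PATH_DIRECT rather than
 *		SD_PATH_STANDARD (see the sd_send_scsi_cmd() call
 *		below).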
19486 * 19487 * Return Code: 0 - Success 19488 * EIO - IO error 19489 * EACCES - Reservation conflict detected 19490 * ENXIO - Not Ready, medium not present 19491 * errno return code from sd_send_scsi_cmd() 19492 * 19493 * Context: Can sleep. Does not return until command is completed. 19494 */ 19495 19496 static int 19497 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag) 19498 { 19499 struct scsi_extended_sense sense_buf; 19500 union scsi_cdb cdb; 19501 struct uscsi_cmd ucmd_buf; 19502 int status; 19503 19504 ASSERT(un != NULL); 19505 ASSERT(!mutex_owned(SD_MUTEX(un))); 19506 19507 SD_TRACE(SD_LOG_IO, un, 19508 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 19509 19510 /* 19511 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 19512 * timeouts when they receive a TUR and the queue is not empty. Check 19513 * the configuration flag set during attach (indicating the drive has 19514 * this firmware bug) and un_ncmds_in_transport before issuing the 19515 * TUR. If there are 19516 * pending commands, return success; this is a bit arbitrary but is OK 19517 * for non-removables (i.e., the elite1 disks) and non-clustering 19518 * configurations. 19519 */ 19520 if (un->un_f_cfg_tur_check == TRUE) { 19521 mutex_enter(SD_MUTEX(un)); 19522 if (un->un_ncmds_in_transport != 0) { 19523 mutex_exit(SD_MUTEX(un)); 19524 return (0); 19525 } 19526 mutex_exit(SD_MUTEX(un)); 19527 } 19528 19529 bzero(&cdb, sizeof (cdb)); 19530 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19531 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19532 19533 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 19534 19535 ucmd_buf.uscsi_cdb = (char *)&cdb; 19536 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19537 ucmd_buf.uscsi_bufaddr = NULL; 19538 ucmd_buf.uscsi_buflen = 0; 19539 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19540 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19541 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19542 19543 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 19544 if ((flag & SD_DONT_RETRY_TUR) != 0) { 19545 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 19546 } 19547 ucmd_buf.uscsi_timeout = 60; 19548 19549 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19550 UIO_SYSSPACE, UIO_SYSSPACE, 19551 ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : SD_PATH_STANDARD)); 19552 19553 switch (status) { 19554 case 0: 19555 break; /* Success! */ 19556 case EIO: 19557 switch (ucmd_buf.uscsi_status) { 19558 case STATUS_RESERVATION_CONFLICT: 19559 status = EACCES; 19560 break; 19561 case STATUS_CHECK: 19562 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 19563 break; 19564 } 19565 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19566 (sense_buf.es_key == KEY_NOT_READY) && 19567 (sense_buf.es_add_code == 0x3A)) { 19568 status = ENXIO; 19569 } 19570 break; 19571 default: 19572 break; 19573 } 19574 break; 19575 default: 19576 break; 19577 } 19578 19579 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 19580 19581 return (status); 19582 } 19583 19584 19585 /* 19586 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 19587 * 19588 * Description: Issue the scsi PERSISTENT RESERVE IN command. 19589 * 19590 * Arguments: un 19591 * 19592 * Return Code: 0 - Success 19593 * EACCES 19594 * ENOTSUP 19595 * errno return code from sd_send_scsi_cmd() 19596 * 19597 * Context: Can sleep. Does not return until command is completed. 
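 *
 * usr_cmd selects the PRIN service action: SD_READ_KEYS returns the
 * registered reservation keys and SD_READ_RESV the active
 * reservations. If data_bufp is NULL, data_len must be 0 and a
 * default buffer of MHIOC_RESV_KEY_SIZE bytes is allocated and
 * freed internally.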
19598 */ 19599 19600 static int 19601 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd, 19602 uint16_t data_len, uchar_t *data_bufp) 19603 { 19604 struct scsi_extended_sense sense_buf; 19605 union scsi_cdb cdb; 19606 struct uscsi_cmd ucmd_buf; 19607 int status; 19608 int no_caller_buf = FALSE; 19609 19610 ASSERT(un != NULL); 19611 ASSERT(!mutex_owned(SD_MUTEX(un))); 19612 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 19613 19614 SD_TRACE(SD_LOG_IO, un, 19615 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 19616 19617 bzero(&cdb, sizeof (cdb)); 19618 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19619 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19620 if (data_bufp == NULL) { 19621 /* Allocate a default buf if the caller did not give one */ 19622 ASSERT(data_len == 0); 19623 data_len = MHIOC_RESV_KEY_SIZE; 19624 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 19625 no_caller_buf = TRUE; 19626 } 19627 19628 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 19629 cdb.cdb_opaque[1] = usr_cmd; 19630 FORMG1COUNT(&cdb, data_len); 19631 19632 ucmd_buf.uscsi_cdb = (char *)&cdb; 19633 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19634 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 19635 ucmd_buf.uscsi_buflen = data_len; 19636 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19637 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19638 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19639 ucmd_buf.uscsi_timeout = 60; 19640 19641 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19642 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD); 19643 19644 switch (status) { 19645 case 0: 19646 break; /* Success! */ 19647 case EIO: 19648 switch (ucmd_buf.uscsi_status) { 19649 case STATUS_RESERVATION_CONFLICT: 19650 status = EACCES; 19651 break; 19652 case STATUS_CHECK: 19653 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19654 (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) { 19655 status = ENOTSUP; 19656 } 19657 break; 19658 default: 19659 break; 19660 } 19661 break; 19662 default: 19663 break; 19664 } 19665 19666 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 19667 19668 if (no_caller_buf == TRUE) { 19669 kmem_free(data_bufp, data_len); 19670 } 19671 19672 return (status); 19673 } 19674 19675 19676 /* 19677 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 19678 * 19679 * Description: This routine is the driver entry point for handling CD-ROM 19680 * multi-host persistent reservation requests (e.g. MHIOCGRP_REGISTER, 19681 * MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT) by sending the SCSI-3 19682 * PROUT commands to the device. 19683 * 19684 * Arguments: un - Pointer to soft state struct for the target. 19685 * usr_cmd SCSI-3 reservation facility command (one of 19686 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 19687 * SD_SCSI3_PREEMPTANDABORT) 19688 * usr_bufp - user provided pointer to a register, reserve descriptor, 19689 * or preempt and abort structure (mhioc_register_t, 19690 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 19691 * 19692 * Return Code: 0 - Success 19693 * EACCES 19694 * ENOTSUP 19695 * errno return code from sd_send_scsi_cmd() 19696 * 19697 * Context: Can sleep. Does not return until command is completed. 
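 *
 * The 24-byte PROUT parameter list (sd_prout_t) is built below from
 * the user structure: reservation key, service action key, scope
 * specific address and APTPL bit, as appropriate for the service
 * action. SD_SCSI3_PREEMPTANDABORT also sets USCSI_HEAD so that the
 * command is queued ahead of other pending commands.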
19698 */ 19699 19700 static int 19701 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 19702 uchar_t *usr_bufp) 19703 { 19704 struct scsi_extended_sense sense_buf; 19705 union scsi_cdb cdb; 19706 struct uscsi_cmd ucmd_buf; 19707 int status; 19708 uchar_t data_len = sizeof (sd_prout_t); 19709 sd_prout_t *prp; 19710 19711 ASSERT(un != NULL); 19712 ASSERT(!mutex_owned(SD_MUTEX(un))); 19713 ASSERT(data_len == 24); /* required by scsi spec */ 19714 19715 SD_TRACE(SD_LOG_IO, un, 19716 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 19717 19718 if (usr_bufp == NULL) { 19719 return (EINVAL); 19720 } 19721 19722 bzero(&cdb, sizeof (cdb)); 19723 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19724 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19725 prp = kmem_zalloc(data_len, KM_SLEEP); 19726 19727 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 19728 cdb.cdb_opaque[1] = usr_cmd; 19729 FORMG1COUNT(&cdb, data_len); 19730 19731 ucmd_buf.uscsi_cdb = (char *)&cdb; 19732 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19733 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 19734 ucmd_buf.uscsi_buflen = data_len; 19735 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19736 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19737 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 19738 ucmd_buf.uscsi_timeout = 60; 19739 19740 switch (usr_cmd) { 19741 case SD_SCSI3_REGISTER: { 19742 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 19743 19744 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19745 bcopy(ptr->newkey.key, prp->service_key, 19746 MHIOC_RESV_KEY_SIZE); 19747 prp->aptpl = ptr->aptpl; 19748 break; 19749 } 19750 case SD_SCSI3_RESERVE: 19751 case SD_SCSI3_RELEASE: { 19752 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 19753 19754 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19755 prp->scope_address = BE_32(ptr->scope_specific_addr); 19756 cdb.cdb_opaque[2] = ptr->type; 19757 break; 19758 } 19759 case SD_SCSI3_PREEMPTANDABORT: { 19760 mhioc_preemptandabort_t *ptr = 19761 (mhioc_preemptandabort_t *)usr_bufp; 19762 19763 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19764 bcopy(ptr->victim_key.key, prp->service_key, 19765 MHIOC_RESV_KEY_SIZE); 19766 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 19767 cdb.cdb_opaque[2] = ptr->resvdesc.type; 19768 ucmd_buf.uscsi_flags |= USCSI_HEAD; 19769 break; 19770 } 19771 case SD_SCSI3_REGISTERANDIGNOREKEY: 19772 { 19773 mhioc_registerandignorekey_t *ptr; 19774 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 19775 bcopy(ptr->newkey.key, 19776 prp->service_key, MHIOC_RESV_KEY_SIZE); 19777 prp->aptpl = ptr->aptpl; 19778 break; 19779 } 19780 default: 19781 ASSERT(FALSE); 19782 break; 19783 } 19784 19785 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19786 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD); 19787 19788 switch (status) { 19789 case 0: 19790 break; /* Success! 
*/ 19791 case EIO: 19792 switch (ucmd_buf.uscsi_status) { 19793 case STATUS_RESERVATION_CONFLICT: 19794 status = EACCES; 19795 break; 19796 case STATUS_CHECK: 19797 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19798 (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) { 19799 status = ENOTSUP; 19800 } 19801 break; 19802 default: 19803 break; 19804 } 19805 break; 19806 default: 19807 break; 19808 } 19809 19810 kmem_free(prp, data_len); 19811 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 19812 return (status); 19813 } 19814 19815 19816 /* 19817 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 19818 * 19819 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 19820 * 19821 * Arguments: un - pointer to the target's soft state struct 19822 * 19823 * Return Code: 0 - success 19824 * errno-type error code 19825 * 19826 * Context: kernel thread context only. 19827 */ 19828 19829 static int 19830 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un) 19831 { 19832 struct scsi_extended_sense sense_buf; 19833 union scsi_cdb cdb; 19834 struct uscsi_cmd ucmd_buf; 19835 int status; 19836 19837 ASSERT(un != NULL); 19838 ASSERT(!mutex_owned(SD_MUTEX(un))); 19839 19840 SD_TRACE(SD_LOG_IO, un, 19841 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 19842 19843 bzero(&cdb, sizeof (cdb)); 19844 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19845 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19846 19847 cdb.scc_cmd = SCMD_SYNCHRONIZE_CACHE; 19848 19849 ucmd_buf.uscsi_cdb = (char *)&cdb; 19850 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19851 ucmd_buf.uscsi_bufaddr = NULL; 19852 ucmd_buf.uscsi_buflen = 0; 19853 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19854 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19855 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19856 ucmd_buf.uscsi_timeout = 240; 19857 19858 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19859 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_DIRECT); 19860 19861 switch (status) { 19862 case 0: 19863 break; /* Success! */ 19864 case EIO: 19865 switch (ucmd_buf.uscsi_status) { 19866 case STATUS_RESERVATION_CONFLICT: 19867 /* Ignore reservation conflict */ 19868 status = 0; 19869 goto done; 19870 19871 case STATUS_CHECK: 19872 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19873 (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) { 19874 /* Ignore Illegal Request error */ 19875 status = 0; 19876 goto done; 19877 } 19878 break; 19879 default: 19880 break; 19881 } 19882 /* FALLTHRU */ 19883 default: 19884 /* Ignore error if the media is not present. */ 19885 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 19886 status = 0; 19887 goto done; 19888 } 19889 /* If we reach this, we had an error */ 19890 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 19891 "SYNCHRONIZE CACHE command failed (%d)\n", status); 19892 break; 19893 } 19894 19895 done: 19896 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: exit\n"); 19897 19898 return (status); 19899 } 19900 19901 19902 /* 19903 * Function: sd_send_scsi_GET_CONFIGURATION 19904 * 19905 * Description: Issues the get configuration command to the device. 19906 * Called from sd_check_for_writable_cd & sd_get_media_info 19907 * caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN 19908 * Arguments: un 19909 * ucmdbuf 19910 * rqbuf 19911 * rqbuflen 19912 * bufaddr 19913 * buflen 19914 * 19915 * Return Code: 0 - Success 19916 * errno return code from sd_send_scsi_cmd() 19917 * 19918 * Context: Can sleep. Does not return until command is completed. 
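 *
 * The arguments follow the signature below: ucmdbuf is the uscsi
 * command struct filled in by this routine, rqbuf/rqbuflen describe
 * the request sense buffer, and bufaddr/buflen describe the data-in
 * buffer, which must be SD_PROFILE_HEADER_LEN bytes. An illustrative
 * sketch of a caller (patterned on sd_get_media_info() below):
 *
 *	struct uscsi_cmd com;
 *	uchar_t *rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
 *	uchar_t *out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
 *	int rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf,
 *	    SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN);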
19919 *
19920 */
19921
19922 static int
19923 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf,
19924 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen)
19925 {
19926 char cdb[CDB_GROUP1];
19927 int status;
19928
19929 ASSERT(un != NULL);
19930 ASSERT(!mutex_owned(SD_MUTEX(un)));
19931 ASSERT(bufaddr != NULL);
19932 ASSERT(ucmdbuf != NULL);
19933 ASSERT(rqbuf != NULL);
19934
19935 SD_TRACE(SD_LOG_IO, un,
19936 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);
19937
19938 bzero(cdb, sizeof (cdb));
19939 bzero(ucmdbuf, sizeof (struct uscsi_cmd));
19940 bzero(rqbuf, rqbuflen);
19941 bzero(bufaddr, buflen);
19942
19943 /*
19944 * Set up cdb field for the get configuration command.
19945 */
19946 cdb[0] = SCMD_GET_CONFIGURATION;
19947 cdb[1] = 0x02; /* Requested Type */
19948 cdb[8] = SD_PROFILE_HEADER_LEN;
19949 ucmdbuf->uscsi_cdb = cdb;
19950 ucmdbuf->uscsi_cdblen = CDB_GROUP1;
19951 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
19952 ucmdbuf->uscsi_buflen = buflen;
19953 ucmdbuf->uscsi_timeout = sd_io_time;
19954 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
19955 ucmdbuf->uscsi_rqlen = rqbuflen;
19956 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;
19957
19958 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, UIO_SYSSPACE,
19959 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD);
19960
19961 switch (status) {
19962 case 0:
19963 break; /* Success! */
19964 case EIO:
19965 switch (ucmdbuf->uscsi_status) {
19966 case STATUS_RESERVATION_CONFLICT:
19967 status = EACCES;
19968 break;
19969 default:
19970 break;
19971 }
19972 break;
19973 default:
19974 break;
19975 }
19976
19977 if (status == 0) {
19978 SD_DUMP_MEMORY(un, SD_LOG_IO,
19979 "sd_send_scsi_GET_CONFIGURATION: data",
19980 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
19981 }
19982
19983 SD_TRACE(SD_LOG_IO, un,
19984 "sd_send_scsi_GET_CONFIGURATION: exit\n");
19985
19986 return (status);
19987 }
19988
19989 /*
19990 * Function: sd_send_scsi_feature_GET_CONFIGURATION
19991 *
19992 * Description: Issues the get configuration command to the device to
19993 * retrieve a specific feature. Called from
19994 * sd_check_for_writable_cd & sd_set_mmc_caps.
19995 * Arguments: un - pointer to the target's soft state struct
19996 * ucmdbuf - uscsi command struct filled in by this routine
19997 * rqbuf - request sense buffer
19998 * rqbuflen - size of the request sense buffer
19999 * bufaddr - buffer to receive the feature data
20000 * buflen - size of the feature data buffer
20001 * feature - feature number to retrieve
20002 *
20003 * Return Code: 0 - Success
20004 * errno return code from sd_send_scsi_cmd()
20005 *
20006 * Context: Can sleep. Does not return until command is completed.
20007 *
20008 */
20009 static int
20010 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
20011 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
20012 uchar_t *bufaddr, uint_t buflen, char feature)
20013 {
20014 char cdb[CDB_GROUP1];
20015 int status;
20016
20017 ASSERT(un != NULL);
20018 ASSERT(!mutex_owned(SD_MUTEX(un)));
20019 ASSERT(bufaddr != NULL);
20020 ASSERT(ucmdbuf != NULL);
20021 ASSERT(rqbuf != NULL);
20022
20023 SD_TRACE(SD_LOG_IO, un,
20024 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un);
20025
20026 bzero(cdb, sizeof (cdb));
20027 bzero(ucmdbuf, sizeof (struct uscsi_cmd));
20028 bzero(rqbuf, rqbuflen);
20029 bzero(bufaddr, buflen);
20030
20031 /*
20032 * Set up cdb field for the get configuration command.
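 * For reference, the MMC GET CONFIGURATION (0x46) CDB layout assumed
 * here: byte 1 carries the RT field (0x02 = return the descriptor for
 * the single named feature), bytes 2-3 carry the starting feature
 * number (low byte set from 'feature' below), and bytes 7-8 carry the
 * allocation length (low byte set from buflen).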
20033 */
20034 cdb[0] = SCMD_GET_CONFIGURATION;
20035 cdb[1] = 0x02; /* Requested Type */
20036 cdb[3] = feature;
20037 cdb[8] = buflen;
20038 ucmdbuf->uscsi_cdb = cdb;
20039 ucmdbuf->uscsi_cdblen = CDB_GROUP1;
20040 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
20041 ucmdbuf->uscsi_buflen = buflen;
20042 ucmdbuf->uscsi_timeout = sd_io_time;
20043 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
20044 ucmdbuf->uscsi_rqlen = rqbuflen;
20045 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;
20046
20047 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, UIO_SYSSPACE,
20048 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD);
20049
20050 switch (status) {
20051 case 0:
20052 break; /* Success! */
20053 case EIO:
20054 switch (ucmdbuf->uscsi_status) {
20055 case STATUS_RESERVATION_CONFLICT:
20056 status = EACCES;
20057 break;
20058 default:
20059 break;
20060 }
20061 break;
20062 default:
20063 break;
20064 }
20065
20066 if (status == 0) {
20067 SD_DUMP_MEMORY(un, SD_LOG_IO,
20068 "sd_send_scsi_feature_GET_CONFIGURATION: data",
20069 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
20070 }
20071
20072 SD_TRACE(SD_LOG_IO, un,
20073 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");
20074
20075 return (status);
20076 }
20077
20078
20079 /*
20080 * Function: sd_send_scsi_MODE_SENSE
20081 *
20082 * Description: Utility function for issuing a scsi MODE SENSE command.
20083 * Note: This routine uses a consistent implementation for Group0,
20084 * Group1, and Group2 commands across all platforms. ATAPI devices
20085 * use Group 1 Read/Write and Group 2 Mode Sense/Select commands.
20086 *
20087 * Arguments: un - pointer to the softstate struct for the target.
20088 * cdbsize - size of the CDB to be used (CDB_GROUP0 (6 byte), or
20089 * CDB_GROUP[1|2] (10 byte)).
20090 * bufaddr - buffer for page data retrieved from the target.
20091 * buflen - size of page to be retrieved.
20092 * page_code - page code of data to be retrieved from the target.
20093 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20094 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20095 * to use the USCSI "direct" chain and bypass the normal
20096 * command waitq.
20097 *
20098 * Return Code: 0 - Success
20099 * errno return code from sd_send_scsi_cmd()
20100 *
20101 * Context: Can sleep. Does not return until command is completed.
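 *
 * An illustrative sketch of a caller (the page and buffer size are
 * example values, not taken from this file):
 *
 *	uchar_t page_buf[64];
 *	int rc = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, page_buf,
 *	    sizeof (page_buf), MODEPAGE_CACHING, SD_PATH_DIRECT);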
20102 */
20103
20104 static int
20105 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
20106 size_t buflen, uchar_t page_code, int path_flag)
20107 {
20108 struct scsi_extended_sense sense_buf;
20109 union scsi_cdb cdb;
20110 struct uscsi_cmd ucmd_buf;
20111 int status;
20112
20113 ASSERT(un != NULL);
20114 ASSERT(!mutex_owned(SD_MUTEX(un)));
20115 ASSERT(bufaddr != NULL);
20116 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
20117 (cdbsize == CDB_GROUP2));
20118
20119 SD_TRACE(SD_LOG_IO, un,
20120 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);
20121
20122 bzero(&cdb, sizeof (cdb));
20123 bzero(&ucmd_buf, sizeof (ucmd_buf));
20124 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20125 bzero(bufaddr, buflen);
20126
20127 if (cdbsize == CDB_GROUP0) {
20128 cdb.scc_cmd = SCMD_MODE_SENSE;
20129 cdb.cdb_opaque[2] = page_code;
20130 FORMG0COUNT(&cdb, buflen);
20131 } else {
20132 cdb.scc_cmd = SCMD_MODE_SENSE_G1;
20133 cdb.cdb_opaque[2] = page_code;
20134 FORMG1COUNT(&cdb, buflen);
20135 }
20136
20137 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
20138
20139 ucmd_buf.uscsi_cdb = (char *)&cdb;
20140 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
20141 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
20142 ucmd_buf.uscsi_buflen = buflen;
20143 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20144 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20145 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20146 ucmd_buf.uscsi_timeout = 60;
20147
20148 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
20149 UIO_SYSSPACE, UIO_SYSSPACE, path_flag);
20150
20151 switch (status) {
20152 case 0:
20153 break; /* Success! */
20154 case EIO:
20155 switch (ucmd_buf.uscsi_status) {
20156 case STATUS_RESERVATION_CONFLICT:
20157 status = EACCES;
20158 break;
20159 default:
20160 break;
20161 }
20162 break;
20163 default:
20164 break;
20165 }
20166
20167 if (status == 0) {
20168 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
20169 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
20170 }
20171 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");
20172
20173 return (status);
20174 }
20175
20176
20177 /*
20178 * Function: sd_send_scsi_MODE_SELECT
20179 *
20180 * Description: Utility function for issuing a scsi MODE SELECT command.
20181 * Note: This routine uses a consistent implementation for Group0,
20182 * Group1, and Group2 commands across all platforms. ATAPI devices
20183 * use Group 1 Read/Write and Group 2 Mode Sense/Select commands.
20184 *
20185 * Arguments: un - pointer to the softstate struct for the target.
20186 * cdbsize - size of the CDB to be used (CDB_GROUP0 (6 byte), or
20187 * CDB_GROUP[1|2] (10 byte)).
20188 * bufaddr - buffer containing the page data to be sent to the target.
20189 * buflen - size of the page data to be transferred.
20190 * save_page - boolean to determine if the SP bit should be set.
20191 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20192 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20193 * to use the USCSI "direct" chain and bypass the normal
20194 * command waitq.
20195 *
20196 * Return Code: 0 - Success
20197 * errno return code from sd_send_scsi_cmd()
20198 *
20199 * Context: Can sleep. Does not return until command is completed.
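 *
 * An illustrative sketch of a caller: writing back a page previously
 * fetched with sd_send_scsi_MODE_SENSE(), with the SP bit set so the
 * drive saves it (page_buf and page_len are assumed values):
 *
 *	rc = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, page_buf,
 *	    page_len, SD_SAVE_PAGE, SD_PATH_DIRECT);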
20200 */
20201
20202 static int
20203 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr,
20204 size_t buflen, uchar_t save_page, int path_flag)
20205 {
20206 struct scsi_extended_sense sense_buf;
20207 union scsi_cdb cdb;
20208 struct uscsi_cmd ucmd_buf;
20209 int status;
20210
20211 ASSERT(un != NULL);
20212 ASSERT(!mutex_owned(SD_MUTEX(un)));
20213 ASSERT(bufaddr != NULL);
20214 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
20215 (cdbsize == CDB_GROUP2));
20216
20217 SD_TRACE(SD_LOG_IO, un,
20218 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);
20219
20220 bzero(&cdb, sizeof (cdb));
20221 bzero(&ucmd_buf, sizeof (ucmd_buf));
20222 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20223
20224 /* Set the PF bit for many third party drives */
20225 cdb.cdb_opaque[1] = 0x10;
20226
20227 /* Set the savepage(SP) bit if given */
20228 if (save_page == SD_SAVE_PAGE) {
20229 cdb.cdb_opaque[1] |= 0x01;
20230 }
20231
20232 if (cdbsize == CDB_GROUP0) {
20233 cdb.scc_cmd = SCMD_MODE_SELECT;
20234 FORMG0COUNT(&cdb, buflen);
20235 } else {
20236 cdb.scc_cmd = SCMD_MODE_SELECT_G1;
20237 FORMG1COUNT(&cdb, buflen);
20238 }
20239
20240 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
20241
20242 ucmd_buf.uscsi_cdb = (char *)&cdb;
20243 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
20244 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
20245 ucmd_buf.uscsi_buflen = buflen;
20246 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20247 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20248 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
20249 ucmd_buf.uscsi_timeout = 60;
20250
20251 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
20252 UIO_SYSSPACE, UIO_SYSSPACE, path_flag);
20253
20254 switch (status) {
20255 case 0:
20256 break; /* Success! */
20257 case EIO:
20258 switch (ucmd_buf.uscsi_status) {
20259 case STATUS_RESERVATION_CONFLICT:
20260 status = EACCES;
20261 break;
20262 default:
20263 break;
20264 }
20265 break;
20266 default:
20267 break;
20268 }
20269
20270 if (status == 0) {
20271 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
20272 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
20273 }
20274 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");
20275
20276 return (status);
20277 }
20278
20279
20280 /*
20281 * Function: sd_send_scsi_RDWR
20282 *
20283 * Description: Issue a scsi READ or WRITE command with the given parameters.
20284 *
20285 * Arguments: un: Pointer to the sd_lun struct for the target.
20286 * cmd: SCMD_READ or SCMD_WRITE
20287 * bufaddr: Address of the caller's buffer supplying (for a
20288 * write) or receiving (for a read) the RDWR data.
20289 * buflen: Length of the caller's buffer.
20290 * start_block: Block number for the start of the RDWR operation.
20291 * (Assumes target-native block size.)
20292 * path_flag: SD_PATH_DIRECT to use the USCSI "direct" chain and
20293 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20294 * to use the USCSI "direct" chain and bypass the normal
20295 * command waitq.
20296 * Note: no residual count is returned by this routine.
20297 *
20298 * Return Code: 0 - Success
20299 * errno return code from sd_send_scsi_cmd()
20300 *
20301 * Context: Can sleep. Does not return until command is completed.
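 *
 * An illustrative sketch of a caller reading the first target block
 * (blk_buf is an assumed caller-supplied buffer of at least
 * un_tgt_blocksize bytes):
 *
 *	rc = sd_send_scsi_RDWR(un, SCMD_READ, blk_buf,
 *	    un->un_tgt_blocksize, (daddr_t)0, SD_PATH_DIRECT);
 *
 * The routine chooses a 6-, 10- or 16-byte CDB by itself, based on the
 * magnitude of start_block (see the cdbsize computation below).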
20302 */ 20303 20304 static int 20305 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 20306 size_t buflen, daddr_t start_block, int path_flag) 20307 { 20308 struct scsi_extended_sense sense_buf; 20309 union scsi_cdb cdb; 20310 struct uscsi_cmd ucmd_buf; 20311 uint32_t block_count; 20312 int status; 20313 int cdbsize; 20314 uchar_t flag; 20315 20316 ASSERT(un != NULL); 20317 ASSERT(!mutex_owned(SD_MUTEX(un))); 20318 ASSERT(bufaddr != NULL); 20319 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 20320 20321 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 20322 20323 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 20324 return (EINVAL); 20325 } 20326 20327 mutex_enter(SD_MUTEX(un)); 20328 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 20329 mutex_exit(SD_MUTEX(un)); 20330 20331 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 20332 20333 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 20334 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 20335 bufaddr, buflen, start_block, block_count); 20336 20337 bzero(&cdb, sizeof (cdb)); 20338 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20339 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20340 20341 /* Compute CDB size to use */ 20342 if (start_block > 0xffffffff) 20343 cdbsize = CDB_GROUP4; 20344 else if ((start_block & 0xFFE00000) || 20345 (un->un_f_cfg_is_atapi == TRUE)) 20346 cdbsize = CDB_GROUP1; 20347 else 20348 cdbsize = CDB_GROUP0; 20349 20350 switch (cdbsize) { 20351 case CDB_GROUP0: /* 6-byte CDBs */ 20352 cdb.scc_cmd = cmd; 20353 FORMG0ADDR(&cdb, start_block); 20354 FORMG0COUNT(&cdb, block_count); 20355 break; 20356 case CDB_GROUP1: /* 10-byte CDBs */ 20357 cdb.scc_cmd = cmd | SCMD_GROUP1; 20358 FORMG1ADDR(&cdb, start_block); 20359 FORMG1COUNT(&cdb, block_count); 20360 break; 20361 case CDB_GROUP4: /* 16-byte CDBs */ 20362 cdb.scc_cmd = cmd | SCMD_GROUP4; 20363 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 20364 FORMG4COUNT(&cdb, block_count); 20365 break; 20366 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 20367 default: 20368 /* All others reserved */ 20369 return (EINVAL); 20370 } 20371 20372 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 20373 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 20374 20375 ucmd_buf.uscsi_cdb = (char *)&cdb; 20376 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 20377 ucmd_buf.uscsi_bufaddr = bufaddr; 20378 ucmd_buf.uscsi_buflen = buflen; 20379 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20380 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20381 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 20382 ucmd_buf.uscsi_timeout = 60; 20383 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 20384 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 20385 switch (status) { 20386 case 0: 20387 break; /* Success! */ 20388 case EIO: 20389 switch (ucmd_buf.uscsi_status) { 20390 case STATUS_RESERVATION_CONFLICT: 20391 status = EACCES; 20392 break; 20393 default: 20394 break; 20395 } 20396 break; 20397 default: 20398 break; 20399 } 20400 20401 if (status == 0) { 20402 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 20403 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20404 } 20405 20406 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 20407 20408 return (status); 20409 } 20410 20411 20412 /* 20413 * Function: sd_send_scsi_LOG_SENSE 20414 * 20415 * Description: Issue a scsi LOG_SENSE command with the given parameters. 20416 * 20417 * Arguments: un: Pointer to the sd_lun struct for the target. 
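 *		bufaddr: buffer to receive the log page data.
 *		buflen: length of the caller's buffer.
 *		page_code: log page to retrieve (e.g. TEMPERATURE_PAGE).
 *		page_control: value placed in the page control field
 *			(upper two bits of CDB byte 2).
 *		param_ptr: parameter pointer placed in CDB bytes 5-6.
 *		path_flag: SD_PATH_DIRECT or SD_PATH_STANDARD, as for
 *			the other sd_send_scsi_* routines above.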
20418 * 20419 * Return Code: 0 - Success 20420 * errno return code from sd_send_scsi_cmd() 20421 * 20422 * Context: Can sleep. Does not return until command is completed. 20423 */ 20424 20425 static int 20426 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 20427 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 20428 int path_flag) 20429 20430 { 20431 struct scsi_extended_sense sense_buf; 20432 union scsi_cdb cdb; 20433 struct uscsi_cmd ucmd_buf; 20434 int status; 20435 20436 ASSERT(un != NULL); 20437 ASSERT(!mutex_owned(SD_MUTEX(un))); 20438 20439 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 20440 20441 bzero(&cdb, sizeof (cdb)); 20442 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20443 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20444 20445 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 20446 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 20447 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 20448 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 20449 FORMG1COUNT(&cdb, buflen); 20450 20451 ucmd_buf.uscsi_cdb = (char *)&cdb; 20452 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20453 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20454 ucmd_buf.uscsi_buflen = buflen; 20455 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20456 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20457 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20458 ucmd_buf.uscsi_timeout = 60; 20459 20460 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 20461 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 20462 20463 switch (status) { 20464 case 0: 20465 break; 20466 case EIO: 20467 switch (ucmd_buf.uscsi_status) { 20468 case STATUS_RESERVATION_CONFLICT: 20469 status = EACCES; 20470 break; 20471 case STATUS_CHECK: 20472 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20473 (sense_buf.es_key == KEY_ILLEGAL_REQUEST) && 20474 (sense_buf.es_add_code == 0x24)) { 20475 /* 20476 * ASC 0x24: INVALID FIELD IN CDB 20477 */ 20478 switch (page_code) { 20479 case START_STOP_CYCLE_PAGE: 20480 /* 20481 * The start stop cycle counter is 20482 * implemented as page 0x31 in earlier 20483 * generation disks. In new generation 20484 * disks the start stop cycle counter is 20485 * implemented as page 0xE. To properly 20486 * handle this case if an attempt for 20487 * log page 0xE is made and fails we 20488 * will try again using page 0x31. 20489 * 20490 * Network storage BU committed to 20491 * maintain the page 0x31 for this 20492 * purpose and will not have any other 20493 * page implemented with page code 0x31 20494 * until all disks transition to the 20495 * standard page. 
20496 */
20497 mutex_enter(SD_MUTEX(un));
20498 un->un_start_stop_cycle_page =
20499 START_STOP_CYCLE_VU_PAGE;
20500 cdb.cdb_opaque[2] =
20501 (char)(page_control << 6) |
20502 un->un_start_stop_cycle_page;
20503 mutex_exit(SD_MUTEX(un));
20504 status = sd_send_scsi_cmd(
20505 SD_GET_DEV(un), &ucmd_buf,
20506 UIO_SYSSPACE, UIO_SYSSPACE,
20507 UIO_SYSSPACE, path_flag);
20508
20509 break;
20510 case TEMPERATURE_PAGE:
20511 status = ENOTTY;
20512 break;
20513 default:
20514 break;
20515 }
20516 }
20517 break;
20518 default:
20519 break;
20520 }
20521 break;
20522 default:
20523 break;
20524 }
20525
20526 if (status == 0) {
20527 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
20528 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
20529 }
20530
20531 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");
20532
20533 return (status);
20534 }
20535
20536
20537 /*
20538 * Function: sdioctl
20539 *
20540 * Description: Driver's ioctl(9e) entry point function.
20541 *
20542 * Arguments: dev - device number
20543 * cmd - ioctl operation to be performed
20544 * arg - user argument, contains data to be set or reference
20545 * parameter for get
20546 * flag - bit flag, indicating open settings, 32/64 bit type
20547 * cred_p - user credential pointer
20548 * rval_p - calling process return value (OPT)
20549 *
20550 * Return Code: EINVAL
20551 * ENOTTY
20552 * ENXIO
20553 * EIO
20554 * EFAULT
20555 * ENOTSUP
20556 * EPERM
20557 *
20558 * Context: Called from the device switch at normal priority.
20559 */
20560
20561 static int
20562 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
20563 {
20564 struct sd_lun *un = NULL;
20565 int geom_validated = FALSE;
20566 int err = 0;
20567 int i = 0;
20568 cred_t *cr;
20569
20570 /*
20571 * All device accesses go thru sdstrategy where we check on suspend
20572 * status
20573 */
20574 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
20575 return (ENXIO);
20576 }
20577
20578 ASSERT(!mutex_owned(SD_MUTEX(un)));
20579
20580 /*
20581 * Moved this wait from sd_uscsi_strategy to here for
20582 * reasons of deadlock prevention. Internal driver commands,
20583 * specifically those to change a device's power level, result
20584 * in a call to sd_uscsi_strategy.
20585 */
20586 mutex_enter(SD_MUTEX(un));
20587 while ((un->un_state == SD_STATE_SUSPENDED) ||
20588 (un->un_state == SD_STATE_PM_CHANGING)) {
20589 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
20590 }
20591 /*
20592 * Twiddling the counter here protects commands from now
20593 * through to the top of sd_uscsi_strategy. Without the
20594 * counter increment, a power down, for example, could get in
20595 * after the above check for state is made and before
20596 * execution gets to the top of sd_uscsi_strategy.
20597 * That would cause problems.
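 * In short: un_ncmds_in_driver is incremented under SD_MUTEX only
 * after the suspend/PM-change wait above, and every exit path from
 * this routine decrements it again before returning.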
20598 */ 20599 un->un_ncmds_in_driver++; 20600 20601 if ((un->un_f_geometry_is_valid == FALSE) && 20602 (flag & (FNDELAY | FNONBLOCK))) { 20603 switch (cmd) { 20604 case CDROMPAUSE: 20605 case CDROMRESUME: 20606 case CDROMPLAYMSF: 20607 case CDROMPLAYTRKIND: 20608 case CDROMREADTOCHDR: 20609 case CDROMREADTOCENTRY: 20610 case CDROMSTOP: 20611 case CDROMSTART: 20612 case CDROMVOLCTRL: 20613 case CDROMSUBCHNL: 20614 case CDROMREADMODE2: 20615 case CDROMREADMODE1: 20616 case CDROMREADOFFSET: 20617 case CDROMSBLKMODE: 20618 case CDROMGBLKMODE: 20619 case CDROMGDRVSPEED: 20620 case CDROMSDRVSPEED: 20621 case CDROMCDDA: 20622 case CDROMCDXA: 20623 case CDROMSUBCODE: 20624 if (!ISCD(un)) { 20625 un->un_ncmds_in_driver--; 20626 ASSERT(un->un_ncmds_in_driver >= 0); 20627 mutex_exit(SD_MUTEX(un)); 20628 return (ENOTTY); 20629 } 20630 break; 20631 case FDEJECT: 20632 case DKIOCEJECT: 20633 case CDROMEJECT: 20634 if (!ISREMOVABLE(un)) { 20635 un->un_ncmds_in_driver--; 20636 ASSERT(un->un_ncmds_in_driver >= 0); 20637 mutex_exit(SD_MUTEX(un)); 20638 return (ENOTTY); 20639 } 20640 break; 20641 case DKIOCSVTOC: 20642 case DKIOCSETEFI: 20643 case DKIOCSMBOOT: 20644 mutex_exit(SD_MUTEX(un)); 20645 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 20646 if (err != 0) { 20647 mutex_enter(SD_MUTEX(un)); 20648 un->un_ncmds_in_driver--; 20649 ASSERT(un->un_ncmds_in_driver >= 0); 20650 mutex_exit(SD_MUTEX(un)); 20651 return (EIO); 20652 } 20653 mutex_enter(SD_MUTEX(un)); 20654 /* FALLTHROUGH */ 20655 case DKIOCREMOVABLE: 20656 case DKIOCINFO: 20657 case DKIOCGMEDIAINFO: 20658 case MHIOCENFAILFAST: 20659 case MHIOCSTATUS: 20660 case MHIOCTKOWN: 20661 case MHIOCRELEASE: 20662 case MHIOCGRP_INKEYS: 20663 case MHIOCGRP_INRESV: 20664 case MHIOCGRP_REGISTER: 20665 case MHIOCGRP_RESERVE: 20666 case MHIOCGRP_PREEMPTANDABORT: 20667 case MHIOCGRP_REGISTERANDIGNOREKEY: 20668 case CDROMCLOSETRAY: 20669 case USCSICMD: 20670 goto skip_ready_valid; 20671 default: 20672 break; 20673 } 20674 20675 mutex_exit(SD_MUTEX(un)); 20676 err = sd_ready_and_valid(un); 20677 mutex_enter(SD_MUTEX(un)); 20678 if (err == SD_READY_NOT_VALID) { 20679 switch (cmd) { 20680 case DKIOCGAPART: 20681 case DKIOCGGEOM: 20682 case DKIOCSGEOM: 20683 case DKIOCGVTOC: 20684 case DKIOCSVTOC: 20685 case DKIOCSAPART: 20686 case DKIOCG_PHYGEOM: 20687 case DKIOCG_VIRTGEOM: 20688 err = ENOTSUP; 20689 un->un_ncmds_in_driver--; 20690 ASSERT(un->un_ncmds_in_driver >= 0); 20691 mutex_exit(SD_MUTEX(un)); 20692 return (err); 20693 } 20694 } 20695 if (err != SD_READY_VALID) { 20696 switch (cmd) { 20697 case DKIOCSTATE: 20698 case CDROMGDRVSPEED: 20699 case CDROMSDRVSPEED: 20700 case FDEJECT: /* for eject command */ 20701 case DKIOCEJECT: 20702 case CDROMEJECT: 20703 case DKIOCGETEFI: 20704 case DKIOCSGEOM: 20705 case DKIOCREMOVABLE: 20706 case DKIOCSAPART: 20707 case DKIOCSETEFI: 20708 break; 20709 default: 20710 if (ISREMOVABLE(un)) { 20711 err = ENXIO; 20712 } else { 20713 /* Do not map EACCES to EIO */ 20714 if (err != EACCES) 20715 err = EIO; 20716 } 20717 un->un_ncmds_in_driver--; 20718 ASSERT(un->un_ncmds_in_driver >= 0); 20719 mutex_exit(SD_MUTEX(un)); 20720 return (err); 20721 } 20722 } 20723 geom_validated = TRUE; 20724 } 20725 if ((un->un_f_geometry_is_valid == TRUE) && 20726 (un->un_solaris_size > 0)) { 20727 /* 20728 * the "geometry_is_valid" flag could be true if we 20729 * have an fdisk table but no Solaris partition 20730 */ 20731 if (un->un_vtoc.v_sanity != VTOC_SANE) { 20732 /* it is EFI, so return ENOTSUP for these */ 20733 switch (cmd) { 20734 case 
DKIOCGAPART: 20735 case DKIOCGGEOM: 20736 case DKIOCGVTOC: 20737 case DKIOCSVTOC: 20738 case DKIOCSAPART: 20739 err = ENOTSUP; 20740 un->un_ncmds_in_driver--; 20741 ASSERT(un->un_ncmds_in_driver >= 0); 20742 mutex_exit(SD_MUTEX(un)); 20743 return (err); 20744 } 20745 } 20746 } 20747 20748 skip_ready_valid: 20749 mutex_exit(SD_MUTEX(un)); 20750 20751 switch (cmd) { 20752 case DKIOCINFO: 20753 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 20754 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 20755 break; 20756 20757 case DKIOCGMEDIAINFO: 20758 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 20759 err = sd_get_media_info(dev, (caddr_t)arg, flag); 20760 break; 20761 20762 case DKIOCGGEOM: 20763 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGGEOM\n"); 20764 err = sd_dkio_get_geometry(dev, (caddr_t)arg, flag, 20765 geom_validated); 20766 break; 20767 20768 case DKIOCSGEOM: 20769 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSGEOM\n"); 20770 err = sd_dkio_set_geometry(dev, (caddr_t)arg, flag); 20771 break; 20772 20773 case DKIOCGAPART: 20774 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGAPART\n"); 20775 err = sd_dkio_get_partition(dev, (caddr_t)arg, flag, 20776 geom_validated); 20777 break; 20778 20779 case DKIOCSAPART: 20780 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSAPART\n"); 20781 err = sd_dkio_set_partition(dev, (caddr_t)arg, flag); 20782 break; 20783 20784 case DKIOCGVTOC: 20785 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGVTOC\n"); 20786 err = sd_dkio_get_vtoc(dev, (caddr_t)arg, flag, 20787 geom_validated); 20788 break; 20789 20790 case DKIOCGETEFI: 20791 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGETEFI\n"); 20792 err = sd_dkio_get_efi(dev, (caddr_t)arg, flag); 20793 break; 20794 20795 case DKIOCPARTITION: 20796 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCPARTITION\n"); 20797 err = sd_dkio_partition(dev, (caddr_t)arg, flag); 20798 break; 20799 20800 case DKIOCSVTOC: 20801 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSVTOC\n"); 20802 err = sd_dkio_set_vtoc(dev, (caddr_t)arg, flag); 20803 break; 20804 20805 case DKIOCSETEFI: 20806 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSETEFI\n"); 20807 err = sd_dkio_set_efi(dev, (caddr_t)arg, flag); 20808 break; 20809 20810 case DKIOCGMBOOT: 20811 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMBOOT\n"); 20812 err = sd_dkio_get_mboot(dev, (caddr_t)arg, flag); 20813 break; 20814 20815 case DKIOCSMBOOT: 20816 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSMBOOT\n"); 20817 err = sd_dkio_set_mboot(dev, (caddr_t)arg, flag); 20818 break; 20819 20820 case DKIOCLOCK: 20821 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 20822 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 20823 SD_PATH_STANDARD); 20824 break; 20825 20826 case DKIOCUNLOCK: 20827 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 20828 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 20829 SD_PATH_STANDARD); 20830 break; 20831 20832 case DKIOCSTATE: { 20833 enum dkio_state state; 20834 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 20835 20836 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 20837 err = EFAULT; 20838 } else { 20839 err = sd_check_media(dev, state); 20840 if (err == 0) { 20841 if (ddi_copyout(&un->un_mediastate, (void *)arg, 20842 sizeof (int), flag) != 0) 20843 err = EFAULT; 20844 } 20845 } 20846 break; 20847 } 20848 20849 case DKIOCREMOVABLE: 20850 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 20851 if (ISREMOVABLE(un)) { 20852 i = 1; 20853 } else { 20854 i = 0; 20855 } 20856 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 20857 err = EFAULT; 20858 } else { 20859 err = 0; 20860 } 20861 break; 20862 20863 case DKIOCGTEMPERATURE: 20864 SD_TRACE(SD_LOG_IOCTL, un, 
"DKIOCGTEMPERATURE\n"); 20865 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 20866 break; 20867 20868 case MHIOCENFAILFAST: 20869 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 20870 if ((err = drv_priv(cred_p)) == 0) { 20871 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 20872 } 20873 break; 20874 20875 case MHIOCTKOWN: 20876 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 20877 if ((err = drv_priv(cred_p)) == 0) { 20878 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 20879 } 20880 break; 20881 20882 case MHIOCRELEASE: 20883 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 20884 if ((err = drv_priv(cred_p)) == 0) { 20885 err = sd_mhdioc_release(dev); 20886 } 20887 break; 20888 20889 case MHIOCSTATUS: 20890 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 20891 if ((err = drv_priv(cred_p)) == 0) { 20892 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 20893 case 0: 20894 err = 0; 20895 break; 20896 case EACCES: 20897 *rval_p = 1; 20898 err = 0; 20899 break; 20900 default: 20901 err = EIO; 20902 break; 20903 } 20904 } 20905 break; 20906 20907 case MHIOCQRESERVE: 20908 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 20909 if ((err = drv_priv(cred_p)) == 0) { 20910 err = sd_reserve_release(dev, SD_RESERVE); 20911 } 20912 break; 20913 20914 case MHIOCREREGISTERDEVID: 20915 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 20916 if (drv_priv(cred_p) == EPERM) { 20917 err = EPERM; 20918 } else if (ISREMOVABLE(un) || ISCD(un)) { 20919 err = ENOTTY; 20920 } else { 20921 err = sd_mhdioc_register_devid(dev); 20922 } 20923 break; 20924 20925 case MHIOCGRP_INKEYS: 20926 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 20927 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20928 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20929 err = ENOTSUP; 20930 } else { 20931 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 20932 flag); 20933 } 20934 } 20935 break; 20936 20937 case MHIOCGRP_INRESV: 20938 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 20939 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20940 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20941 err = ENOTSUP; 20942 } else { 20943 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 20944 } 20945 } 20946 break; 20947 20948 case MHIOCGRP_REGISTER: 20949 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 20950 if ((err = drv_priv(cred_p)) != EPERM) { 20951 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20952 err = ENOTSUP; 20953 } else if (arg != NULL) { 20954 mhioc_register_t reg; 20955 if (ddi_copyin((void *)arg, ®, 20956 sizeof (mhioc_register_t), flag) != 0) { 20957 err = EFAULT; 20958 } else { 20959 err = 20960 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20961 un, SD_SCSI3_REGISTER, 20962 (uchar_t *)®); 20963 } 20964 } 20965 } 20966 break; 20967 20968 case MHIOCGRP_RESERVE: 20969 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 20970 if ((err = drv_priv(cred_p)) != EPERM) { 20971 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20972 err = ENOTSUP; 20973 } else if (arg != NULL) { 20974 mhioc_resv_desc_t resv_desc; 20975 if (ddi_copyin((void *)arg, &resv_desc, 20976 sizeof (mhioc_resv_desc_t), flag) != 0) { 20977 err = EFAULT; 20978 } else { 20979 err = 20980 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20981 un, SD_SCSI3_RESERVE, 20982 (uchar_t *)&resv_desc); 20983 } 20984 } 20985 } 20986 break; 20987 20988 case MHIOCGRP_PREEMPTANDABORT: 20989 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 20990 if ((err = drv_priv(cred_p)) != EPERM) { 20991 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20992 err = 
ENOTSUP;
20993 } else if (arg != NULL) {
20994 mhioc_preemptandabort_t preempt_abort;
20995 if (ddi_copyin((void *)arg, &preempt_abort,
20996 sizeof (mhioc_preemptandabort_t),
20997 flag) != 0) {
20998 err = EFAULT;
20999 } else {
21000 err =
21001 sd_send_scsi_PERSISTENT_RESERVE_OUT(
21002 un, SD_SCSI3_PREEMPTANDABORT,
21003 (uchar_t *)&preempt_abort);
21004 }
21005 }
21006 }
21007 break;
21008
21009 case MHIOCGRP_REGISTERANDIGNOREKEY:
21010 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n");
21011 if ((err = drv_priv(cred_p)) != EPERM) {
21012 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
21013 err = ENOTSUP;
21014 } else if (arg != NULL) {
21015 mhioc_registerandignorekey_t r_and_i;
21016 if (ddi_copyin((void *)arg, (void *)&r_and_i,
21017 sizeof (mhioc_registerandignorekey_t),
21018 flag) != 0) {
21019 err = EFAULT;
21020 } else {
21021 err =
21022 sd_send_scsi_PERSISTENT_RESERVE_OUT(
21023 un, SD_SCSI3_REGISTERANDIGNOREKEY,
21024 (uchar_t *)&r_and_i);
21025 }
21026 }
21027 }
21028 break;
21029
21030 case USCSICMD:
21031 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n");
21032 cr = ddi_get_cred();
21033 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) {
21034 err = EPERM;
21035 } else {
21036 err = sd_uscsi_ioctl(dev, (caddr_t)arg, flag);
21037 }
21038 break;
21039
21040 case CDROMPAUSE:
21041 case CDROMRESUME:
21042 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n");
21043 if (!ISCD(un)) {
21044 err = ENOTTY;
21045 } else {
21046 err = sr_pause_resume(dev, cmd);
21047 }
21048 break;
21049
21050 case CDROMPLAYMSF:
21051 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n");
21052 if (!ISCD(un)) {
21053 err = ENOTTY;
21054 } else {
21055 err = sr_play_msf(dev, (caddr_t)arg, flag);
21056 }
21057 break;
21058
21059 case CDROMPLAYTRKIND:
21060 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n");
21061 #if defined(__i386) || defined(__amd64)
21062 /*
21063 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead
21064 */
21065 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
21066 #else
21067 if (!ISCD(un)) {
21068 #endif
21069 err = ENOTTY;
21070 } else {
21071 err = sr_play_trkind(dev, (caddr_t)arg, flag);
21072 }
21073 break;
21074
21075 case CDROMREADTOCHDR:
21076 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n");
21077 if (!ISCD(un)) {
21078 err = ENOTTY;
21079 } else {
21080 err = sr_read_tochdr(dev, (caddr_t)arg, flag);
21081 }
21082 break;
21083
21084 case CDROMREADTOCENTRY:
21085 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n");
21086 if (!ISCD(un)) {
21087 err = ENOTTY;
21088 } else {
21089 err = sr_read_tocentry(dev, (caddr_t)arg, flag);
21090 }
21091 break;
21092
21093 case CDROMSTOP:
21094 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n");
21095 if (!ISCD(un)) {
21096 err = ENOTTY;
21097 } else {
21098 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP,
21099 SD_PATH_STANDARD);
21100 }
21101 break;
21102
21103 case CDROMSTART:
21104 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n");
21105 if (!ISCD(un)) {
21106 err = ENOTTY;
21107 } else {
21108 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
21109 SD_PATH_STANDARD);
21110 }
21111 break;
21112
21113 case CDROMCLOSETRAY:
21114 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
21115 if (!ISCD(un)) {
21116 err = ENOTTY;
21117 } else {
21118 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE,
21119 SD_PATH_STANDARD);
21120 }
21121 break;
21122
21123 case FDEJECT: /* for eject command */
21124 case DKIOCEJECT:
21125 case CDROMEJECT:
21126 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
21127 if (!ISREMOVABLE(un)) {
21128 err = ENOTTY;
21129 } else
{
21130 err = sr_eject(dev);
21131 }
21132 break;
21133
21134 case CDROMVOLCTRL:
21135 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
21136 if (!ISCD(un)) {
21137 err = ENOTTY;
21138 } else {
21139 err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
21140 }
21141 break;
21142
21143 case CDROMSUBCHNL:
21144 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
21145 if (!ISCD(un)) {
21146 err = ENOTTY;
21147 } else {
21148 err = sr_read_subchannel(dev, (caddr_t)arg, flag);
21149 }
21150 break;
21151
21152 case CDROMREADMODE2:
21153 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
21154 if (!ISCD(un)) {
21155 err = ENOTTY;
21156 } else if (un->un_f_cfg_is_atapi == TRUE) {
21157 /*
21158 * If the drive supports READ CD, use that instead of
21159 * switching the LBA size via a MODE SELECT
21160 * Block Descriptor
21161 */
21162 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
21163 } else {
21164 err = sr_read_mode2(dev, (caddr_t)arg, flag);
21165 }
21166 break;
21167
21168 case CDROMREADMODE1:
21169 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
21170 if (!ISCD(un)) {
21171 err = ENOTTY;
21172 } else {
21173 err = sr_read_mode1(dev, (caddr_t)arg, flag);
21174 }
21175 break;
21176
21177 case CDROMREADOFFSET:
21178 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
21179 if (!ISCD(un)) {
21180 err = ENOTTY;
21181 } else {
21182 err = sr_read_sony_session_offset(dev, (caddr_t)arg,
21183 flag);
21184 }
21185 break;
21186
21187 case CDROMSBLKMODE:
21188 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
21189 /*
21190 * There is no means of changing block size in case of atapi
21191 * drives, thus return ENOTTY if drive type is atapi
21192 */
21193 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
21194 err = ENOTTY;
21195 } else if (un->un_f_mmc_cap == TRUE) {
21196
21197 /*
21198 * MMC Devices do not support changing the
21199 * logical block size
21200 *
21201 * Note: EINVAL is being returned instead of ENOTTY to
21202 * maintain consistency with the original mmc
21203 * driver update.
21204 */
21205 err = EINVAL;
21206 } else {
21207 mutex_enter(SD_MUTEX(un));
21208 if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
21209 (un->un_ncmds_in_transport > 0)) {
21210 mutex_exit(SD_MUTEX(un));
21211 err = EINVAL;
21212 } else {
21213 mutex_exit(SD_MUTEX(un));
21214 err = sr_change_blkmode(dev, cmd, arg, flag);
21215 }
21216 }
21217 break;
21218
21219 case CDROMGBLKMODE:
21220 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
21221 if (!ISCD(un)) {
21222 err = ENOTTY;
21223 } else if ((un->un_f_cfg_is_atapi != FALSE) &&
21224 (un->un_f_blockcount_is_valid != FALSE)) {
21225 /*
21226 * Drive is an ATAPI drive so return target block
21227 * size for ATAPI drives since we cannot change the
21228 * blocksize on ATAPI drives. Used primarily to detect
21229 * if an ATAPI cdrom is present.
21230 */
21231 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
21232 sizeof (int), flag) != 0) {
21233 err = EFAULT;
21234 } else {
21235 err = 0;
21236 }
21237
21238 } else {
21239 /*
21240 * Drive supports changing block sizes via a Mode
21241 * Select.
21242 */
21243 err = sr_change_blkmode(dev, cmd, arg, flag);
21244 }
21245 break;
21246
21247 case CDROMGDRVSPEED:
21248 case CDROMSDRVSPEED:
21249 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
21250 if (!ISCD(un)) {
21251 err = ENOTTY;
21252 } else if (un->un_f_mmc_cap == TRUE) {
21253 /*
21254 * Note: In the future the driver implementation
21255 * for getting and
21256 * setting cd speed should entail:
21257 * 1) If non-mmc try the Toshiba mode page
21258 * (sr_change_speed)
21259 * 2) If mmc but no support for Real Time Streaming try
21260 * the SET CD SPEED (0xBB) command
21261 * (sr_atapi_change_speed)
21262 * 3) If mmc and support for Real Time Streaming
21263 * try the GET PERFORMANCE and SET STREAMING
21264 * commands (not yet implemented, 4380808)
21265 */
21266 /*
21267 * As per recent MMC spec, CD-ROM speed is variable
21268 * and changes with LBA. Since there is no such
21269 * thing as drive speed now, fail this ioctl.
21270 *
21271 * Note: EINVAL is returned for consistency with the
21272 * original implementation, which included support for
21273 * getting the drive speed of mmc devices but not
21274 * setting the drive speed. Thus EINVAL would be
21275 * returned if a set request was made for an mmc device.
21276 * We no longer support get or set speed for
21277 * mmc but need to remain consistent with regard
21278 * to the error code returned.
21279 */
21280 err = EINVAL;
21281 } else if (un->un_f_cfg_is_atapi == TRUE) {
21282 err = sr_atapi_change_speed(dev, cmd, arg, flag);
21283 } else {
21284 err = sr_change_speed(dev, cmd, arg, flag);
21285 }
21286 break;
21287
21288 case CDROMCDDA:
21289 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
21290 if (!ISCD(un)) {
21291 err = ENOTTY;
21292 } else {
21293 err = sr_read_cdda(dev, (void *)arg, flag);
21294 }
21295 break;
21296
21297 case CDROMCDXA:
21298 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
21299 if (!ISCD(un)) {
21300 err = ENOTTY;
21301 } else {
21302 err = sr_read_cdxa(dev, (caddr_t)arg, flag);
21303 }
21304 break;
21305
21306 case CDROMSUBCODE:
21307 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
21308 if (!ISCD(un)) {
21309 err = ENOTTY;
21310 } else {
21311 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
21312 }
21313 break;
21314
21315 case DKIOCPARTINFO: {
21316 /*
21317 * Return parameters describing the selected disk slice.
21318 * Note: this ioctl is for the intel platform only
21319 */
21320 #if defined(__i386) || defined(__amd64)
21321 int part;
21322
21323 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCPARTINFO\n");
21324 part = SDPART(dev);
21325
21326 /* don't check un_solaris_size for pN */
21327 if (part < P0_RAW_DISK && un->un_solaris_size == 0) {
21328 err = EIO;
21329 } else {
21330 struct part_info p;
21331
21332 p.p_start = (daddr_t)un->un_offset[part];
21333 p.p_length = (int)un->un_map[part].dkl_nblk;
21334 #ifdef _MULTI_DATAMODEL
21335 switch (ddi_model_convert_from(flag & FMODELS)) {
21336 case DDI_MODEL_ILP32:
21337 {
21338 struct part_info32 p32;
21339
21340 p32.p_start = (daddr32_t)p.p_start;
21341 p32.p_length = p.p_length;
21342 if (ddi_copyout(&p32, (void *)arg,
21343 sizeof (p32), flag))
21344 err = EFAULT;
21345 break;
21346 }
21347
21348 case DDI_MODEL_NONE:
21349 {
21350 if (ddi_copyout(&p, (void *)arg, sizeof (p),
21351 flag))
21352 err = EFAULT;
21353 break;
21354 }
21355 }
21356 #else /* !
_MULTI_DATAMODEL */ 21357 if (ddi_copyout(&p, (void *)arg, sizeof (p), flag)) 21358 err = EFAULT; 21359 #endif /* _MULTI_DATAMODEL */ 21360 } 21361 #else 21362 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCPARTINFO\n"); 21363 err = ENOTTY; 21364 #endif 21365 break; 21366 } 21367 21368 case DKIOCG_PHYGEOM: { 21369 /* Return the driver's notion of the media physical geometry */ 21370 #if defined(__i386) || defined(__amd64) 21371 struct dk_geom disk_geom; 21372 struct dk_geom *dkgp = &disk_geom; 21373 21374 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_PHYGEOM\n"); 21375 mutex_enter(SD_MUTEX(un)); 21376 21377 if (un->un_g.dkg_nhead != 0 && 21378 un->un_g.dkg_nsect != 0) { 21379 /* 21380 * We succeeded in getting a geometry, but 21381 * right now it is being reported as just the 21382 * Solaris fdisk partition, just like for 21383 * DKIOCGGEOM. We need to change that to be 21384 * correct for the entire disk now. 21385 */ 21386 bcopy(&un->un_g, dkgp, sizeof (*dkgp)); 21387 dkgp->dkg_acyl = 0; 21388 dkgp->dkg_ncyl = un->un_blockcount / 21389 (dkgp->dkg_nhead * dkgp->dkg_nsect); 21390 } else { 21391 bzero(dkgp, sizeof (struct dk_geom)); 21392 /* 21393 * This disk does not have a Solaris VTOC 21394 * so we must present a physical geometry 21395 * that will remain consistent regardless 21396 * of how the disk is used. This will ensure 21397 * that the geometry does not change regardless 21398 * of the fdisk partition type (ie. EFI, FAT32, 21399 * Solaris, etc). 21400 */ 21401 if (ISCD(un)) { 21402 dkgp->dkg_nhead = un->un_pgeom.g_nhead; 21403 dkgp->dkg_nsect = un->un_pgeom.g_nsect; 21404 dkgp->dkg_ncyl = un->un_pgeom.g_ncyl; 21405 dkgp->dkg_acyl = un->un_pgeom.g_acyl; 21406 } else { 21407 sd_convert_geometry(un->un_blockcount, dkgp); 21408 dkgp->dkg_acyl = 0; 21409 dkgp->dkg_ncyl = un->un_blockcount / 21410 (dkgp->dkg_nhead * dkgp->dkg_nsect); 21411 } 21412 } 21413 dkgp->dkg_pcyl = dkgp->dkg_ncyl + dkgp->dkg_acyl; 21414 21415 if (ddi_copyout(dkgp, (void *)arg, 21416 sizeof (struct dk_geom), flag)) { 21417 mutex_exit(SD_MUTEX(un)); 21418 err = EFAULT; 21419 } else { 21420 mutex_exit(SD_MUTEX(un)); 21421 err = 0; 21422 } 21423 #else 21424 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_PHYGEOM\n"); 21425 err = ENOTTY; 21426 #endif 21427 break; 21428 } 21429 21430 case DKIOCG_VIRTGEOM: { 21431 /* Return the driver's notion of the media's logical geometry */ 21432 #if defined(__i386) || defined(__amd64) 21433 struct dk_geom disk_geom; 21434 struct dk_geom *dkgp = &disk_geom; 21435 21436 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_VIRTGEOM\n"); 21437 mutex_enter(SD_MUTEX(un)); 21438 /* 21439 * If there is no HBA geometry available, or 21440 * if the HBA returned us something that doesn't 21441 * really fit into an Int 13/function 8 geometry 21442 * result, just fail the ioctl. See PSARC 1998/313. 
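 * (The 1024-cylinder test below reflects that limit: an Int 13h
 * function 8 style geometry cannot describe more than 1024 cylinders.)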
21443 */ 21444 if (un->un_lgeom.g_nhead == 0 || 21445 un->un_lgeom.g_nsect == 0 || 21446 un->un_lgeom.g_ncyl > 1024) { 21447 mutex_exit(SD_MUTEX(un)); 21448 err = EINVAL; 21449 } else { 21450 dkgp->dkg_ncyl = un->un_lgeom.g_ncyl; 21451 dkgp->dkg_acyl = un->un_lgeom.g_acyl; 21452 dkgp->dkg_pcyl = dkgp->dkg_ncyl + dkgp->dkg_acyl; 21453 dkgp->dkg_nhead = un->un_lgeom.g_nhead; 21454 dkgp->dkg_nsect = un->un_lgeom.g_nsect; 21455 21456 if (ddi_copyout(dkgp, (void *)arg, 21457 sizeof (struct dk_geom), flag)) { 21458 mutex_exit(SD_MUTEX(un)); 21459 err = EFAULT; 21460 } else { 21461 mutex_exit(SD_MUTEX(un)); 21462 err = 0; 21463 } 21464 } 21465 #else 21466 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_VIRTGEOM\n"); 21467 err = ENOTTY; 21468 #endif 21469 break; 21470 } 21471 #ifdef SDDEBUG 21472 /* RESET/ABORTS testing ioctls */ 21473 case DKIOCRESET: { 21474 int reset_level; 21475 21476 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 21477 err = EFAULT; 21478 } else { 21479 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 21480 "reset_level = 0x%lx\n", reset_level); 21481 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 21482 err = 0; 21483 } else { 21484 err = EIO; 21485 } 21486 } 21487 break; 21488 } 21489 21490 case DKIOCABORT: 21491 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 21492 if (scsi_abort(SD_ADDRESS(un), NULL)) { 21493 err = 0; 21494 } else { 21495 err = EIO; 21496 } 21497 break; 21498 #endif 21499 21500 #ifdef SD_FAULT_INJECTION 21501 /* SDIOC FaultInjection testing ioctls */ 21502 case SDIOCSTART: 21503 case SDIOCSTOP: 21504 case SDIOCINSERTPKT: 21505 case SDIOCINSERTXB: 21506 case SDIOCINSERTUN: 21507 case SDIOCINSERTARQ: 21508 case SDIOCPUSH: 21509 case SDIOCRETRIEVE: 21510 case SDIOCRUN: 21511 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 21512 "SDIOC detected cmd:0x%X:\n", cmd); 21513 /* call error generator */ 21514 sd_faultinjection_ioctl(cmd, arg, un); 21515 err = 0; 21516 break; 21517 21518 #endif /* SD_FAULT_INJECTION */ 21519 21520 default: 21521 err = ENOTTY; 21522 break; 21523 } 21524 mutex_enter(SD_MUTEX(un)); 21525 un->un_ncmds_in_driver--; 21526 ASSERT(un->un_ncmds_in_driver >= 0); 21527 mutex_exit(SD_MUTEX(un)); 21528 21529 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 21530 return (err); 21531 } 21532 21533 21534 /* 21535 * Function: sd_uscsi_ioctl 21536 * 21537 * Description: This routine is the driver entry point for handling USCSI ioctl 21538 * requests (USCSICMD). 21539 * 21540 * Arguments: dev - the device number 21541 * arg - user provided scsi command 21542 * flag - this argument is a pass through to ddi_copyxxx() 21543 * directly from the mode argument of ioctl(). 
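 *
 * An illustrative application-side sketch of the USCSICMD ioctl this
 * routine services (fd is an assumed open descriptor on the raw
 * device):
 *
 *	struct uscsi_cmd ucmd;
 *	union scsi_cdb cdb;
 *	bzero(&ucmd, sizeof (ucmd));
 *	bzero(&cdb, sizeof (cdb));
 *	cdb.scc_cmd = SCMD_TEST_UNIT_READY;
 *	ucmd.uscsi_cdb = (caddr_t)&cdb;
 *	ucmd.uscsi_cdblen = CDB_GROUP0;
 *	ucmd.uscsi_flags = USCSI_SILENT;
 *	if (ioctl(fd, USCSICMD, &ucmd) < 0)
 *		perror("USCSICMD");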
21544 * 21545 * Return Code: code returned by sd_send_scsi_cmd 21546 * ENXIO 21547 * EFAULT 21548 * EAGAIN 21549 */ 21550 21551 static int 21552 sd_uscsi_ioctl(dev_t dev, caddr_t arg, int flag) 21553 { 21554 #ifdef _MULTI_DATAMODEL 21555 /* 21556 * For use when a 32 bit app makes a call into a 21557 * 64 bit ioctl 21558 */ 21559 struct uscsi_cmd32 uscsi_cmd_32_for_64; 21560 struct uscsi_cmd32 *ucmd32 = &uscsi_cmd_32_for_64; 21561 model_t model; 21562 #endif /* _MULTI_DATAMODEL */ 21563 struct uscsi_cmd *scmd = NULL; 21564 struct sd_lun *un = NULL; 21565 enum uio_seg uioseg; 21566 char cdb[CDB_GROUP0]; 21567 int rval = 0; 21568 21569 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21570 return (ENXIO); 21571 } 21572 21573 SD_TRACE(SD_LOG_IOCTL, un, "sd_uscsi_ioctl: entry: un:0x%p\n", un); 21574 21575 scmd = (struct uscsi_cmd *) 21576 kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 21577 21578 #ifdef _MULTI_DATAMODEL 21579 switch (model = ddi_model_convert_from(flag & FMODELS)) { 21580 case DDI_MODEL_ILP32: 21581 { 21582 if (ddi_copyin((void *)arg, ucmd32, sizeof (*ucmd32), flag)) { 21583 rval = EFAULT; 21584 goto done; 21585 } 21586 /* 21587 * Convert the ILP32 uscsi data from the 21588 * application to LP64 for internal use. 21589 */ 21590 uscsi_cmd32touscsi_cmd(ucmd32, scmd); 21591 break; 21592 } 21593 case DDI_MODEL_NONE: 21594 if (ddi_copyin((void *)arg, scmd, sizeof (*scmd), flag)) { 21595 rval = EFAULT; 21596 goto done; 21597 } 21598 break; 21599 } 21600 #else /* ! _MULTI_DATAMODEL */ 21601 if (ddi_copyin((void *)arg, scmd, sizeof (*scmd), flag)) { 21602 rval = EFAULT; 21603 goto done; 21604 } 21605 #endif /* _MULTI_DATAMODEL */ 21606 21607 scmd->uscsi_flags &= ~USCSI_NOINTR; 21608 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : UIO_USERSPACE; 21609 if (un->un_f_format_in_progress == TRUE) { 21610 rval = EAGAIN; 21611 goto done; 21612 } 21613 21614 /* 21615 * Gotta do the ddi_copyin() here on the uscsi_cdb so that 21616 * we will have a valid cdb[0] to test. 21617 */ 21618 if ((ddi_copyin(scmd->uscsi_cdb, cdb, CDB_GROUP0, flag) == 0) && 21619 (cdb[0] == SCMD_FORMAT)) { 21620 SD_TRACE(SD_LOG_IOCTL, un, 21621 "sd_uscsi_ioctl: scmd->uscsi_cdb 0x%x\n", cdb[0]); 21622 mutex_enter(SD_MUTEX(un)); 21623 un->un_f_format_in_progress = TRUE; 21624 mutex_exit(SD_MUTEX(un)); 21625 rval = sd_send_scsi_cmd(dev, scmd, uioseg, uioseg, uioseg, 21626 SD_PATH_STANDARD); 21627 mutex_enter(SD_MUTEX(un)); 21628 un->un_f_format_in_progress = FALSE; 21629 mutex_exit(SD_MUTEX(un)); 21630 } else { 21631 SD_TRACE(SD_LOG_IOCTL, un, 21632 "sd_uscsi_ioctl: scmd->uscsi_cdb 0x%x\n", cdb[0]); 21633 /* 21634 * It's OK to fall into here even if the ddi_copyin() 21635 * on the uscsi_cdb above fails, because sd_send_scsi_cmd() 21636 * does this same copyin and will return the EFAULT 21637 * if it fails. 21638 */ 21639 rval = sd_send_scsi_cmd(dev, scmd, uioseg, uioseg, uioseg, 21640 SD_PATH_STANDARD); 21641 } 21642 #ifdef _MULTI_DATAMODEL 21643 switch (model) { 21644 case DDI_MODEL_ILP32: 21645 /* 21646 * Convert back to ILP32 before copyout to the 21647 * application 21648 */ 21649 uscsi_cmdtouscsi_cmd32(scmd, ucmd32); 21650 if (ddi_copyout(ucmd32, (void *)arg, sizeof (*ucmd32), flag)) { 21651 if (rval != 0) { 21652 rval = EFAULT; 21653 } 21654 } 21655 break; 21656 case DDI_MODEL_NONE: 21657 if (ddi_copyout(scmd, (void *)arg, sizeof (*scmd), flag)) { 21658 if (rval != 0) { 21659 rval = EFAULT; 21660 } 21661 } 21662 break; 21663 } 21664 #else /* ! 
_MULTI_DATAMODEL */
21665 if (ddi_copyout(scmd, (void *)arg, sizeof (*scmd), flag)) {
21666 if (rval != 0) {
21667 rval = EFAULT;
21668 }
21669 }
21670 #endif /* _MULTI_DATAMODEL */
21671 done:
21672 kmem_free(scmd, sizeof (struct uscsi_cmd));
21673
21674 SD_TRACE(SD_LOG_IOCTL, un, "sd_uscsi_ioctl: exit: un:0x%p\n", un);
21675
21676 return (rval);
21677 }
21678
21679
21680 /*
21681 * Function: sd_dkio_ctrl_info
21682 *
21683 * Description: This routine is the driver entry point for handling controller
21684 * information ioctl requests (DKIOCINFO).
21685 *
21686 * Arguments: dev - the device number
21687 * arg - pointer to user provided dk_cinfo structure
21688 * specifying the controller type and attributes.
21689 * flag - this argument is a pass through to ddi_copyxxx()
21690 * directly from the mode argument of ioctl().
21691 *
21692 * Return Code: 0
21693 * EFAULT
21694 * ENXIO
21695 */
21696
21697 static int
21698 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag)
21699 {
21700 struct sd_lun *un = NULL;
21701 struct dk_cinfo *info;
21702 dev_info_t *pdip;
21703 int lun, tgt;
21704
21705 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
21706 return (ENXIO);
21707 }
21708
21709 info = (struct dk_cinfo *)
21710 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP);
21711
21712 switch (un->un_ctype) {
21713 case CTYPE_CDROM:
21714 info->dki_ctype = DKC_CDROM;
21715 break;
21716 default:
21717 info->dki_ctype = DKC_SCSI_CCS;
21718 break;
21719 }
21720 pdip = ddi_get_parent(SD_DEVINFO(un));
21721 info->dki_cnum = ddi_get_instance(pdip);
21722 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) {
21723 (void) strcpy(info->dki_cname, ddi_get_name(pdip));
21724 } else {
21725 (void) strncpy(info->dki_cname, ddi_node_name(pdip),
21726 DK_DEVLEN - 1);
21727 }
21728
21729 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
21730 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0);
21731 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
21732 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0);
21733
21734 /* Unit Information */
21735 info->dki_unit = ddi_get_instance(SD_DEVINFO(un));
21736 info->dki_slave = ((tgt << 3) | lun);
21737 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)),
21738 DK_DEVLEN - 1);
21739 info->dki_flags = DKI_FMTVOL;
21740 info->dki_partition = SDPART(dev);
21741
21742 /* Max Transfer size of this device in blocks */
21743 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize;
21744 info->dki_addr = 0;
21745 info->dki_space = 0;
21746 info->dki_prio = 0;
21747 info->dki_vec = 0;
21748
21749 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) {
21750 kmem_free(info, sizeof (struct dk_cinfo));
21751 return (EFAULT);
21752 } else {
21753 kmem_free(info, sizeof (struct dk_cinfo));
21754 return (0);
21755 }
21756 }
21757
21758
21759 /*
21760 * Function: sd_get_media_info
21761 *
21762 * Description: This routine is the driver entry point for handling ioctl
21763 * requests for the media type or command set profile used by the
21764 * drive to operate on the media (DKIOCGMEDIAINFO).
21765 *
21766 * Arguments: dev - the device number
21767 * arg - pointer to user provided dk_minfo structure
21768 * specifying the media type, logical block size and
21769 * drive capacity.
21770 * flag - this argument is a pass through to ddi_copyxxx()
21771 * directly from the mode argument of ioctl().
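 *
 * An illustrative application-side sketch (fd is an assumed open
 * descriptor on the raw device):
 *
 *	struct dk_minfo minfo;
 *	if (ioctl(fd, DKIOCGMEDIAINFO, &minfo) == 0)
 *		(void) printf("media %u, lbsize %u\n",
 *		    minfo.dki_media_type, minfo.dki_lbsize);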
21772 * 21773 * Return Code: 0 21774 * EACCES 21775 * EFAULT 21776 * ENXIO 21777 * EIO 21778 */ 21779 21780 static int 21781 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 21782 { 21783 struct sd_lun *un = NULL; 21784 struct uscsi_cmd com; 21785 struct scsi_inquiry *sinq; 21786 struct dk_minfo media_info; 21787 u_longlong_t media_capacity; 21788 uint64_t capacity; 21789 uint_t lbasize; 21790 uchar_t *out_data; 21791 uchar_t *rqbuf; 21792 int rval = 0; 21793 int rtn; 21794 21795 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 21796 (un->un_state == SD_STATE_OFFLINE)) { 21797 return (ENXIO); 21798 } 21799 21800 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 21801 21802 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 21803 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 21804 21805 /* Issue a TUR to determine if the drive is ready with media present */ 21806 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 21807 if (rval == ENXIO) { 21808 goto done; 21809 } 21810 21811 /* Now get configuration data */ 21812 if (ISCD(un)) { 21813 media_info.dki_media_type = DK_CDROM; 21814 21815 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 21816 if (un->un_f_mmc_cap == TRUE) { 21817 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 21818 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN); 21819 21820 if (rtn) { 21821 /* 21822 * Failed for other than an illegal request 21823 * or command not supported 21824 */ 21825 if ((com.uscsi_status == STATUS_CHECK) && 21826 (com.uscsi_rqstatus == STATUS_GOOD)) { 21827 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 21828 (rqbuf[12] != 0x20)) { 21829 rval = EIO; 21830 goto done; 21831 } 21832 } 21833 } else { 21834 /* 21835 * The GET CONFIGURATION command succeeded 21836 * so set the media type according to the 21837 * returned data 21838 */ 21839 media_info.dki_media_type = out_data[6]; 21840 media_info.dki_media_type <<= 8; 21841 media_info.dki_media_type |= out_data[7]; 21842 } 21843 } 21844 } else { 21845 /* 21846 * The profile list is not available, so we attempt to identify 21847 * the media type based on the inquiry data 21848 */ 21849 sinq = un->un_sd->sd_inq; 21850 if (sinq->inq_qual == 0) { 21851 /* This is a direct access device */ 21852 media_info.dki_media_type = DK_FIXED_DISK; 21853 21854 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 21855 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 21856 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 21857 media_info.dki_media_type = DK_ZIP; 21858 } else if ( 21859 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 21860 media_info.dki_media_type = DK_JAZ; 21861 } 21862 } 21863 } else { 21864 /* Not a CD or direct access so return unknown media */ 21865 media_info.dki_media_type = DK_UNKNOWN; 21866 } 21867 } 21868 21869 /* Now read the capacity so we can provide the lbasize and capacity */ 21870 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 21871 SD_PATH_DIRECT)) { 21872 case 0: 21873 break; 21874 case EACCES: 21875 rval = EACCES; 21876 goto done; 21877 default: 21878 rval = EIO; 21879 goto done; 21880 } 21881 21882 media_info.dki_lbsize = lbasize; 21883 media_capacity = capacity; 21884 21885 /* 21886 * sd_send_scsi_READ_CAPACITY() reports capacity in 21887 * un->un_sys_blocksize chunks. So we need to convert it into 21888 * cap.lbasize chunks.
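 * For example, assuming the usual 512-byte un_sys_blocksize: a
 * capacity of 1048576 system blocks on media with a 2048-byte
 * block size becomes 1048576 * 512 / 2048 = 262144 media blocks.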
21889 */ 21890 media_capacity *= un->un_sys_blocksize; 21891 media_capacity /= lbasize; 21892 media_info.dki_capacity = media_capacity; 21893 21894 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 21895 rval = EFAULT; 21896 /* Use a goto, in case code is ever added between here and done: */ 21897 goto done; 21898 } 21899 done: 21900 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 21901 kmem_free(rqbuf, SENSE_LENGTH); 21902 return (rval); 21903 } 21904 21905 21906 /* 21907 * Function: sd_dkio_get_geometry 21908 * 21909 * Description: This routine is the driver entry point for handling user 21910 * requests to get the device geometry (DKIOCGGEOM). 21911 * 21912 * Arguments: dev - the device number 21913 * arg - pointer to user provided dk_geom structure specifying 21914 * the controller's notion of the current geometry. 21915 * flag - this argument is a pass through to ddi_copyxxx() 21916 * directly from the mode argument of ioctl(). 21917 * geom_validated - flag indicating if the device geometry has been 21918 * previously validated in the sdioctl routine. 21919 * 21920 * Return Code: 0 21921 * EFAULT 21922 * ENXIO 21923 * EIO 21924 */ 21925 21926 static int 21927 sd_dkio_get_geometry(dev_t dev, caddr_t arg, int flag, int geom_validated) 21928 { 21929 struct sd_lun *un = NULL; 21930 struct dk_geom *tmp_geom = NULL; 21931 int rval = 0; 21932 21933 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21934 return (ENXIO); 21935 } 21936 21937 #if defined(__i386) || defined(__amd64) 21938 if (un->un_solaris_size == 0) { 21939 return (EIO); 21940 } 21941 #endif 21942 if (geom_validated == FALSE) { 21943 /* 21944 * sd_validate_geometry does not spin a disk up 21945 * if it was spun down. We need to make sure it 21946 * is ready. 21947 */ 21948 if ((rval = sd_send_scsi_TEST_UNIT_READY(un, 0)) != 0) { 21949 return (rval); 21950 } 21951 mutex_enter(SD_MUTEX(un)); 21952 rval = sd_validate_geometry(un, SD_PATH_DIRECT); 21953 mutex_exit(SD_MUTEX(un)); 21954 } 21955 if (rval) 21956 return (rval); 21957 21958 /* 21959 * Make a local copy of the soft state geometry to avoid some potential 21960 * race conditions associated with holding the mutex and updating the 21961 * write_reinstruct value 21962 */ 21963 tmp_geom = kmem_zalloc(sizeof (struct dk_geom), KM_SLEEP); 21964 mutex_enter(SD_MUTEX(un)); 21965 bcopy(&un->un_g, tmp_geom, sizeof (struct dk_geom)); 21966 mutex_exit(SD_MUTEX(un)); 21967 21968 if (tmp_geom->dkg_write_reinstruct == 0) { 21969 tmp_geom->dkg_write_reinstruct = 21970 (int)((int)(tmp_geom->dkg_nsect * tmp_geom->dkg_rpm * 21971 sd_rot_delay) / (int)60000); 21972 } 21973 21974 rval = ddi_copyout(tmp_geom, (void *)arg, sizeof (struct dk_geom), 21975 flag); 21976 if (rval != 0) { 21977 rval = EFAULT; 21978 } 21979 21980 kmem_free(tmp_geom, sizeof (struct dk_geom)); 21981 return (rval); 21982 21983 } 21984 21985 21986 /* 21987 * Function: sd_dkio_set_geometry 21988 * 21989 * Description: This routine is the driver entry point for handling user 21990 * requests to set the device geometry (DKIOCSGEOM). The actual 21991 * device geometry is not updated, just the driver "notion" of it. 21992 * 21993 * Arguments: dev - the device number 21994 * arg - pointer to user provided dk_geom structure used to set 21995 * the controller's notion of the current geometry. 21996 * flag - this argument is a pass through to ddi_copyxxx() 21997 * directly from the mode argument of ioctl().
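 *
 *		A minimal user-level sketch (illustration only, not part
 *		of the driver; fd is assumed open on the raw device and
 *		error handling is elided) of the usual read-modify-write
 *		pattern:
 *
 *			struct dk_geom g;
 *			if (ioctl(fd, DKIOCGGEOM, &g) == 0) {
 *				g.dkg_rpm = 7200;
 *				(void) ioctl(fd, DKIOCSGEOM, &g);
 *			}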
21998 * 21999 * Return Code: 0 22000 * EFAULT 22001 * ENXIO 22002 * EIO 22003 */ 22004 22005 static int 22006 sd_dkio_set_geometry(dev_t dev, caddr_t arg, int flag) 22007 { 22008 struct sd_lun *un = NULL; 22009 struct dk_geom *tmp_geom; 22010 struct dk_map *lp; 22011 int rval = 0; 22012 int i; 22013 22014 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22015 return (ENXIO); 22016 } 22017 22018 #if defined(__i386) || defined(__amd64) 22019 if (un->un_solaris_size == 0) { 22020 return (EIO); 22021 } 22022 #endif 22023 /* 22024 * We need to copy the user specified geometry into local 22025 * storage and then update the softstate. We don't want to hold 22026 * the mutex and copyin directly from the user to the soft state 22027 */ 22028 tmp_geom = (struct dk_geom *) 22029 kmem_zalloc(sizeof (struct dk_geom), KM_SLEEP); 22030 rval = ddi_copyin(arg, tmp_geom, sizeof (struct dk_geom), flag); 22031 if (rval != 0) { 22032 kmem_free(tmp_geom, sizeof (struct dk_geom)); 22033 return (EFAULT); 22034 } 22035 22036 mutex_enter(SD_MUTEX(un)); 22037 bcopy(tmp_geom, &un->un_g, sizeof (struct dk_geom)); 22038 for (i = 0; i < NDKMAP; i++) { 22039 lp = &un->un_map[i]; 22040 un->un_offset[i] = 22041 un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno; 22042 #if defined(__i386) || defined(__amd64) 22043 un->un_offset[i] += un->un_solaris_offset; 22044 #endif 22045 } 22046 un->un_f_geometry_is_valid = FALSE; 22047 mutex_exit(SD_MUTEX(un)); 22048 kmem_free(tmp_geom, sizeof (struct dk_geom)); 22049 22050 return (rval); 22051 } 22052 22053 22054 /* 22055 * Function: sd_dkio_get_partition 22056 * 22057 * Description: This routine is the driver entry point for handling user 22058 * requests to get the partition table (DKIOCGAPART). 22059 * 22060 * Arguments: dev - the device number 22061 * arg - pointer to user provided dk_allmap structure specifying 22062 * the controller's notion of the current partition table. 22063 * flag - this argument is a pass through to ddi_copyxxx() 22064 * directly from the mode argument of ioctl(). 22065 * geom_validated - flag indicating if the device geometry has been 22066 * previously validated in the sdioctl routine. 22067 * 22068 * Return Code: 0 22069 * EFAULT 22070 * ENXIO 22071 * EIO 22072 */ 22073 22074 static int 22075 sd_dkio_get_partition(dev_t dev, caddr_t arg, int flag, int geom_validated) 22076 { 22077 struct sd_lun *un = NULL; 22078 int rval = 0; 22079 int size; 22080 22081 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22082 return (ENXIO); 22083 } 22084 22085 #if defined(__i386) || defined(__amd64) 22086 if (un->un_solaris_size == 0) { 22087 return (EIO); 22088 } 22089 #endif 22090 /* 22091 * Make sure the geometry is valid before getting the partition 22092 * information. 22093 */ 22094 mutex_enter(SD_MUTEX(un)); 22095 if (geom_validated == FALSE) { 22096 /* 22097 * sd_validate_geometry does not spin a disk up 22098 * if it was spun down. We need to make sure it 22099 * is ready before validating the geometry. 
22100 */ 22101 mutex_exit(SD_MUTEX(un)); 22102 if ((rval = sd_send_scsi_TEST_UNIT_READY(un, 0)) != 0) { 22103 return (rval); 22104 } 22105 mutex_enter(SD_MUTEX(un)); 22106 22107 if ((rval = sd_validate_geometry(un, SD_PATH_DIRECT)) != 0) { 22108 mutex_exit(SD_MUTEX(un)); 22109 return (rval); 22110 } 22111 } 22112 mutex_exit(SD_MUTEX(un)); 22113 22114 #ifdef _MULTI_DATAMODEL 22115 switch (ddi_model_convert_from(flag & FMODELS)) { 22116 case DDI_MODEL_ILP32: { 22117 struct dk_map32 dk_map32[NDKMAP]; 22118 int i; 22119 22120 for (i = 0; i < NDKMAP; i++) { 22121 dk_map32[i].dkl_cylno = un->un_map[i].dkl_cylno; 22122 dk_map32[i].dkl_nblk = un->un_map[i].dkl_nblk; 22123 } 22124 size = NDKMAP * sizeof (struct dk_map32); 22125 rval = ddi_copyout(dk_map32, (void *)arg, size, flag); 22126 if (rval != 0) { 22127 rval = EFAULT; 22128 } 22129 break; 22130 } 22131 case DDI_MODEL_NONE: 22132 size = NDKMAP * sizeof (struct dk_map); 22133 rval = ddi_copyout(un->un_map, (void *)arg, size, flag); 22134 if (rval != 0) { 22135 rval = EFAULT; 22136 } 22137 break; 22138 } 22139 #else /* ! _MULTI_DATAMODEL */ 22140 size = NDKMAP * sizeof (struct dk_map); 22141 rval = ddi_copyout(un->un_map, (void *)arg, size, flag); 22142 if (rval != 0) { 22143 rval = EFAULT; 22144 } 22145 #endif /* _MULTI_DATAMODEL */ 22146 return (rval); 22147 } 22148 22149 22150 /* 22151 * Function: sd_dkio_set_partition 22152 * 22153 * Description: This routine is the driver entry point for handling user 22154 * requests to set the partition table (DKIOCSAPART). The actual 22155 * device partition is not updated. 22156 * 22157 * Arguments: dev - the device number 22158 * arg - pointer to user provided dk_allmap structure used to set 22159 * the controller's notion of the partition table. 22160 * flag - this argument is a pass through to ddi_copyxxx() 22161 * directly from the mode argument of ioctl(). 22162 * 22163 * Return Code: 0 22164 * EINVAL 22165 * EFAULT 22166 * ENXIO 22167 * EIO 22168 */ 22169 22170 static int 22171 sd_dkio_set_partition(dev_t dev, caddr_t arg, int flag) 22172 { 22173 struct sd_lun *un = NULL; 22174 struct dk_map dk_map[NDKMAP]; 22175 struct dk_map *lp; 22176 int rval = 0; 22177 int size; 22178 int i; 22179 #if defined(_SUNOS_VTOC_16) 22180 struct dkl_partition *vp; 22181 #endif 22182 22183 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22184 return (ENXIO); 22185 } 22186 22187 /* 22188 * Set the map for all logical partitions. We hold 22189 * the mutex so that the map is never seen while it 22190 * is only half updated.
22191 */ 22192 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_solaris_size)) 22193 mutex_enter(SD_MUTEX(un)); 22194 if (un->un_blockcount > DK_MAX_BLOCKS) { 22195 mutex_exit(SD_MUTEX(un)); 22196 return (ENOTSUP); 22197 } 22198 mutex_exit(SD_MUTEX(un)); 22199 if (un->un_solaris_size == 0) { 22200 return (EIO); 22201 } 22202 22203 #ifdef _MULTI_DATAMODEL 22204 switch (ddi_model_convert_from(flag & FMODELS)) { 22205 case DDI_MODEL_ILP32: { 22206 struct dk_map32 dk_map32[NDKMAP]; 22207 22208 size = NDKMAP * sizeof (struct dk_map32); 22209 rval = ddi_copyin((void *)arg, dk_map32, size, flag); 22210 if (rval != 0) { 22211 return (EFAULT); 22212 } 22213 for (i = 0; i < NDKMAP; i++) { 22214 dk_map[i].dkl_cylno = dk_map32[i].dkl_cylno; 22215 dk_map[i].dkl_nblk = dk_map32[i].dkl_nblk; 22216 } 22217 break; 22218 } 22219 case DDI_MODEL_NONE: 22220 size = NDKMAP * sizeof (struct dk_map); 22221 rval = ddi_copyin((void *)arg, dk_map, size, flag); 22222 if (rval != 0) { 22223 return (EFAULT); 22224 } 22225 break; 22226 } 22227 #else /* ! _MULTI_DATAMODEL */ 22228 size = NDKMAP * sizeof (struct dk_map); 22229 rval = ddi_copyin((void *)arg, dk_map, size, flag); 22230 if (rval != 0) { 22231 return (EFAULT); 22232 } 22233 #endif /* _MULTI_DATAMODEL */ 22234 22235 mutex_enter(SD_MUTEX(un)); 22236 /* Note: The size used in this bcopy is set based upon the data model */ 22237 bcopy(dk_map, un->un_map, size); 22238 #if defined(_SUNOS_VTOC_16) 22239 vp = (struct dkl_partition *)&(un->un_vtoc); 22240 #endif /* defined(_SUNOS_VTOC_16) */ 22241 for (i = 0; i < NDKMAP; i++) { 22242 lp = &un->un_map[i]; 22243 un->un_offset[i] = 22244 un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno; 22245 #if defined(_SUNOS_VTOC_16) 22246 vp->p_start = un->un_offset[i]; 22247 vp->p_size = lp->dkl_nblk; 22248 vp++; 22249 #endif /* defined(_SUNOS_VTOC_16) */ 22250 #if defined(__i386) || defined(__amd64) 22251 un->un_offset[i] += un->un_solaris_offset; 22252 #endif 22253 } 22254 mutex_exit(SD_MUTEX(un)); 22255 return (rval); 22256 } 22257 22258 22259 /* 22260 * Function: sd_dkio_get_vtoc 22261 * 22262 * Description: This routine is the driver entry point for handling user 22263 * requests to get the current volume table of contents 22264 * (DKIOCGVTOC). 22265 * 22266 * Arguments: dev - the device number 22267 * arg - pointer to user provided vtoc structure specifying 22268 * the current vtoc. 22269 * flag - this argument is a pass through to ddi_copyxxx() 22270 * directly from the mode argument of ioctl(). 22271 * geom_validated - flag indicating if the device geometry has been 22272 * previously validated in the sdioctl routine. 22273 * 22274 * Return Code: 0 22275 * EFAULT 22276 * ENXIO 22277 * EIO 22278 */ 22279 22280 static int 22281 sd_dkio_get_vtoc(dev_t dev, caddr_t arg, int flag, int geom_validated) 22282 { 22283 struct sd_lun *un = NULL; 22284 #if defined(_SUNOS_VTOC_8) 22285 struct vtoc user_vtoc; 22286 #endif /* defined(_SUNOS_VTOC_8) */ 22287 int rval = 0; 22288 22289 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22290 return (ENXIO); 22291 } 22292 22293 mutex_enter(SD_MUTEX(un)); 22294 if (geom_validated == FALSE) { 22295 /* 22296 * sd_validate_geometry does not spin a disk up 22297 * if it was spun down. We need to make sure it 22298 * is ready. 
22299 */ 22300 mutex_exit(SD_MUTEX(un)); 22301 if ((rval = sd_send_scsi_TEST_UNIT_READY(un, 0)) != 0) { 22302 return (rval); 22303 } 22304 mutex_enter(SD_MUTEX(un)); 22305 if ((rval = sd_validate_geometry(un, SD_PATH_DIRECT)) != 0) { 22306 mutex_exit(SD_MUTEX(un)); 22307 return (rval); 22308 } 22309 } 22310 22311 #if defined(_SUNOS_VTOC_8) 22312 sd_build_user_vtoc(un, &user_vtoc); 22313 mutex_exit(SD_MUTEX(un)); 22314 22315 #ifdef _MULTI_DATAMODEL 22316 switch (ddi_model_convert_from(flag & FMODELS)) { 22317 case DDI_MODEL_ILP32: { 22318 struct vtoc32 user_vtoc32; 22319 22320 vtoctovtoc32(user_vtoc, user_vtoc32); 22321 if (ddi_copyout(&user_vtoc32, (void *)arg, 22322 sizeof (struct vtoc32), flag)) { 22323 return (EFAULT); 22324 } 22325 break; 22326 } 22327 22328 case DDI_MODEL_NONE: 22329 if (ddi_copyout(&user_vtoc, (void *)arg, 22330 sizeof (struct vtoc), flag)) { 22331 return (EFAULT); 22332 } 22333 break; 22334 } 22335 #else /* ! _MULTI_DATAMODEL */ 22336 if (ddi_copyout(&user_vtoc, (void *)arg, sizeof (struct vtoc), flag)) { 22337 return (EFAULT); 22338 } 22339 #endif /* _MULTI_DATAMODEL */ 22340 22341 #elif defined(_SUNOS_VTOC_16) 22342 mutex_exit(SD_MUTEX(un)); 22343 22344 #ifdef _MULTI_DATAMODEL 22345 /* 22346 * The un_vtoc structure is a "struct dk_vtoc" which is always 22347 * 32-bit to maintain compatibility with existing on-disk 22348 * structures. Thus, we need to convert the structure when copying 22349 * it out to a datamodel-dependent "struct vtoc" in a 64-bit 22350 * program. If the target is a 32-bit program, then no conversion 22351 * is necessary. 22352 */ 22353 /* LINTED: logical expression always true: op "||" */ 22354 ASSERT(sizeof (un->un_vtoc) == sizeof (struct vtoc32)); 22355 switch (ddi_model_convert_from(flag & FMODELS)) { 22356 case DDI_MODEL_ILP32: 22357 if (ddi_copyout(&(un->un_vtoc), (void *)arg, 22358 sizeof (un->un_vtoc), flag)) { 22359 return (EFAULT); 22360 } 22361 break; 22362 22363 case DDI_MODEL_NONE: { 22364 struct vtoc user_vtoc; 22365 22366 vtoc32tovtoc(un->un_vtoc, user_vtoc); 22367 if (ddi_copyout(&user_vtoc, (void *)arg, 22368 sizeof (struct vtoc), flag)) { 22369 return (EFAULT); 22370 } 22371 break; 22372 } 22373 } 22374 #else /* ! _MULTI_DATAMODEL */ 22375 if (ddi_copyout(&(un->un_vtoc), (void *)arg, sizeof (un->un_vtoc), 22376 flag)) { 22377 return (EFAULT); 22378 } 22379 #endif /* _MULTI_DATAMODEL */ 22380 #else 22381 #error "No VTOC format defined." 
22382 #endif 22383 22384 return (rval); 22385 } 22386 22387 static int 22388 sd_dkio_get_efi(dev_t dev, caddr_t arg, int flag) 22389 { 22390 struct sd_lun *un = NULL; 22391 dk_efi_t user_efi; 22392 int rval = 0; 22393 void *buffer; 22394 22395 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) 22396 return (ENXIO); 22397 22398 if (ddi_copyin(arg, &user_efi, sizeof (dk_efi_t), flag)) 22399 return (EFAULT); 22400 22401 user_efi.dki_data = (void *)(uintptr_t)user_efi.dki_data_64; 22402 22403 if ((user_efi.dki_length % un->un_tgt_blocksize) || 22404 (user_efi.dki_length > un->un_max_xfer_size)) 22405 return (EINVAL); 22406 22407 buffer = kmem_alloc(user_efi.dki_length, KM_SLEEP); 22408 rval = sd_send_scsi_READ(un, buffer, user_efi.dki_length, 22409 user_efi.dki_lba, SD_PATH_DIRECT); 22410 if (rval == 0 && ddi_copyout(buffer, user_efi.dki_data, 22411 user_efi.dki_length, flag) != 0) 22412 rval = EFAULT; 22413 22414 kmem_free(buffer, user_efi.dki_length); 22415 return (rval); 22416 } 22417 22418 /* 22419 * Function: sd_build_user_vtoc 22420 * 22421 * Description: This routine populates a pass by reference variable with the 22422 * current volume table of contents. 22423 * 22424 * Arguments: un - driver soft state (unit) structure 22425 * user_vtoc - pointer to vtoc structure to be populated 22426 */ 22427 22428 static void 22429 sd_build_user_vtoc(struct sd_lun *un, struct vtoc *user_vtoc) 22430 { 22431 struct dk_map2 *lpart; 22432 struct dk_map *lmap; 22433 struct partition *vpart; 22434 int nblks; 22435 int i; 22436 22437 ASSERT(mutex_owned(SD_MUTEX(un))); 22438 22439 /* 22440 * Return vtoc structure fields in the provided VTOC area, addressed 22441 * by *vtoc. 22442 */ 22443 bzero(user_vtoc, sizeof (struct vtoc)); 22444 user_vtoc->v_bootinfo[0] = un->un_vtoc.v_bootinfo[0]; 22445 user_vtoc->v_bootinfo[1] = un->un_vtoc.v_bootinfo[1]; 22446 user_vtoc->v_bootinfo[2] = un->un_vtoc.v_bootinfo[2]; 22447 user_vtoc->v_sanity = VTOC_SANE; 22448 user_vtoc->v_version = un->un_vtoc.v_version; 22449 bcopy(un->un_vtoc.v_volume, user_vtoc->v_volume, LEN_DKL_VVOL); 22450 user_vtoc->v_sectorsz = un->un_sys_blocksize; 22451 user_vtoc->v_nparts = un->un_vtoc.v_nparts; 22452 bcopy(un->un_vtoc.v_reserved, user_vtoc->v_reserved, 22453 sizeof (un->un_vtoc.v_reserved)); 22454 /* 22455 * Convert partitioning information. 22456 * 22457 * Note the conversion from starting cylinder number 22458 * to starting sector number. 
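 * (p_start = dkl_cylno * nblks, where nblks = dkg_nsect * dkg_nhead;
 * e.g. a slice starting at cylinder 2 of a 16-head, 63-sector-per-
 * track drive gets p_start = 2 * 1008 = 2016.)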
22459 */ 22460 lmap = un->un_map; 22461 lpart = (struct dk_map2 *)un->un_vtoc.v_part; 22462 vpart = user_vtoc->v_part; 22463 22464 nblks = un->un_g.dkg_nsect * un->un_g.dkg_nhead; 22465 22466 for (i = 0; i < V_NUMPAR; i++) { 22467 vpart->p_tag = lpart->p_tag; 22468 vpart->p_flag = lpart->p_flag; 22469 vpart->p_start = lmap->dkl_cylno * nblks; 22470 vpart->p_size = lmap->dkl_nblk; 22471 lmap++; 22472 lpart++; 22473 vpart++; 22474 22475 /* (4364927) */ 22476 user_vtoc->timestamp[i] = (time_t)un->un_vtoc.v_timestamp[i]; 22477 } 22478 22479 bcopy(un->un_asciilabel, user_vtoc->v_asciilabel, LEN_DKL_ASCII); 22480 } 22481 22482 static int 22483 sd_dkio_partition(dev_t dev, caddr_t arg, int flag) 22484 { 22485 struct sd_lun *un = NULL; 22486 struct partition64 p64; 22487 int rval = 0; 22488 uint_t nparts; 22489 efi_gpe_t *partitions; 22490 efi_gpt_t *buffer; 22491 diskaddr_t gpe_lba; 22492 22493 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22494 return (ENXIO); 22495 } 22496 22497 if (ddi_copyin((const void *)arg, &p64, 22498 sizeof (struct partition64), flag)) { 22499 return (EFAULT); 22500 } 22501 22502 buffer = kmem_alloc(EFI_MIN_ARRAY_SIZE, KM_SLEEP); 22503 rval = sd_send_scsi_READ(un, buffer, DEV_BSIZE, 22504 1, SD_PATH_DIRECT); 22505 if (rval != 0) 22506 goto done_error; 22507 22508 sd_swap_efi_gpt(buffer); 22509 22510 if ((rval = sd_validate_efi(buffer)) != 0) 22511 goto done_error; 22512 22513 nparts = buffer->efi_gpt_NumberOfPartitionEntries; 22514 gpe_lba = buffer->efi_gpt_PartitionEntryLBA; 22515 if (p64.p_partno > nparts) { 22516 /* couldn't find it */ 22517 rval = ESRCH; 22518 goto done_error; 22519 } 22520 /* 22521 * if we're dealing with a partition that's out of the normal 22522 * 16K block, adjust accordingly 22523 */ 22524 gpe_lba += p64.p_partno / sizeof (efi_gpe_t); 22525 rval = sd_send_scsi_READ(un, buffer, EFI_MIN_ARRAY_SIZE, 22526 gpe_lba, SD_PATH_DIRECT); 22527 if (rval) { 22528 goto done_error; 22529 } 22530 partitions = (efi_gpe_t *)buffer; 22531 22532 sd_swap_efi_gpe(nparts, partitions); 22533 22534 partitions += p64.p_partno; 22535 bcopy(&partitions->efi_gpe_PartitionTypeGUID, &p64.p_type, 22536 sizeof (struct uuid)); 22537 p64.p_start = partitions->efi_gpe_StartingLBA; 22538 p64.p_size = partitions->efi_gpe_EndingLBA - 22539 p64.p_start + 1; 22540 22541 if (ddi_copyout(&p64, (void *)arg, sizeof (struct partition64), flag)) 22542 rval = EFAULT; 22543 22544 done_error: 22545 kmem_free(buffer, EFI_MIN_ARRAY_SIZE); 22546 return (rval); 22547 } 22548 22549 22550 /* 22551 * Function: sd_dkio_set_vtoc 22552 * 22553 * Description: This routine is the driver entry point for handling user 22554 * requests to set the current volume table of contents 22555 * (DKIOCSVTOC). 22556 * 22557 * Arguments: dev - the device number 22558 * arg - pointer to user provided vtoc structure used to set the 22559 * current vtoc. 22560 * flag - this argument is a pass through to ddi_copyxxx() 22561 * directly from the mode argument of ioctl(). 
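 *
 *		A minimal user-level sketch (illustration only, not part
 *		of the driver; fd is assumed open on the raw device and
 *		error handling is elided) of the usual read-modify-write
 *		pattern:
 *
 *			struct vtoc vt;
 *			if (ioctl(fd, DKIOCGVTOC, &vt) == 0) {
 *				vt.v_part[0].p_tag = V_USR;
 *				(void) ioctl(fd, DKIOCSVTOC, &vt);
 *			}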
22562 * 22563 * Return Code: 0 22564 * EFAULT 22565 * ENXIO 22566 * EINVAL 22567 * ENOTSUP 22568 */ 22569 22570 static int 22571 sd_dkio_set_vtoc(dev_t dev, caddr_t arg, int flag) 22572 { 22573 struct sd_lun *un = NULL; 22574 struct vtoc user_vtoc; 22575 int rval = 0; 22576 22577 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22578 return (ENXIO); 22579 } 22580 22581 #if defined(__i386) || defined(__amd64) 22582 if (un->un_tgt_blocksize != un->un_sys_blocksize) { 22583 return (EINVAL); 22584 } 22585 #endif 22586 22587 #ifdef _MULTI_DATAMODEL 22588 switch (ddi_model_convert_from(flag & FMODELS)) { 22589 case DDI_MODEL_ILP32: { 22590 struct vtoc32 user_vtoc32; 22591 22592 if (ddi_copyin((const void *)arg, &user_vtoc32, 22593 sizeof (struct vtoc32), flag)) { 22594 return (EFAULT); 22595 } 22596 vtoc32tovtoc(user_vtoc32, user_vtoc); 22597 break; 22598 } 22599 22600 case DDI_MODEL_NONE: 22601 if (ddi_copyin((const void *)arg, &user_vtoc, 22602 sizeof (struct vtoc), flag)) { 22603 return (EFAULT); 22604 } 22605 break; 22606 } 22607 #else /* ! _MULTI_DATAMODEL */ 22608 if (ddi_copyin((const void *)arg, &user_vtoc, 22609 sizeof (struct vtoc), flag)) { 22610 return (EFAULT); 22611 } 22612 #endif /* _MULTI_DATAMODEL */ 22613 22614 mutex_enter(SD_MUTEX(un)); 22615 if (un->un_blockcount > DK_MAX_BLOCKS) { 22616 mutex_exit(SD_MUTEX(un)); 22617 return (ENOTSUP); 22618 } 22619 if (un->un_g.dkg_ncyl == 0) { 22620 mutex_exit(SD_MUTEX(un)); 22621 return (EINVAL); 22622 } 22623 22624 mutex_exit(SD_MUTEX(un)); 22625 sd_clear_efi(un); 22626 ddi_remove_minor_node(SD_DEVINFO(un), "wd"); 22627 ddi_remove_minor_node(SD_DEVINFO(un), "wd,raw"); 22628 (void) ddi_create_minor_node(SD_DEVINFO(un), "h", 22629 S_IFBLK, (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE, 22630 un->un_node_type, NULL); 22631 (void) ddi_create_minor_node(SD_DEVINFO(un), "h,raw", 22632 S_IFCHR, (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE, 22633 un->un_node_type, NULL); 22634 mutex_enter(SD_MUTEX(un)); 22635 22636 if ((rval = sd_build_label_vtoc(un, &user_vtoc)) == 0) { 22637 if ((rval = sd_write_label(dev)) == 0) { 22638 if ((rval = sd_validate_geometry(un, SD_PATH_DIRECT)) 22639 != 0) { 22640 SD_ERROR(SD_LOG_IOCTL_DKIO, un, 22641 "sd_dkio_set_vtoc: " 22642 "Failed validate geometry\n"); 22643 } 22644 } 22645 } 22646 22647 /* 22648 * If sd_build_label_vtoc() or sd_write_label() failed above, write the 22649 * devid anyway; it cannot hurt. Also preserve the device id by 22650 * writing to the disk acyl for the case where a devid has been 22651 * fabricated. 22652 */ 22653 if (!ISREMOVABLE(un) && !ISCD(un) && 22654 (un->un_f_opt_fab_devid == TRUE)) { 22655 if (un->un_devid == NULL) { 22656 sd_register_devid(un, SD_DEVINFO(un), 22657 SD_TARGET_IS_UNRESERVED); 22658 } else { 22659 /* 22660 * The device id for this disk has been 22661 * fabricated. Fabricated device ids are 22662 * managed by storing them in the last 2 22663 * available sectors on the drive. The device 22664 * id must be preserved by writing it back out 22665 * to this location. 22666 */ 22667 if (sd_write_deviceid(un) != 0) { 22668 ddi_devid_free(un->un_devid); 22669 un->un_devid = NULL; 22670 } 22671 } 22672 } 22673 mutex_exit(SD_MUTEX(un)); 22674 return (rval); 22675 } 22676 22677 22678 /* 22679 * Function: sd_build_label_vtoc 22680 * 22681 * Description: This routine updates the driver soft state current volume table 22682 * of contents based on a user-specified vtoc.
22683 * 22684 * Arguments: un - driver soft state (unit) structure 22685 * user_vtoc - pointer to vtoc structure specifying vtoc to be used 22686 * to update the driver soft state. 22687 * 22688 * Return Code: 0 22689 * EINVAL 22690 */ 22691 22692 static int 22693 sd_build_label_vtoc(struct sd_lun *un, struct vtoc *user_vtoc) 22694 { 22695 struct dk_map *lmap; 22696 struct partition *vpart; 22697 int nblks; 22698 #if defined(_SUNOS_VTOC_8) 22699 int ncyl; 22700 struct dk_map2 *lpart; 22701 #endif /* defined(_SUNOS_VTOC_8) */ 22702 int i; 22703 22704 ASSERT(mutex_owned(SD_MUTEX(un))); 22705 22706 /* Sanity-check the vtoc */ 22707 if (user_vtoc->v_sanity != VTOC_SANE || 22708 user_vtoc->v_sectorsz != un->un_sys_blocksize || 22709 user_vtoc->v_nparts != V_NUMPAR) { 22710 return (EINVAL); 22711 } 22712 22713 nblks = un->un_g.dkg_nsect * un->un_g.dkg_nhead; 22714 if (nblks == 0) { 22715 return (EINVAL); 22716 } 22717 22718 #if defined(_SUNOS_VTOC_8) 22719 vpart = user_vtoc->v_part; 22720 for (i = 0; i < V_NUMPAR; i++) { 22721 if ((vpart->p_start % nblks) != 0) { 22722 return (EINVAL); 22723 } 22724 ncyl = vpart->p_start / nblks; 22725 ncyl += vpart->p_size / nblks; 22726 if ((vpart->p_size % nblks) != 0) { 22727 ncyl++; 22728 } 22729 if (ncyl > (int)un->un_g.dkg_ncyl) { 22730 return (EINVAL); 22731 } 22732 vpart++; 22733 } 22734 #endif /* defined(_SUNOS_VTOC_8) */ 22735 22736 /* Put appropriate vtoc structure fields into the disk label */ 22737 #if defined(_SUNOS_VTOC_16) 22738 /* 22739 * The vtoc is always a 32bit data structure to maintain the 22740 * on-disk format. Convert "in place" instead of bcopying it. 22741 */ 22742 vtoctovtoc32((*user_vtoc), (*((struct vtoc32 *)&(un->un_vtoc)))); 22743 22744 /* 22745 * in the 16-slice vtoc, starting sectors are expressed in 22746 * numbers *relative* to the start of the Solaris fdisk partition. 22747 */ 22748 lmap = un->un_map; 22749 vpart = user_vtoc->v_part; 22750 22751 for (i = 0; i < (int)user_vtoc->v_nparts; i++, lmap++, vpart++) { 22752 lmap->dkl_cylno = vpart->p_start / nblks; 22753 lmap->dkl_nblk = vpart->p_size; 22754 } 22755 22756 #elif defined(_SUNOS_VTOC_8) 22757 22758 un->un_vtoc.v_bootinfo[0] = (uint32_t)user_vtoc->v_bootinfo[0]; 22759 un->un_vtoc.v_bootinfo[1] = (uint32_t)user_vtoc->v_bootinfo[1]; 22760 un->un_vtoc.v_bootinfo[2] = (uint32_t)user_vtoc->v_bootinfo[2]; 22761 22762 un->un_vtoc.v_sanity = (uint32_t)user_vtoc->v_sanity; 22763 un->un_vtoc.v_version = (uint32_t)user_vtoc->v_version; 22764 22765 bcopy(user_vtoc->v_volume, un->un_vtoc.v_volume, LEN_DKL_VVOL); 22766 22767 un->un_vtoc.v_nparts = user_vtoc->v_nparts; 22768 22769 bcopy(user_vtoc->v_reserved, un->un_vtoc.v_reserved, 22770 sizeof (un->un_vtoc.v_reserved)); 22771 22772 /* 22773 * Note the conversion from starting sector number 22774 * to starting cylinder number. 22775 * Return error if division results in a remainder. 
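 * (dkl_cylno = p_start / nblks; e.g. p_start 2016 with nblks 1008
 * maps back to cylinder 2. A p_start that is not a multiple of
 * nblks was already rejected by the sanity checks above.)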
22776 */ 22777 lmap = un->un_map; 22778 lpart = un->un_vtoc.v_part; 22779 vpart = user_vtoc->v_part; 22780 22781 for (i = 0; i < (int)user_vtoc->v_nparts; i++) { 22782 lpart->p_tag = vpart->p_tag; 22783 lpart->p_flag = vpart->p_flag; 22784 lmap->dkl_cylno = vpart->p_start / nblks; 22785 lmap->dkl_nblk = vpart->p_size; 22786 22787 lmap++; 22788 lpart++; 22789 vpart++; 22790 22791 /* (4387723) */ 22792 #ifdef _LP64 22793 if (user_vtoc->timestamp[i] > TIME32_MAX) { 22794 un->un_vtoc.v_timestamp[i] = TIME32_MAX; 22795 } else { 22796 un->un_vtoc.v_timestamp[i] = user_vtoc->timestamp[i]; 22797 } 22798 #else 22799 un->un_vtoc.v_timestamp[i] = user_vtoc->timestamp[i]; 22800 #endif 22801 } 22802 22803 bcopy(user_vtoc->v_asciilabel, un->un_asciilabel, LEN_DKL_ASCII); 22804 #else 22805 #error "No VTOC format defined." 22806 #endif 22807 return (0); 22808 } 22809 22810 /* 22811 * Function: sd_clear_efi 22812 * 22813 * Description: This routine clears all EFI labels. 22814 * 22815 * Arguments: un - driver soft state (unit) structure 22816 * 22817 * Return Code: void 22818 */ 22819 22820 static void 22821 sd_clear_efi(struct sd_lun *un) 22822 { 22823 efi_gpt_t *gpt; 22824 uint_t lbasize; 22825 uint64_t cap; 22826 int rval; 22827 22828 ASSERT(!mutex_owned(SD_MUTEX(un))); 22829 22830 gpt = kmem_alloc(sizeof (efi_gpt_t), KM_SLEEP); 22831 22832 if (sd_send_scsi_READ(un, gpt, DEV_BSIZE, 1, SD_PATH_DIRECT) != 0) { 22833 goto done; 22834 } 22835 22836 sd_swap_efi_gpt(gpt); 22837 rval = sd_validate_efi(gpt); 22838 if (rval == 0) { 22839 /* clear primary */ 22840 bzero(gpt, sizeof (efi_gpt_t)); 22841 if ((rval = sd_send_scsi_WRITE(un, gpt, EFI_LABEL_SIZE, 1, 22842 SD_PATH_DIRECT))) { 22843 SD_INFO(SD_LOG_IO_PARTITION, un, 22844 "sd_clear_efi: clear primary label failed\n"); 22845 } 22846 } 22847 /* the backup */ 22848 rval = sd_send_scsi_READ_CAPACITY(un, &cap, &lbasize, 22849 SD_PATH_DIRECT); 22850 if (rval) { 22851 goto done; 22852 } 22853 if ((rval = sd_send_scsi_READ(un, gpt, lbasize, 22854 cap - 1, SD_PATH_DIRECT)) != 0) { 22855 goto done; 22856 } 22857 sd_swap_efi_gpt(gpt); 22858 rval = sd_validate_efi(gpt); 22859 if (rval == 0) { 22860 /* clear backup */ 22861 SD_TRACE(SD_LOG_IOCTL, un, "sd_clear_efi clear backup@%lu\n", 22862 cap-1); 22863 bzero(gpt, sizeof (efi_gpt_t)); 22864 if ((rval = sd_send_scsi_WRITE(un, gpt, EFI_LABEL_SIZE, 22865 cap-1, SD_PATH_DIRECT))) { 22866 SD_INFO(SD_LOG_IO_PARTITION, un, 22867 "sd_clear_efi: clear backup label failed\n"); 22868 } 22869 } 22870 22871 done: 22872 kmem_free(gpt, sizeof (efi_gpt_t)); 22873 } 22874 22875 /* 22876 * Function: sd_set_vtoc 22877 * 22878 * Description: This routine writes the disk label to its primary and backup on-disk locations 22879 * 22880 * Arguments: un - driver soft state (unit) structure 22881 * dkl - the label data to be written 22882 * 22883 * Return Code: 0 for success, or the error code from the label read/write 22884 */ 22885 22886 static int 22887 sd_set_vtoc(struct sd_lun *un, struct dk_label *dkl) 22888 { 22889 void *shadow_buf; 22890 uint_t label_addr; 22891 int sec; 22892 int blk; 22893 int head; 22894 int cyl; 22895 int rval; 22896 22897 #if defined(__i386) || defined(__amd64) 22898 label_addr = un->un_solaris_offset + DK_LABEL_LOC; 22899 #else 22900 /* Write the primary label at block 0 of the solaris partition. */ 22901 label_addr = 0; 22902 #endif 22903 22904 if (NOT_DEVBSIZE(un)) { 22905 shadow_buf = kmem_zalloc(un->un_tgt_blocksize, KM_SLEEP); 22906 /* 22907 * Read the target's first block.
22908 */ 22909 if ((rval = sd_send_scsi_READ(un, shadow_buf, 22910 un->un_tgt_blocksize, label_addr, 22911 SD_PATH_STANDARD)) != 0) { 22912 goto exit; 22913 } 22914 /* 22915 * Copy the contents of the label into the shadow buffer 22916 * which is of the size of target block size. 22917 */ 22918 bcopy(dkl, shadow_buf, sizeof (struct dk_label)); 22919 } 22920 22921 /* Write the primary label */ 22922 if (NOT_DEVBSIZE(un)) { 22923 rval = sd_send_scsi_WRITE(un, shadow_buf, un->un_tgt_blocksize, 22924 label_addr, SD_PATH_STANDARD); 22925 } else { 22926 rval = sd_send_scsi_WRITE(un, dkl, un->un_sys_blocksize, 22927 label_addr, SD_PATH_STANDARD); 22928 } 22929 if (rval != 0) { 22930 goto exit; /* free shadow_buf, if allocated, before returning */ 22931 } 22932 22933 /* 22934 * Calculate where the backup labels go. They are always on 22935 * the last alternate cylinder, but some older drives put them 22936 * on head 2 instead of the last head. They are always on the 22937 * first 5 odd sectors of the appropriate track. 22938 * 22939 * We have no choice at this point, but to believe that the 22940 * disk label is valid. Use the geometry of the disk 22941 * as described in the label. 22942 */ 22943 cyl = dkl->dkl_ncyl + dkl->dkl_acyl - 1; 22944 head = dkl->dkl_nhead - 1; 22945 22946 /* 22947 * Write and verify the backup labels. Make sure we don't try to 22948 * write past the last cylinder. 22949 */ 22950 for (sec = 1; ((sec < 5 * 2 + 1) && (sec < dkl->dkl_nsect)); sec += 2) { 22951 blk = (daddr_t)( 22952 (cyl * ((dkl->dkl_nhead * dkl->dkl_nsect) - dkl->dkl_apc)) + 22953 (head * dkl->dkl_nsect) + sec); 22954 #if defined(__i386) || defined(__amd64) 22955 blk += un->un_solaris_offset; 22956 #endif 22957 if (NOT_DEVBSIZE(un)) { 22958 uint64_t tblk; 22959 /* 22960 * Need to read the block first for read modify write. 22961 */ 22962 tblk = (uint64_t)blk; 22963 blk = (int)((tblk * un->un_sys_blocksize) / 22964 un->un_tgt_blocksize); 22965 if ((rval = sd_send_scsi_READ(un, shadow_buf, 22966 un->un_tgt_blocksize, blk, 22967 SD_PATH_STANDARD)) != 0) { 22968 goto exit; 22969 } 22970 /* 22971 * Modify the shadow buffer with the label. 22972 */ 22973 bcopy(dkl, shadow_buf, sizeof (struct dk_label)); 22974 rval = sd_send_scsi_WRITE(un, shadow_buf, 22975 un->un_tgt_blocksize, blk, SD_PATH_STANDARD); 22976 } else { 22977 rval = sd_send_scsi_WRITE(un, dkl, un->un_sys_blocksize, 22978 blk, SD_PATH_STANDARD); 22979 SD_INFO(SD_LOG_IO_PARTITION, un, 22980 "sd_set_vtoc: wrote backup label %d\n", blk); 22981 } 22982 if (rval != 0) { 22983 goto exit; 22984 } 22985 } 22986 exit: 22987 if (NOT_DEVBSIZE(un)) { 22988 kmem_free(shadow_buf, un->un_tgt_blocksize); 22989 } 22990 return (rval); 22991 } 22992 22993 /* 22994 * Function: sd_clear_vtoc 22995 * 22996 * Description: This routine clears out the VTOC labels.
22997 * 22998 * Arguments: un - driver soft state (unit) structure 22999 * 23000 * Return: void 23001 */ 23002 23003 static void 23004 sd_clear_vtoc(struct sd_lun *un) 23005 { 23006 struct dk_label *dkl; 23007 23008 mutex_exit(SD_MUTEX(un)); 23009 dkl = kmem_zalloc(sizeof (struct dk_label), KM_SLEEP); 23010 mutex_enter(SD_MUTEX(un)); 23011 /* 23012 * sd_set_vtoc uses these fields in order to figure out 23013 * where to overwrite the backup labels 23014 */ 23015 dkl->dkl_apc = un->un_g.dkg_apc; 23016 dkl->dkl_ncyl = un->un_g.dkg_ncyl; 23017 dkl->dkl_acyl = un->un_g.dkg_acyl; 23018 dkl->dkl_nhead = un->un_g.dkg_nhead; 23019 dkl->dkl_nsect = un->un_g.dkg_nsect; 23020 mutex_exit(SD_MUTEX(un)); 23021 (void) sd_set_vtoc(un, dkl); 23022 kmem_free(dkl, sizeof (struct dk_label)); 23023 23024 mutex_enter(SD_MUTEX(un)); 23025 } 23026 23027 /* 23028 * Function: sd_write_label 23029 * 23030 * Description: This routine will validate and write the driver soft state vtoc 23031 * contents to the device. 23032 * 23033 * Arguments: dev - the device number 23034 * 23035 * Return Code: the code returned by sd_send_scsi_cmd() 23036 * 0 23037 * EINVAL 23038 * ENXIO 23039 * ENOMEM 23040 */ 23041 23042 static int 23043 sd_write_label(dev_t dev) 23044 { 23045 struct sd_lun *un; 23046 struct dk_label *dkl; 23047 short sum; 23048 short *sp; 23049 int i; 23050 int rval; 23051 23052 if (((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) || 23053 (un->un_state == SD_STATE_OFFLINE)) { 23054 return (ENXIO); 23055 } 23056 ASSERT(mutex_owned(SD_MUTEX(un))); 23057 mutex_exit(SD_MUTEX(un)); 23058 dkl = kmem_zalloc(sizeof (struct dk_label), KM_SLEEP); 23059 mutex_enter(SD_MUTEX(un)); 23060 23061 bcopy(&un->un_vtoc, &dkl->dkl_vtoc, sizeof (struct dk_vtoc)); 23062 dkl->dkl_rpm = un->un_g.dkg_rpm; 23063 dkl->dkl_pcyl = un->un_g.dkg_pcyl; 23064 dkl->dkl_apc = un->un_g.dkg_apc; 23065 dkl->dkl_intrlv = un->un_g.dkg_intrlv; 23066 dkl->dkl_ncyl = un->un_g.dkg_ncyl; 23067 dkl->dkl_acyl = un->un_g.dkg_acyl; 23068 dkl->dkl_nhead = un->un_g.dkg_nhead; 23069 dkl->dkl_nsect = un->un_g.dkg_nsect; 23070 23071 #if defined(_SUNOS_VTOC_8) 23072 dkl->dkl_obs1 = un->un_g.dkg_obs1; 23073 dkl->dkl_obs2 = un->un_g.dkg_obs2; 23074 dkl->dkl_obs3 = un->un_g.dkg_obs3; 23075 for (i = 0; i < NDKMAP; i++) { 23076 dkl->dkl_map[i].dkl_cylno = un->un_map[i].dkl_cylno; 23077 dkl->dkl_map[i].dkl_nblk = un->un_map[i].dkl_nblk; 23078 } 23079 bcopy(un->un_asciilabel, dkl->dkl_asciilabel, LEN_DKL_ASCII); 23080 #elif defined(_SUNOS_VTOC_16) 23081 dkl->dkl_skew = un->un_dkg_skew; 23082 #else 23083 #error "No VTOC format defined." 
23084 #endif 23085 23086 dkl->dkl_magic = DKL_MAGIC; 23087 dkl->dkl_write_reinstruct = un->un_g.dkg_write_reinstruct; 23088 dkl->dkl_read_reinstruct = un->un_g.dkg_read_reinstruct; 23089 23090 /* Construct checksum for the new disk label */ 23091 sum = 0; 23092 sp = (short *)dkl; 23093 i = sizeof (struct dk_label) / sizeof (short); 23094 while (i--) { 23095 sum ^= *sp++; 23096 } 23097 dkl->dkl_cksum = sum; 23098 23099 mutex_exit(SD_MUTEX(un)); 23100 23101 rval = sd_set_vtoc(un, dkl); 23102 exit: 23103 kmem_free(dkl, sizeof (struct dk_label)); 23104 mutex_enter(SD_MUTEX(un)); 23105 return (rval); 23106 } 23107 23108 static int 23109 sd_dkio_set_efi(dev_t dev, caddr_t arg, int flag) 23110 { 23111 struct sd_lun *un = NULL; 23112 dk_efi_t user_efi; 23113 int rval = 0; 23114 void *buffer; 23115 23116 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) 23117 return (ENXIO); 23118 23119 if (ddi_copyin(arg, &user_efi, sizeof (dk_efi_t), flag)) 23120 return (EFAULT); 23121 23122 user_efi.dki_data = (void *)(uintptr_t)user_efi.dki_data_64; 23123 23124 if ((user_efi.dki_length % un->un_tgt_blocksize) || 23125 (user_efi.dki_length > un->un_max_xfer_size)) 23126 return (EINVAL); 23127 23128 buffer = kmem_alloc(user_efi.dki_length, KM_SLEEP); 23129 if (ddi_copyin(user_efi.dki_data, buffer, user_efi.dki_length, flag)) { 23130 rval = EFAULT; 23131 } else { 23132 /* 23133 * let's clear the vtoc labels and clear the softstate 23134 * vtoc. 23135 */ 23136 mutex_enter(SD_MUTEX(un)); 23137 if (un->un_vtoc.v_sanity == VTOC_SANE) { 23138 SD_TRACE(SD_LOG_IO_PARTITION, un, 23139 "sd_dkio_set_efi: CLEAR VTOC\n"); 23140 sd_clear_vtoc(un); 23141 bzero(&un->un_vtoc, sizeof (struct dk_vtoc)); 23142 mutex_exit(SD_MUTEX(un)); 23143 ddi_remove_minor_node(SD_DEVINFO(un), "h"); 23144 ddi_remove_minor_node(SD_DEVINFO(un), "h,raw"); 23145 (void) ddi_create_minor_node(SD_DEVINFO(un), "wd", 23146 S_IFBLK, 23147 (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE, 23148 un->un_node_type, NULL); 23149 (void) ddi_create_minor_node(SD_DEVINFO(un), "wd,raw", 23150 S_IFCHR, 23151 (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE, 23152 un->un_node_type, NULL); 23153 } else 23154 mutex_exit(SD_MUTEX(un)); 23155 rval = sd_send_scsi_WRITE(un, buffer, user_efi.dki_length, 23156 user_efi.dki_lba, SD_PATH_DIRECT); 23157 if (rval == 0) { 23158 mutex_enter(SD_MUTEX(un)); 23159 un->un_f_geometry_is_valid = FALSE; 23160 mutex_exit(SD_MUTEX(un)); 23161 } 23162 } 23163 kmem_free(buffer, user_efi.dki_length); 23164 return (rval); 23165 } 23166 23167 /* 23168 * Function: sd_dkio_get_mboot 23169 * 23170 * Description: This routine is the driver entry point for handling user 23171 * requests to get the current device mboot (DKIOCGMBOOT) 23172 * 23173 * Arguments: dev - the device number 23174 * arg - pointer to user provided mboot structure specifying 23175 * the current mboot. 23176 * flag - this argument is a pass through to ddi_copyxxx() 23177 * directly from the mode argument of ioctl(). 
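 *
 *		A minimal user-level sketch (illustration only, not part
 *		of the driver; fd is assumed open on the raw device and
 *		error handling is elided):
 *
 *			struct mboot mb;
 *			if (ioctl(fd, DKIOCGMBOOT, &mb) == 0)
 *				(void) printf("mboot signature 0x%x\n",
 *				    mb.signature);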
23178 * 23179 * Return Code: 0 23180 * EINVAL 23181 * EFAULT 23182 * ENXIO 23183 */ 23184 23185 static int 23186 sd_dkio_get_mboot(dev_t dev, caddr_t arg, int flag) 23187 { 23188 struct sd_lun *un; 23189 struct mboot *mboot; 23190 int rval; 23191 size_t buffer_size; 23192 23193 if (((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) || 23194 (un->un_state == SD_STATE_OFFLINE)) { 23195 return (ENXIO); 23196 } 23197 23198 #if defined(_SUNOS_VTOC_8) 23199 if ((!ISREMOVABLE(un)) || (arg == NULL)) { 23200 #elif defined(_SUNOS_VTOC_16) 23201 if (arg == NULL) { 23202 #endif 23203 return (EINVAL); 23204 } 23205 23206 /* 23207 * Read the mboot block, located at absolute block 0 on the target. 23208 */ 23209 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct mboot)); 23210 23211 SD_TRACE(SD_LOG_IO_PARTITION, un, 23212 "sd_dkio_get_mboot: allocation size: 0x%x\n", buffer_size); 23213 23214 mboot = kmem_zalloc(buffer_size, KM_SLEEP); 23215 if ((rval = sd_send_scsi_READ(un, mboot, buffer_size, 0, 23216 SD_PATH_STANDARD)) == 0) { 23217 if (ddi_copyout(mboot, (void *)arg, 23218 sizeof (struct mboot), flag) != 0) { 23219 rval = EFAULT; 23220 } 23221 } 23222 kmem_free(mboot, buffer_size); 23223 return (rval); 23224 } 23225 23226 23227 /* 23228 * Function: sd_dkio_set_mboot 23229 * 23230 * Description: This routine is the driver entry point for handling user 23231 * requests to validate and set the device master boot 23232 * (DKIOCSMBOOT). 23233 * 23234 * Arguments: dev - the device number 23235 * arg - pointer to user provided mboot structure used to set the 23236 * master boot. 23237 * flag - this argument is a pass through to ddi_copyxxx() 23238 * directly from the mode argument of ioctl(). 23239 * 23240 * Return Code: 0 23241 * EINVAL 23242 * EFAULT 23243 * ENXIO 23244 */ 23245 23246 static int 23247 sd_dkio_set_mboot(dev_t dev, caddr_t arg, int flag) 23248 { 23249 struct sd_lun *un = NULL; 23250 struct mboot *mboot = NULL; 23251 int rval; 23252 ushort_t magic; 23253 23254 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23255 return (ENXIO); 23256 } 23257 23258 ASSERT(!mutex_owned(SD_MUTEX(un))); 23259 23260 #if defined(_SUNOS_VTOC_8) 23261 if (!ISREMOVABLE(un)) { 23262 return (EINVAL); 23263 } 23264 #endif 23265 23266 if (arg == NULL) { 23267 return (EINVAL); 23268 } 23269 23270 mboot = kmem_zalloc(sizeof (struct mboot), KM_SLEEP); 23271 23272 if (ddi_copyin((const void *)arg, mboot, 23273 sizeof (struct mboot), flag) != 0) { 23274 kmem_free(mboot, (size_t)(sizeof (struct mboot))); 23275 return (EFAULT); 23276 } 23277 23278 /* Is this really a master boot record? */ 23279 magic = LE_16(mboot->signature); 23280 if (magic != MBB_MAGIC) { 23281 kmem_free(mboot, (size_t)(sizeof (struct mboot))); 23282 return (EINVAL); 23283 } 23284 23285 rval = sd_send_scsi_WRITE(un, mboot, un->un_sys_blocksize, 0, 23286 SD_PATH_STANDARD); 23287 23288 mutex_enter(SD_MUTEX(un)); 23289 #if defined(__i386) || defined(__amd64) 23290 if (rval == 0) { 23291 /* 23292 * mboot has been written successfully. 23293 * update the fdisk and vtoc tables in memory 23294 */ 23295 rval = sd_update_fdisk_and_vtoc(un); 23296 if ((un->un_f_geometry_is_valid == FALSE) || (rval != 0)) { 23297 mutex_exit(SD_MUTEX(un)); 23298 kmem_free(mboot, (size_t)(sizeof (struct mboot))); 23299 return (rval); 23300 } 23301 } 23302 23303 /* 23304 * If the mboot write fails, write the devid anyway, what can it hurt? 23305 * Also preserve the device id by writing to the disk acyl for the case 23306 * where a devid has been fabricated. 
23307 */ 23308 if (!ISREMOVABLE(un) && !ISCD(un) && 23309 (un->un_f_opt_fab_devid == TRUE)) { 23310 if (un->un_devid == NULL) { 23311 sd_register_devid(un, SD_DEVINFO(un), 23312 SD_TARGET_IS_UNRESERVED); 23313 } else { 23314 /* 23315 * The device id for this disk has been 23316 * fabricated. Fabricated device ids are 23317 * managed by storing them in the last 2 23318 * available sectors on the drive. The device 23319 * id must be preserved by writing it back out 23320 * to this location. 23321 */ 23322 if (sd_write_deviceid(un) != 0) { 23323 ddi_devid_free(un->un_devid); 23324 un->un_devid = NULL; 23325 } 23326 } 23327 } 23328 #else 23329 if (rval == 0) { 23330 /* 23331 * mboot has been written successfully. 23332 * set up the default geometry and VTOC 23333 */ 23334 if (un->un_blockcount <= DK_MAX_BLOCKS) 23335 sd_setup_default_geometry(un); 23336 } 23337 #endif 23338 mutex_exit(SD_MUTEX(un)); 23339 kmem_free(mboot, (size_t)(sizeof (struct mboot))); 23340 return (rval); 23341 } 23342 23343 23344 /* 23345 * Function: sd_setup_default_geometry 23346 * 23347 * Description: This local utility routine sets the default geometry as part of 23348 * setting the device mboot. 23349 * 23350 * Arguments: un - driver soft state (unit) structure 23351 * 23352 * Note: This may be redundant with sd_build_default_label. 23353 */ 23354 23355 static void 23356 sd_setup_default_geometry(struct sd_lun *un) 23357 { 23358 /* zero out the soft state geometry and partition table. */ 23359 bzero(&un->un_g, sizeof (struct dk_geom)); 23360 bzero(&un->un_vtoc, sizeof (struct dk_vtoc)); 23361 bzero(un->un_map, NDKMAP * (sizeof (struct dk_map))); 23362 un->un_asciilabel[0] = '\0'; 23363 23364 /* 23365 * For the rpm, we use the minimum for the disk. 23366 * For the heads, cylinders and sectors per track: 23367 * if the capacity is <= 1GB, use head = 64 and sect = 32; 23368 * else use head = 255 and sect = 63. 23369 * Note: the capacity should equal the C*H*S product. 23370 * This will cause some truncation of size due to 23371 * round off errors. For CD-ROMs, this truncation can 23372 * have adverse side effects, so ncyl and nhead are 23373 * returned as 1. The nsect would overflow for most 23374 * CD-ROMs, as nsect is of type ushort. 23375 */ 23376 if (ISCD(un)) { 23377 un->un_g.dkg_ncyl = 1; 23378 un->un_g.dkg_nhead = 1; 23379 un->un_g.dkg_nsect = un->un_blockcount; 23380 } else { 23381 if (un->un_blockcount <= 0x1000) { 23382 /* Needed for unlabeled SCSI floppies.
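 * (For example, a 2880-block 1.44 MB floppy gets 2 heads, 80
 * cylinders and 2880 / (2 * 80) = 18 sectors per track.)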
*/ 23383 un->un_g.dkg_nhead = 2; 23384 un->un_g.dkg_ncyl = 80; 23385 un->un_g.dkg_pcyl = 80; 23386 un->un_g.dkg_nsect = un->un_blockcount / (2 * 80); 23387 } else if (un->un_blockcount <= 0x200000) { 23388 un->un_g.dkg_nhead = 64; 23389 un->un_g.dkg_nsect = 32; 23390 un->un_g.dkg_ncyl = un->un_blockcount / (64 * 32); 23391 } else { 23392 un->un_g.dkg_nhead = 255; 23393 un->un_g.dkg_nsect = 63; 23394 un->un_g.dkg_ncyl = un->un_blockcount / (255 * 63); 23395 } 23396 un->un_blockcount = un->un_g.dkg_ncyl * 23397 un->un_g.dkg_nhead * un->un_g.dkg_nsect; 23398 } 23399 un->un_g.dkg_acyl = 0; 23400 un->un_g.dkg_bcyl = 0; 23401 un->un_g.dkg_intrlv = 1; 23402 un->un_g.dkg_rpm = 200; 23403 un->un_g.dkg_read_reinstruct = 0; 23404 un->un_g.dkg_write_reinstruct = 0; 23405 if (un->un_g.dkg_pcyl == 0) { 23406 un->un_g.dkg_pcyl = un->un_g.dkg_ncyl + un->un_g.dkg_acyl; 23407 } 23408 23409 un->un_map['a'-'a'].dkl_cylno = 0; 23410 un->un_map['a'-'a'].dkl_nblk = un->un_blockcount; 23411 un->un_map['c'-'a'].dkl_cylno = 0; 23412 un->un_map['c'-'a'].dkl_nblk = un->un_blockcount; 23413 un->un_f_geometry_is_valid = FALSE; 23414 } 23415 23416 23417 #if defined(__i386) || defined(__amd64) 23418 /* 23419 * Function: sd_update_fdisk_and_vtoc 23420 * 23421 * Description: This local utility routine updates the device fdisk and vtoc 23422 * as part of setting the device mboot. 23423 * 23424 * Arguments: un - driver soft state (unit) structure 23425 * 23426 * Return Code: 0 for success or errno-type return code. 23427 * 23428 * Note: x86: This looks like a duplicate of sd_validate_geometry(), but 23429 * these did exist separately in x86 sd.c!!! 23430 */ 23431 23432 static int 23433 sd_update_fdisk_and_vtoc(struct sd_lun *un) 23434 { 23435 static char labelstring[128]; 23436 static char buf[256]; 23437 char *label = 0; 23438 int count; 23439 int label_rc = 0; 23440 int gvalid = un->un_f_geometry_is_valid; 23441 int fdisk_rval; 23442 int lbasize; 23443 int capacity; 23444 23445 ASSERT(mutex_owned(SD_MUTEX(un))); 23446 23447 if (un->un_f_tgt_blocksize_is_valid == FALSE) { 23448 return (EINVAL); 23449 } 23450 23451 if (un->un_f_blockcount_is_valid == FALSE) { 23452 return (EINVAL); 23453 } 23454 23455 #if defined(_SUNOS_VTOC_16) 23456 /* 23457 * Set up the "whole disk" fdisk partition; this should always 23458 * exist, regardless of whether the disk contains an fdisk table 23459 * or vtoc. 23460 */ 23461 un->un_map[P0_RAW_DISK].dkl_cylno = 0; 23462 un->un_map[P0_RAW_DISK].dkl_nblk = un->un_blockcount; 23463 #endif /* defined(_SUNOS_VTOC_16) */ 23464 23465 /* 23466 * copy the lbasize and capacity so that if they're 23467 * reset while we're not holding the SD_MUTEX(un), we will 23468 * continue to use valid values after the SD_MUTEX(un) is 23469 * reacquired. 23470 */ 23471 lbasize = un->un_tgt_blocksize; 23472 capacity = un->un_blockcount; 23473 23474 /* 23475 * refresh the logical and physical geometry caches. 23476 * (data from mode sense format/rigid disk geometry pages, 23477 * and scsi_ifgetcap("geometry"). 23478 */ 23479 sd_resync_geom_caches(un, capacity, lbasize, SD_PATH_DIRECT); 23480 23481 /* 23482 * Only DIRECT ACCESS devices will have Sun labels.
23483 * CDs supposedly have a Sun label, too 23484 */ 23485 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT || ISREMOVABLE(un)) { 23486 fdisk_rval = sd_read_fdisk(un, capacity, lbasize, 23487 SD_PATH_DIRECT); 23488 if (fdisk_rval == SD_CMD_FAILURE) { 23489 ASSERT(mutex_owned(SD_MUTEX(un))); 23490 return (EIO); 23491 } 23492 23493 if (fdisk_rval == SD_CMD_RESERVATION_CONFLICT) { 23494 ASSERT(mutex_owned(SD_MUTEX(un))); 23495 return (EACCES); 23496 } 23497 23498 if (un->un_solaris_size <= DK_LABEL_LOC) { 23499 /* 23500 * Found fdisk table but no Solaris partition entry, 23501 * so don't call sd_uselabel() and don't create 23502 * a default label. 23503 */ 23504 label_rc = 0; 23505 un->un_f_geometry_is_valid = TRUE; 23506 goto no_solaris_partition; 23507 } 23508 23509 #if defined(_SUNOS_VTOC_8) 23510 label = (char *)un->un_asciilabel; 23511 #elif defined(_SUNOS_VTOC_16) 23512 label = (char *)un->un_vtoc.v_asciilabel; 23513 #else 23514 #error "No VTOC format defined." 23515 #endif 23516 } else if (capacity < 0) { 23517 ASSERT(mutex_owned(SD_MUTEX(un))); 23518 return (EINVAL); 23519 } 23520 23521 /* 23522 * For removable media, we only reach here if a Solaris 23523 * partition was found. 23524 * If un_f_geometry_is_valid is FALSE, the Solaris partition 23525 * has changed from the previous one, so we set up a 23526 * default VTOC in this case. 23527 */ 23528 if (un->un_f_geometry_is_valid == FALSE) { 23529 sd_build_default_label(un); 23530 label_rc = 0; 23531 } 23532 23533 no_solaris_partition: 23534 if ((!ISREMOVABLE(un) || 23535 (ISREMOVABLE(un) && un->un_mediastate == DKIO_EJECTED)) && 23536 (un->un_state == SD_STATE_NORMAL && gvalid == FALSE)) { 23537 /* 23538 * Print out a message indicating who and what we are. 23539 * We do this only when we happen to really validate the 23540 * geometry. We may call sd_validate_geometry() at other 23541 * times, e.g. from ioctl()s like Get VTOC, in which case 23542 * we don't want to print the label. 23543 * If the geometry is valid, print the label string, 23544 * else print vendor and product info, if available. 23545 */ 23546 if ((un->un_f_geometry_is_valid == TRUE) && (label != NULL)) { 23547 SD_INFO(SD_LOG_IOCTL_DKIO, un, "?<%s>\n", label); 23548 } else { 23549 mutex_enter(&sd_label_mutex); 23550 sd_inq_fill(SD_INQUIRY(un)->inq_vid, VIDMAX, 23551 labelstring); 23552 sd_inq_fill(SD_INQUIRY(un)->inq_pid, PIDMAX, 23553 &labelstring[64]); 23554 (void) sprintf(buf, "?Vendor '%s', product '%s'", 23555 labelstring, &labelstring[64]); 23556 if (un->un_f_blockcount_is_valid == TRUE) { 23557 (void) sprintf(&buf[strlen(buf)], 23558 ", %" PRIu64 " %u byte blocks\n", 23559 un->un_blockcount, 23560 un->un_tgt_blocksize); 23561 } else { 23562 (void) sprintf(&buf[strlen(buf)], 23563 ", (unknown capacity)\n"); 23564 } 23565 SD_INFO(SD_LOG_IOCTL_DKIO, un, buf); 23566 mutex_exit(&sd_label_mutex); 23567 } 23568 } 23569 23570 #if defined(_SUNOS_VTOC_16) 23571 /* 23572 * If we have valid geometry, set up the remaining fdisk partitions. 23573 * Note that dkl_cylno is not used for the fdisk map entries, so 23574 * we set it to an entirely bogus value.
23575 */ 23576 for (count = 0; count < FD_NUMPART; count++) { 23577 un->un_map[FDISK_P1 + count].dkl_cylno = -1; 23578 un->un_map[FDISK_P1 + count].dkl_nblk = 23579 un->un_fmap[count].fmap_nblk; 23580 un->un_offset[FDISK_P1 + count] = 23581 un->un_fmap[count].fmap_start; 23582 } 23583 #endif 23584 23585 for (count = 0; count < NDKMAP; count++) { 23586 #if defined(_SUNOS_VTOC_8) 23587 struct dk_map *lp = &un->un_map[count]; 23588 un->un_offset[count] = 23589 un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno; 23590 #elif defined(_SUNOS_VTOC_16) 23591 struct dkl_partition *vp = &un->un_vtoc.v_part[count]; 23592 un->un_offset[count] = vp->p_start + un->un_solaris_offset; 23593 #else 23594 #error "No VTOC format defined." 23595 #endif 23596 } 23597 23598 ASSERT(mutex_owned(SD_MUTEX(un))); 23599 return (label_rc); 23600 } 23601 #endif 23602 23603 23604 /* 23605 * Function: sd_check_media 23606 * 23607 * Description: This utility routine implements the functionality for the 23608 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 23609 * driver state changes from that specified by the user 23610 * (inserted or ejected). For example, if the user specifies 23611 * DKIO_EJECTED and the current media state is inserted this 23612 * routine will immediately return DKIO_INSERTED. However, if the 23613 * current media state is not inserted the user thread will be 23614 * blocked until the drive state changes. If DKIO_NONE is specified 23615 * the user thread will block until a drive state change occurs. 23616 * 23617 * Arguments: dev - the device number 23618 * state - user pointer to a dkio_state, updated with the current 23619 * drive state at return. 23620 * 23621 * Return Code: ENXIO 23622 * EIO 23623 * EAGAIN 23624 * EINTR 23625 */ 23626 23627 static int 23628 sd_check_media(dev_t dev, enum dkio_state state) 23629 { 23630 struct sd_lun *un = NULL; 23631 enum dkio_state prev_state; 23632 opaque_t token = NULL; 23633 int rval = 0; 23634 23635 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23636 return (ENXIO); 23637 } 23638 23639 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 23640 23641 mutex_enter(SD_MUTEX(un)); 23642 23643 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 23644 "state=%x, mediastate=%x\n", state, un->un_mediastate); 23645 23646 prev_state = un->un_mediastate; 23647 23648 /* is there anything to do? */ 23649 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 23650 /* 23651 * submit the request to the scsi_watch service; 23652 * scsi_media_watch_cb() does the real work 23653 */ 23654 mutex_exit(SD_MUTEX(un)); 23655 23656 /* 23657 * This change handles the case where a scsi watch request is 23658 * added to a device that is powered down. To accomplish this 23659 * we power up the device before adding the scsi watch request, 23660 * since the scsi watch sends a TUR directly to the device 23661 * which the device cannot handle if it is powered down. 23662 */ 23663 if (sd_pm_entry(un) != DDI_SUCCESS) { 23664 mutex_enter(SD_MUTEX(un)); 23665 goto done; 23666 } 23667 23668 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 23669 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 23670 (caddr_t)dev); 23671 23672 sd_pm_exit(un); 23673 23674 mutex_enter(SD_MUTEX(un)); 23675 if (token == NULL) { 23676 rval = EAGAIN; 23677 goto done; 23678 } 23679 23680 /* 23681 * This is a special case IOCTL that doesn't return 23682 * until the media state changes. 
		 * Routine sdpower
23683		 * knows about and handles this, so don't count it
23684		 * as an active cmd in the driver, which would
23685		 * keep the device busy to the pm framework.
23686		 * If the count isn't decremented the device can't
23687		 * be powered down.
23688		 */
23689		un->un_ncmds_in_driver--;
23690		ASSERT(un->un_ncmds_in_driver >= 0);
23691
23692		/*
23693		 * if a prior request had been made, this will be the same
23694		 * token, as scsi_watch was designed that way.
23695		 */
23696		un->un_swr_token = token;
23697		un->un_specified_mediastate = state;
23698
23699		/*
23700		 * now wait for media change
23701		 * we will not be signalled unless mediastate == state but it is
23702		 * still better to test for this condition, since there is a
23703		 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED
23704		 */
23705		SD_TRACE(SD_LOG_COMMON, un,
23706		    "sd_check_media: waiting for media state change\n");
23707		while (un->un_mediastate == state) {
23708			if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
23709				SD_TRACE(SD_LOG_COMMON, un,
23710				    "sd_check_media: waiting for media state "
23711				    "was interrupted\n");
23712				un->un_ncmds_in_driver++;
23713				rval = EINTR;
23714				goto done;
23715			}
23716			SD_TRACE(SD_LOG_COMMON, un,
23717			    "sd_check_media: received signal, state=%x\n",
23718			    un->un_mediastate);
23719		}
23720		/*
23721		 * Inc the counter to indicate the device once again
23722		 * has an active outstanding cmd.
23723		 */
23724		un->un_ncmds_in_driver++;
23725	}
23726
23727	/* invalidate geometry */
23728	if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
23729		sr_ejected(un);
23730	}
23731
23732	if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
23733		uint64_t	capacity;
23734		uint_t		lbasize;
23735
23736		SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
23737		mutex_exit(SD_MUTEX(un));
23738		/*
23739		 * Since the following routines use SD_PATH_DIRECT, we must
23740		 * call PM directly before the upcoming disk accesses. This
23741		 * may cause the disk to be powered up and spun up.
23742		 */
23743
23744		if (sd_pm_entry(un) == DDI_SUCCESS) {
23745			rval = sd_send_scsi_READ_CAPACITY(un,
23746			    &capacity,
23747			    &lbasize, SD_PATH_DIRECT);
23748			if (rval != 0) {
23749				sd_pm_exit(un);
23750				mutex_enter(SD_MUTEX(un));
23751				goto done;
23752			}
23753		} else {
23754			rval = EIO;
23755			mutex_enter(SD_MUTEX(un));
23756			goto done;
23757		}
23758		mutex_enter(SD_MUTEX(un));
23759
23760		sd_update_block_info(un, lbasize, capacity);
23761
23762		un->un_f_geometry_is_valid = FALSE;
23763		(void) sd_validate_geometry(un, SD_PATH_DIRECT);
23764
23765		mutex_exit(SD_MUTEX(un));
23766		rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
23767		    SD_PATH_DIRECT);
23768		sd_pm_exit(un);
23769
23770		mutex_enter(SD_MUTEX(un));
23771	}
23772 done:
23773	un->un_f_watcht_stopped = FALSE;
23774	if (un->un_swr_token) {
23775		/*
23776		 * Use of this local token and the mutex ensures that we avoid
23777		 * some race conditions associated with terminating the
23778		 * scsi watch.
23779		 */
23780		token = un->un_swr_token;
23781		un->un_swr_token = (opaque_t)NULL;
23782		mutex_exit(SD_MUTEX(un));
23783		(void) scsi_watch_request_terminate(token,
23784		    SCSI_WATCH_TERMINATE_WAIT);
23785		mutex_enter(SD_MUTEX(un));
23786	}
23787
23788	/*
23789	 * Update the capacity kstat value, if no media previously
23790	 * (capacity kstat is 0) and media has now been inserted
23791	 * (un_f_blockcount_is_valid == TRUE).
23792	 * This is a more generic way than checking for ISREMOVABLE.
23793	 */
23794	if (un->un_errstats) {
23795		struct	sd_errstats	*stp = NULL;
23796
23797		stp = (struct sd_errstats *)un->un_errstats->ks_data;
23798		if ((stp->sd_capacity.value.ui64 == 0) &&
23799		    (un->un_f_blockcount_is_valid == TRUE)) {
23800			stp->sd_capacity.value.ui64 =
23801			    (uint64_t)((uint64_t)un->un_blockcount *
23802			    un->un_sys_blocksize);
23803		}
23804	}
23805	mutex_exit(SD_MUTEX(un));
23806	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n");
23807	return (rval);
23808 }
23809
23810
23811 /*
23812  * Function: sd_delayed_cv_broadcast
23813  *
23814  * Description: Delayed cv_broadcast to allow for target to recover from media
23815  *		insertion.
23816  *
23817  * Arguments: arg - driver soft state (unit) structure
23818  */
23819
23820 static void
23821 sd_delayed_cv_broadcast(void *arg)
23822 {
23823	struct sd_lun *un = arg;
23824
23825	SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n");
23826
23827	mutex_enter(SD_MUTEX(un));
23828	un->un_dcvb_timeid = NULL;
23829	cv_broadcast(&un->un_state_cv);
23830	mutex_exit(SD_MUTEX(un));
23831 }
23832
23833
23834 /*
23835  * Function: sd_media_watch_cb
23836  *
23837  * Description: Callback routine used for support of the DKIOCSTATE ioctl. This
23838  *		routine processes the TUR sense data and updates the driver
23839  *		state if a transition has occurred. The user thread
23840  *		(sd_check_media) is then signalled.
23841  *
23842  * Arguments: arg - the device 'dev_t' is used for context to discriminate
23843  *		among multiple watches that share this callback function
23844  *		resultp - scsi watch facility result packet containing scsi
23845  *		packet, status byte and sense data
23846  *
23847  * Return Code: 0 for success, -1 for failure
23848  */
23849
23850 static int
23851 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
23852 {
23853	struct sd_lun *un;
23854	struct scsi_status *statusp = resultp->statusp;
23855	struct scsi_extended_sense *sensep = resultp->sensep;
23856	enum dkio_state state = DKIO_NONE;
23857	dev_t dev = (dev_t)arg;
23858	uchar_t actual_sense_length;
23859
23860	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23861		return (-1);
23862	}
23863	actual_sense_length = resultp->actual_sense_length;
23864
23865	mutex_enter(SD_MUTEX(un));
23866	SD_TRACE(SD_LOG_COMMON, un,
23867	    "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
23868	    *((char *)statusp), (void *)sensep, actual_sense_length);
23869
23870	if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
23871		un->un_mediastate = DKIO_DEV_GONE;
23872		printf("sd_media_watch_cb: dev gone\n");
23873		cv_broadcast(&un->un_state_cv);
23874		mutex_exit(SD_MUTEX(un));
23875
23876		return (0);
23877	}
23878
23879	/*
23880	 * If there was a check condition then sensep points to valid sense
23881	 * data. If status was not a check condition but a reservation or busy
23882	 * status then the new state is DKIO_NONE.
23883	 */
23884	if (sensep != NULL) {
23885		SD_INFO(SD_LOG_COMMON, un,
23886		    "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
23887		    sensep->es_key, sensep->es_add_code, sensep->es_qual_code);
23888		/* This routine only uses up to 13 bytes of sense data. */
23889		if (actual_sense_length >= 13) {
23890			if (sensep->es_key == KEY_UNIT_ATTENTION) {
23891				if (sensep->es_add_code == 0x28) {
23892					state = DKIO_INSERTED;
23893				}
23894			} else {
23895				/*
23896				 * A sense key/ASC/ASCQ of 02/04/02 means
23897				 * that the host should send a start command.
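				 *
				 * (Summary of the state mapping implemented
				 * here, for reference:
				 *	UNIT ATTENTION, ASC 0x28
				 *	    (media changed)	-> DKIO_INSERTED
				 *	NOT READY, ASC 0x3a
				 *	    (no media)		-> DKIO_EJECTED
				 *	NOT READY, ASC 0x04,
				 *	    ASCQ 0x02/0x07/0x08
				 *	    (stopped or busy)	-> DKIO_INSERTED
				 *	GOOD status, CMD_CMPLT	-> DKIO_INSERTED)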
23898				 * Explicitly leave the media state as is
23899				 * (inserted), since the media is present
23900				 * and the host has stopped the device for
23901				 * PM reasons. The next real read/write
23902				 * to this media will bring the device
23903				 * back to the proper state for
23904				 * media access.
23905				 */
23906				if ((sensep->es_key == KEY_NOT_READY) &&
23907				    (sensep->es_add_code == 0x3a)) {
23908					state = DKIO_EJECTED;
23909				}
23910
23911				/*
23912				 * If the drive is busy with an operation
23913				 * or long write, keep the media in an
23914				 * inserted state.
23915				 */
23916
23917				if ((sensep->es_key == KEY_NOT_READY) &&
23918				    (sensep->es_add_code == 0x04) &&
23919				    ((sensep->es_qual_code == 0x02) ||
23920				    (sensep->es_qual_code == 0x07) ||
23921				    (sensep->es_qual_code == 0x08))) {
23922					state = DKIO_INSERTED;
23923				}
23924			}
23925		}
23926	} else if ((*((char *)statusp) == STATUS_GOOD) &&
23927	    (resultp->pkt->pkt_reason == CMD_CMPLT)) {
23928		state = DKIO_INSERTED;
23929	}
23930
23931	SD_TRACE(SD_LOG_COMMON, un,
23932	    "sd_media_watch_cb: state=%x, specified=%x\n",
23933	    state, un->un_specified_mediastate);
23934
23935	/*
23936	 * now signal the waiting thread if this is *not* the specified state;
23937	 * delay the signal if the state is DKIO_INSERTED to allow the target
23938	 * to recover
23939	 */
23940	if (state != un->un_specified_mediastate) {
23941		un->un_mediastate = state;
23942		if (state == DKIO_INSERTED) {
23943			/*
23944			 * delay the signal to give the drive a chance
23945			 * to do what it apparently needs to do
23946			 */
23947			SD_TRACE(SD_LOG_COMMON, un,
23948			    "sd_media_watch_cb: delayed cv_broadcast\n");
23949			if (un->un_dcvb_timeid == NULL) {
23950				un->un_dcvb_timeid =
23951				    timeout(sd_delayed_cv_broadcast, un,
23952				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
23953			}
23954		} else {
23955			SD_TRACE(SD_LOG_COMMON, un,
23956			    "sd_media_watch_cb: immediate cv_broadcast\n");
23957			cv_broadcast(&un->un_state_cv);
23958		}
23959	}
23960	mutex_exit(SD_MUTEX(un));
23961	return (0);
23962 }
23963
23964
23965 /*
23966  * Function: sd_dkio_get_temp
23967  *
23968  * Description: This routine is the driver entry point for handling ioctl
23969  *		requests to get the disk temperature.
23970  *
23971  * Arguments: dev - the device number
23972  *		arg - pointer to user provided dk_temperature structure.
23973  *		flag - this argument is a pass through to ddi_copyxxx()
23974  *		directly from the mode argument of ioctl().
23975  *
23976  * Return Code: 0
23977  *		EFAULT
23978  *		ENXIO
23979  *		EAGAIN
23980  */
23981
23982 static int
23983 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag)
23984 {
23985	struct sd_lun		*un = NULL;
23986	struct dk_temperature	*dktemp = NULL;
23987	uchar_t			*temperature_page;
23988	int			rval = 0;
23989	int			path_flag = SD_PATH_STANDARD;
23990
23991	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23992		return (ENXIO);
23993	}
23994
23995	dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP);
23996
23997	/* copyin the disk temp argument to get the user flags */
23998	if (ddi_copyin((void *)arg, dktemp,
23999	    sizeof (struct dk_temperature), flag) != 0) {
24000		rval = EFAULT;
24001		goto done;
24002	}
24003
24004	/* Initialize the temperature to invalid. */
24005	dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
24006	dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
24007
24008	/*
24009	 * Note: Investigate removing the "bypass pm" semantic.
24010	 * Can we just bypass PM always?
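	 *
	 * (For reference, a user program reads the temperature with the
	 * DKIOCGTEMPERATURE ioctl roughly as follows. An illustrative
	 * userland sketch only; the device path is hypothetical:
	 *
	 *	#include <sys/dkio.h>
	 *	#include <fcntl.h>
	 *
	 *	struct dk_temperature dkt = { DKT_BYPASS_PM };
	 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY | O_NDELAY);
	 *	if (ioctl(fd, DKIOCGTEMPERATURE, &dkt) == 0 &&
	 *	    dkt.dkt_cur_temp != DKT_INVALID_TEMP) {
	 *		// dkt.dkt_cur_temp is in degrees Celsius
	 *	}
	 *
	 * With DKT_BYPASS_PM set, a powered-down drive yields EAGAIN
	 * instead of being spun up, per the code below.)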
24011	 */
24012	if (dktemp->dkt_flags & DKT_BYPASS_PM) {
24013		path_flag = SD_PATH_DIRECT;
24014		ASSERT(!mutex_owned(&un->un_pm_mutex));
24015		mutex_enter(&un->un_pm_mutex);
24016		if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
24017			/*
24018			 * If DKT_BYPASS_PM is set, and the drive happens to be
24019			 * in low power mode, we cannot wake it up; we need to
24020			 * return EAGAIN.
24021			 */
24022			mutex_exit(&un->un_pm_mutex);
24023			rval = EAGAIN;
24024			goto done;
24025		} else {
24026			/*
24027			 * Indicate to PM the device is busy. This is required
24028			 * to avoid a race - i.e. the ioctl is issuing a
24029			 * command and the pm framework brings down the device
24030			 * to low power mode (possible power cut-off on some
24031			 * platforms).
24032			 */
24033			mutex_exit(&un->un_pm_mutex);
24034			if (sd_pm_entry(un) != DDI_SUCCESS) {
24035				rval = EAGAIN;
24036				goto done;
24037			}
24038		}
24039	}
24040
24041	temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);
24042
24043	if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page,
24044	    TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) {
24045		goto done2;
24046	}
24047
24048	/*
24049	 * For the current temperature verify that the parameter length is 0x02
24050	 * and the parameter code is 0x00.
24051	 */
24052	if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
24053	    (temperature_page[5] == 0x00)) {
24054		if (temperature_page[9] == 0xFF) {
24055			dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
24056		} else {
24057			dktemp->dkt_cur_temp = (short)(temperature_page[9]);
24058		}
24059	}
24060
24061	/*
24062	 * For the reference temperature verify that the parameter
24063	 * length is 0x02 and the parameter code is 0x01.
24064	 */
24065	if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
24066	    (temperature_page[11] == 0x01)) {
24067		if (temperature_page[15] == 0xFF) {
24068			dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
24069		} else {
24070			dktemp->dkt_ref_temp = (short)(temperature_page[15]);
24071		}
24072	}
24073
24074	/* Do the copyout regardless of the temperature command's status. */
24075	if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
24076	    flag) != 0) {
24077		rval = EFAULT;
24078	}
24079
24080 done2:
24081	if (path_flag == SD_PATH_DIRECT) {
24082		sd_pm_exit(un);
24083	}
24084
24085	kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
24086 done:
24087	if (dktemp != NULL) {
24088		kmem_free(dktemp, sizeof (struct dk_temperature));
24089	}
24090
24091	return (rval);
24092 }
24093
24094
24095 /*
24096  * Function: sd_log_page_supported
24097  *
24098  * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of
24099  *		supported log pages.
24100  *
24101  * Arguments: un - driver soft state (unit) structure
24102  *		log_page - the log page code to look for in the list
24103  *
24104  * Return Code: -1 - on error (log sense is optional and may not be supported).
24105  *		0 - log page not found.
24106  *		1 - log page found.
24107  */
24108
24109 static int
24110 sd_log_page_supported(struct sd_lun *un, int log_page)
24111 {
24112	uchar_t *log_page_data;
24113	int	i;
24114	int	match = 0;
24115	int	log_size;
24116
24117	log_page_data = kmem_zalloc(0xFF, KM_SLEEP);
24118
24119	if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0,
24120	    SD_PATH_DIRECT) != 0) {
24121		SD_ERROR(SD_LOG_COMMON, un,
24122		    "sd_log_page_supported: failed log page retrieval\n");
24123		kmem_free(log_page_data, 0xFF);
24124		return (-1);
24125	}
24126	log_size = log_page_data[3];
24127
24128	/*
24129	 * The list of supported log pages starts at the fourth byte.
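	 * (Per the SCSI LOG SENSE definition, the supported-pages page
	 * (page code 0x00) retrieved above is laid out as:
	 *	byte 0		page code (0x00)
	 *	byte 1		reserved
	 *	bytes 2-3	page length n, big-endian (only the low
	 *			byte, log_page_data[3], is used here)
	 *	bytes 4..4+n-1	one byte per supported page code.)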
Check 24130 * until we run out of log pages or a match is found. 24131 */ 24132 for (i = 4; (i < (log_size + 4)) && !match; i++) { 24133 if (log_page_data[i] == log_page) { 24134 match++; 24135 } 24136 } 24137 kmem_free(log_page_data, 0xFF); 24138 return (match); 24139 } 24140 24141 24142 /* 24143 * Function: sd_mhdioc_failfast 24144 * 24145 * Description: This routine is the driver entry point for handling ioctl 24146 * requests to enable/disable the multihost failfast option. 24147 * (MHIOCENFAILFAST) 24148 * 24149 * Arguments: dev - the device number 24150 * arg - user specified probing interval. 24151 * flag - this argument is a pass through to ddi_copyxxx() 24152 * directly from the mode argument of ioctl(). 24153 * 24154 * Return Code: 0 24155 * EFAULT 24156 * ENXIO 24157 */ 24158 24159 static int 24160 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 24161 { 24162 struct sd_lun *un = NULL; 24163 int mh_time; 24164 int rval = 0; 24165 24166 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24167 return (ENXIO); 24168 } 24169 24170 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 24171 return (EFAULT); 24172 24173 if (mh_time) { 24174 mutex_enter(SD_MUTEX(un)); 24175 un->un_resvd_status |= SD_FAILFAST; 24176 mutex_exit(SD_MUTEX(un)); 24177 /* 24178 * If mh_time is INT_MAX, then this ioctl is being used for 24179 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 24180 */ 24181 if (mh_time != INT_MAX) { 24182 rval = sd_check_mhd(dev, mh_time); 24183 } 24184 } else { 24185 (void) sd_check_mhd(dev, 0); 24186 mutex_enter(SD_MUTEX(un)); 24187 un->un_resvd_status &= ~SD_FAILFAST; 24188 mutex_exit(SD_MUTEX(un)); 24189 } 24190 return (rval); 24191 } 24192 24193 24194 /* 24195 * Function: sd_mhdioc_takeown 24196 * 24197 * Description: This routine is the driver entry point for handling ioctl 24198 * requests to forcefully acquire exclusive access rights to the 24199 * multihost disk (MHIOCTKOWN). 24200 * 24201 * Arguments: dev - the device number 24202 * arg - user provided structure specifying the delay 24203 * parameters in milliseconds 24204 * flag - this argument is a pass through to ddi_copyxxx() 24205 * directly from the mode argument of ioctl(). 24206 * 24207 * Return Code: 0 24208 * EFAULT 24209 * ENXIO 24210 */ 24211 24212 static int 24213 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 24214 { 24215 struct sd_lun *un = NULL; 24216 struct mhioctkown *tkown = NULL; 24217 int rval = 0; 24218 24219 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24220 return (ENXIO); 24221 } 24222 24223 if (arg != NULL) { 24224 tkown = (struct mhioctkown *) 24225 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 24226 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 24227 if (rval != 0) { 24228 rval = EFAULT; 24229 goto error; 24230 } 24231 } 24232 24233 rval = sd_take_ownership(dev, tkown); 24234 mutex_enter(SD_MUTEX(un)); 24235 if (rval == 0) { 24236 un->un_resvd_status |= SD_RESERVE; 24237 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 24238 sd_reinstate_resv_delay = 24239 tkown->reinstate_resv_delay * 1000; 24240 } else { 24241 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 24242 } 24243 /* 24244 * Give the scsi_watch routine interval set by 24245 * the MHIOCENFAILFAST ioctl precedence here. 
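		 *
		 * (Typical caller sequence, for reference -- an illustrative
		 * userland sketch, not part of the driver, given an open
		 * descriptor fd for the disk:
		 *
		 *	#include <sys/mhd.h>
		 *
		 *	struct mhioctkown t;
		 *	int interval = 2000;	// failfast probe period, msec
		 *	bzero(&t, sizeof (t));
		 *	ioctl(fd, MHIOCTKOWN, &t);
		 *	ioctl(fd, MHIOCENFAILFAST, &interval);
		 *
		 * If MHIOCENFAILFAST was issued first, the probe interval it
		 * established is kept in preference to reinstate_resv_delay.)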
24246 */ 24247 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 24248 mutex_exit(SD_MUTEX(un)); 24249 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 24250 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24251 "sd_mhdioc_takeown : %d\n", 24252 sd_reinstate_resv_delay); 24253 } else { 24254 mutex_exit(SD_MUTEX(un)); 24255 } 24256 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 24257 sd_mhd_reset_notify_cb, (caddr_t)un); 24258 } else { 24259 un->un_resvd_status &= ~SD_RESERVE; 24260 mutex_exit(SD_MUTEX(un)); 24261 } 24262 24263 error: 24264 if (tkown != NULL) { 24265 kmem_free(tkown, sizeof (struct mhioctkown)); 24266 } 24267 return (rval); 24268 } 24269 24270 24271 /* 24272 * Function: sd_mhdioc_release 24273 * 24274 * Description: This routine is the driver entry point for handling ioctl 24275 * requests to release exclusive access rights to the multihost 24276 * disk (MHIOCRELEASE). 24277 * 24278 * Arguments: dev - the device number 24279 * 24280 * Return Code: 0 24281 * ENXIO 24282 */ 24283 24284 static int 24285 sd_mhdioc_release(dev_t dev) 24286 { 24287 struct sd_lun *un = NULL; 24288 timeout_id_t resvd_timeid_save; 24289 int resvd_status_save; 24290 int rval = 0; 24291 24292 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24293 return (ENXIO); 24294 } 24295 24296 mutex_enter(SD_MUTEX(un)); 24297 resvd_status_save = un->un_resvd_status; 24298 un->un_resvd_status &= 24299 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 24300 if (un->un_resvd_timeid) { 24301 resvd_timeid_save = un->un_resvd_timeid; 24302 un->un_resvd_timeid = NULL; 24303 mutex_exit(SD_MUTEX(un)); 24304 (void) untimeout(resvd_timeid_save); 24305 } else { 24306 mutex_exit(SD_MUTEX(un)); 24307 } 24308 24309 /* 24310 * destroy any pending timeout thread that may be attempting to 24311 * reinstate reservation on this device. 24312 */ 24313 sd_rmv_resv_reclaim_req(dev); 24314 24315 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 24316 mutex_enter(SD_MUTEX(un)); 24317 if ((un->un_mhd_token) && 24318 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 24319 mutex_exit(SD_MUTEX(un)); 24320 (void) sd_check_mhd(dev, 0); 24321 } else { 24322 mutex_exit(SD_MUTEX(un)); 24323 } 24324 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 24325 sd_mhd_reset_notify_cb, (caddr_t)un); 24326 } else { 24327 /* 24328 * sd_mhd_watch_cb will restart the resvd recover timeout thread 24329 */ 24330 mutex_enter(SD_MUTEX(un)); 24331 un->un_resvd_status = resvd_status_save; 24332 mutex_exit(SD_MUTEX(un)); 24333 } 24334 return (rval); 24335 } 24336 24337 24338 /* 24339 * Function: sd_mhdioc_register_devid 24340 * 24341 * Description: This routine is the driver entry point for handling ioctl 24342 * requests to register the device id (MHIOCREREGISTERDEVID). 
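 *
 *		(Userland invocation, for reference: this ioctl takes no
 *		argument, e.g. ioctl(fd, MHIOCREREGISTERDEVID, 0);)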
24343 * 24344 * Note: The implementation for this ioctl has been updated to 24345 * be consistent with the original PSARC case (1999/357) 24346 * (4375899, 4241671, 4220005) 24347 * 24348 * Arguments: dev - the device number 24349 * 24350 * Return Code: 0 24351 * ENXIO 24352 */ 24353 24354 static int 24355 sd_mhdioc_register_devid(dev_t dev) 24356 { 24357 struct sd_lun *un = NULL; 24358 int rval = 0; 24359 24360 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24361 return (ENXIO); 24362 } 24363 24364 ASSERT(!mutex_owned(SD_MUTEX(un))); 24365 24366 mutex_enter(SD_MUTEX(un)); 24367 24368 /* If a devid already exists, de-register it */ 24369 if (un->un_devid != NULL) { 24370 ddi_devid_unregister(SD_DEVINFO(un)); 24371 /* 24372 * After unregister devid, needs to free devid memory 24373 */ 24374 ddi_devid_free(un->un_devid); 24375 un->un_devid = NULL; 24376 } 24377 24378 /* Check for reservation conflict */ 24379 mutex_exit(SD_MUTEX(un)); 24380 rval = sd_send_scsi_TEST_UNIT_READY(un, 0); 24381 mutex_enter(SD_MUTEX(un)); 24382 24383 switch (rval) { 24384 case 0: 24385 sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 24386 break; 24387 case EACCES: 24388 break; 24389 default: 24390 rval = EIO; 24391 } 24392 24393 mutex_exit(SD_MUTEX(un)); 24394 return (rval); 24395 } 24396 24397 24398 /* 24399 * Function: sd_mhdioc_inkeys 24400 * 24401 * Description: This routine is the driver entry point for handling ioctl 24402 * requests to issue the SCSI-3 Persistent In Read Keys command 24403 * to the device (MHIOCGRP_INKEYS). 24404 * 24405 * Arguments: dev - the device number 24406 * arg - user provided in_keys structure 24407 * flag - this argument is a pass through to ddi_copyxxx() 24408 * directly from the mode argument of ioctl(). 24409 * 24410 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 24411 * ENXIO 24412 * EFAULT 24413 */ 24414 24415 static int 24416 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 24417 { 24418 struct sd_lun *un; 24419 mhioc_inkeys_t inkeys; 24420 int rval = 0; 24421 24422 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24423 return (ENXIO); 24424 } 24425 24426 #ifdef _MULTI_DATAMODEL 24427 switch (ddi_model_convert_from(flag & FMODELS)) { 24428 case DDI_MODEL_ILP32: { 24429 struct mhioc_inkeys32 inkeys32; 24430 24431 if (ddi_copyin(arg, &inkeys32, 24432 sizeof (struct mhioc_inkeys32), flag) != 0) { 24433 return (EFAULT); 24434 } 24435 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 24436 if ((rval = sd_persistent_reservation_in_read_keys(un, 24437 &inkeys, flag)) != 0) { 24438 return (rval); 24439 } 24440 inkeys32.generation = inkeys.generation; 24441 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 24442 flag) != 0) { 24443 return (EFAULT); 24444 } 24445 break; 24446 } 24447 case DDI_MODEL_NONE: 24448 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 24449 flag) != 0) { 24450 return (EFAULT); 24451 } 24452 if ((rval = sd_persistent_reservation_in_read_keys(un, 24453 &inkeys, flag)) != 0) { 24454 return (rval); 24455 } 24456 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 24457 flag) != 0) { 24458 return (EFAULT); 24459 } 24460 break; 24461 } 24462 24463 #else /* ! 
_MULTI_DATAMODEL */ 24464 24465 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 24466 return (EFAULT); 24467 } 24468 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 24469 if (rval != 0) { 24470 return (rval); 24471 } 24472 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 24473 return (EFAULT); 24474 } 24475 24476 #endif /* _MULTI_DATAMODEL */ 24477 24478 return (rval); 24479 } 24480 24481 24482 /* 24483 * Function: sd_mhdioc_inresv 24484 * 24485 * Description: This routine is the driver entry point for handling ioctl 24486 * requests to issue the SCSI-3 Persistent In Read Reservations 24487 * command to the device (MHIOCGRP_INKEYS). 24488 * 24489 * Arguments: dev - the device number 24490 * arg - user provided in_resv structure 24491 * flag - this argument is a pass through to ddi_copyxxx() 24492 * directly from the mode argument of ioctl(). 24493 * 24494 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 24495 * ENXIO 24496 * EFAULT 24497 */ 24498 24499 static int 24500 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 24501 { 24502 struct sd_lun *un; 24503 mhioc_inresvs_t inresvs; 24504 int rval = 0; 24505 24506 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24507 return (ENXIO); 24508 } 24509 24510 #ifdef _MULTI_DATAMODEL 24511 24512 switch (ddi_model_convert_from(flag & FMODELS)) { 24513 case DDI_MODEL_ILP32: { 24514 struct mhioc_inresvs32 inresvs32; 24515 24516 if (ddi_copyin(arg, &inresvs32, 24517 sizeof (struct mhioc_inresvs32), flag) != 0) { 24518 return (EFAULT); 24519 } 24520 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 24521 if ((rval = sd_persistent_reservation_in_read_resv(un, 24522 &inresvs, flag)) != 0) { 24523 return (rval); 24524 } 24525 inresvs32.generation = inresvs.generation; 24526 if (ddi_copyout(&inresvs32, arg, 24527 sizeof (struct mhioc_inresvs32), flag) != 0) { 24528 return (EFAULT); 24529 } 24530 break; 24531 } 24532 case DDI_MODEL_NONE: 24533 if (ddi_copyin(arg, &inresvs, 24534 sizeof (mhioc_inresvs_t), flag) != 0) { 24535 return (EFAULT); 24536 } 24537 if ((rval = sd_persistent_reservation_in_read_resv(un, 24538 &inresvs, flag)) != 0) { 24539 return (rval); 24540 } 24541 if (ddi_copyout(&inresvs, arg, 24542 sizeof (mhioc_inresvs_t), flag) != 0) { 24543 return (EFAULT); 24544 } 24545 break; 24546 } 24547 24548 #else /* ! _MULTI_DATAMODEL */ 24549 24550 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 24551 return (EFAULT); 24552 } 24553 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 24554 if (rval != 0) { 24555 return (rval); 24556 } 24557 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 24558 return (EFAULT); 24559 } 24560 24561 #endif /* ! _MULTI_DATAMODEL */ 24562 24563 return (rval); 24564 } 24565 24566 24567 /* 24568 * The following routines support the clustering functionality described below 24569 * and implement lost reservation reclaim functionality. 24570 * 24571 * Clustering 24572 * ---------- 24573 * The clustering code uses two different, independent forms of SCSI 24574 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3 24575 * Persistent Group Reservations. For any particular disk, it will use either 24576 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 24577 * 24578 * SCSI-2 24579 * The cluster software takes ownership of a multi-hosted disk by issuing the 24580 * MHIOCTKOWN ioctl to the disk driver. 
It releases ownership by issuing the 24581 * MHIOCRELEASE ioctl.Closely related is the MHIOCENFAILFAST ioctl -- a cluster, 24582 * just after taking ownership of the disk with the MHIOCTKOWN ioctl then issues 24583 * the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the driver. The 24584 * meaning of failfast is that if the driver (on this host) ever encounters the 24585 * scsi error return code RESERVATION_CONFLICT from the device, it should 24586 * immediately panic the host. The motivation for this ioctl is that if this 24587 * host does encounter reservation conflict, the underlying cause is that some 24588 * other host of the cluster has decided that this host is no longer in the 24589 * cluster and has seized control of the disks for itself. Since this host is no 24590 * longer in the cluster, it ought to panic itself. The MHIOCENFAILFAST ioctl 24591 * does two things: 24592 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 24593 * error to panic the host 24594 * (b) it sets up a periodic timer to test whether this host still has 24595 * "access" (in that no other host has reserved the device): if the 24596 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 24597 * purpose of that periodic timer is to handle scenarios where the host is 24598 * otherwise temporarily quiescent, temporarily doing no real i/o. 24599 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 24600 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 24601 * the device itself. 24602 * 24603 * SCSI-3 PGR 24604 * A direct semantic implementation of the SCSI-3 Persistent Reservation 24605 * facility is supported through the shared multihost disk ioctls 24606 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 24607 * MHIOCGRP_PREEMPTANDABORT) 24608 * 24609 * Reservation Reclaim: 24610 * -------------------- 24611 * To support the lost reservation reclaim operations this driver creates a 24612 * single thread to handle reinstating reservations on all devices that have 24613 * lost reservations sd_resv_reclaim_requests are logged for all devices that 24614 * have LOST RESERVATIONS when the scsi watch facility callsback sd_mhd_watch_cb 24615 * and the reservation reclaim thread loops through the requests to regain the 24616 * lost reservations. 24617 */ 24618 24619 /* 24620 * Function: sd_check_mhd() 24621 * 24622 * Description: This function sets up and submits a scsi watch request or 24623 * terminates an existing watch request. This routine is used in 24624 * support of reservation reclaim. 24625 * 24626 * Arguments: dev - the device 'dev_t' is used for context to discriminate 24627 * among multiple watches that share the callback function 24628 * interval - the number of microseconds specifying the watch 24629 * interval for issuing TEST UNIT READY commands. If 24630 * set to 0 the watch should be terminated. If the 24631 * interval is set to 0 and if the device is required 24632 * to hold reservation while disabling failfast, the 24633 * watch is restarted with an interval of 24634 * reinstate_resv_delay. 
24635 * 24636 * Return Code: 0 - Successful submit/terminate of scsi watch request 24637 * ENXIO - Indicates an invalid device was specified 24638 * EAGAIN - Unable to submit the scsi watch request 24639 */ 24640 24641 static int 24642 sd_check_mhd(dev_t dev, int interval) 24643 { 24644 struct sd_lun *un; 24645 opaque_t token; 24646 24647 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24648 return (ENXIO); 24649 } 24650 24651 /* is this a watch termination request? */ 24652 if (interval == 0) { 24653 mutex_enter(SD_MUTEX(un)); 24654 /* if there is an existing watch task then terminate it */ 24655 if (un->un_mhd_token) { 24656 token = un->un_mhd_token; 24657 un->un_mhd_token = NULL; 24658 mutex_exit(SD_MUTEX(un)); 24659 (void) scsi_watch_request_terminate(token, 24660 SCSI_WATCH_TERMINATE_WAIT); 24661 mutex_enter(SD_MUTEX(un)); 24662 } else { 24663 mutex_exit(SD_MUTEX(un)); 24664 /* 24665 * Note: If we return here we don't check for the 24666 * failfast case. This is the original legacy 24667 * implementation but perhaps we should be checking 24668 * the failfast case. 24669 */ 24670 return (0); 24671 } 24672 /* 24673 * If the device is required to hold reservation while 24674 * disabling failfast, we need to restart the scsi_watch 24675 * routine with an interval of reinstate_resv_delay. 24676 */ 24677 if (un->un_resvd_status & SD_RESERVE) { 24678 interval = sd_reinstate_resv_delay/1000; 24679 } else { 24680 /* no failfast so bail */ 24681 mutex_exit(SD_MUTEX(un)); 24682 return (0); 24683 } 24684 mutex_exit(SD_MUTEX(un)); 24685 } 24686 24687 /* 24688 * adjust minimum time interval to 1 second, 24689 * and convert from msecs to usecs 24690 */ 24691 if (interval > 0 && interval < 1000) { 24692 interval = 1000; 24693 } 24694 interval *= 1000; 24695 24696 /* 24697 * submit the request to the scsi_watch service 24698 */ 24699 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 24700 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 24701 if (token == NULL) { 24702 return (EAGAIN); 24703 } 24704 24705 /* 24706 * save token for termination later on 24707 */ 24708 mutex_enter(SD_MUTEX(un)); 24709 un->un_mhd_token = token; 24710 mutex_exit(SD_MUTEX(un)); 24711 return (0); 24712 } 24713 24714 24715 /* 24716 * Function: sd_mhd_watch_cb() 24717 * 24718 * Description: This function is the call back function used by the scsi watch 24719 * facility. The scsi watch facility sends the "Test Unit Ready" 24720 * and processes the status. If applicable (i.e. a "Unit Attention" 24721 * status and automatic "Request Sense" not used) the scsi watch 24722 * facility will send a "Request Sense" and retrieve the sense data 24723 * to be passed to this callback function. In either case the 24724 * automatic "Request Sense" or the facility submitting one, this 24725 * callback is passed the status and sense data. 
24726 * 24727 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24728 * among multiple watches that share this callback function 24729 * resultp - scsi watch facility result packet containing scsi 24730 * packet, status byte and sense data 24731 * 24732 * Return Code: 0 - continue the watch task 24733 * non-zero - terminate the watch task 24734 */ 24735 24736 static int 24737 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 24738 { 24739 struct sd_lun *un; 24740 struct scsi_status *statusp; 24741 struct scsi_extended_sense *sensep; 24742 struct scsi_pkt *pkt; 24743 uchar_t actual_sense_length; 24744 dev_t dev = (dev_t)arg; 24745 24746 ASSERT(resultp != NULL); 24747 statusp = resultp->statusp; 24748 sensep = resultp->sensep; 24749 pkt = resultp->pkt; 24750 actual_sense_length = resultp->actual_sense_length; 24751 24752 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24753 return (ENXIO); 24754 } 24755 24756 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24757 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 24758 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 24759 24760 /* Begin processing of the status and/or sense data */ 24761 if (pkt->pkt_reason != CMD_CMPLT) { 24762 /* Handle the incomplete packet */ 24763 sd_mhd_watch_incomplete(un, pkt); 24764 return (0); 24765 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 24766 if (*((unsigned char *)statusp) 24767 == STATUS_RESERVATION_CONFLICT) { 24768 /* 24769 * Handle a reservation conflict by panicking if 24770 * configured for failfast or by logging the conflict 24771 * and updating the reservation status 24772 */ 24773 mutex_enter(SD_MUTEX(un)); 24774 if ((un->un_resvd_status & SD_FAILFAST) && 24775 (sd_failfast_enable)) { 24776 panic("Reservation Conflict"); 24777 /*NOTREACHED*/ 24778 } 24779 SD_INFO(SD_LOG_IOCTL_MHD, un, 24780 "sd_mhd_watch_cb: Reservation Conflict\n"); 24781 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 24782 mutex_exit(SD_MUTEX(un)); 24783 } 24784 } 24785 24786 if (sensep != NULL) { 24787 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 24788 mutex_enter(SD_MUTEX(un)); 24789 if ((sensep->es_add_code == SD_SCSI_RESET_SENSE_CODE) && 24790 (un->un_resvd_status & SD_RESERVE)) { 24791 /* 24792 * The additional sense code indicates a power 24793 * on or bus device reset has occurred; update 24794 * the reservation status. 24795 */ 24796 un->un_resvd_status |= 24797 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24798 SD_INFO(SD_LOG_IOCTL_MHD, un, 24799 "sd_mhd_watch_cb: Lost Reservation\n"); 24800 } 24801 } else { 24802 return (0); 24803 } 24804 } else { 24805 mutex_enter(SD_MUTEX(un)); 24806 } 24807 24808 if ((un->un_resvd_status & SD_RESERVE) && 24809 (un->un_resvd_status & SD_LOST_RESERVE)) { 24810 if (un->un_resvd_status & SD_WANT_RESERVE) { 24811 /* 24812 * A reset occurred in between the last probe and this 24813 * one so if a timeout is pending cancel it. 
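			 *
			 * (Flag interplay, for reference: SD_RESERVE means
			 * this host should hold the reservation;
			 * SD_LOST_RESERVE means a reset cost us the
			 * reservation and a reclaim is due; SD_WANT_RESERVE
			 * defers that reclaim for one more probe interval
			 * because yet another reset has been seen.)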
24814			 */
24815			if (un->un_resvd_timeid) {
24816				timeout_id_t temp_id = un->un_resvd_timeid;
24817				un->un_resvd_timeid = NULL;
24818				mutex_exit(SD_MUTEX(un));
24819				(void) untimeout(temp_id);
24820				mutex_enter(SD_MUTEX(un));
24821			}
24822			un->un_resvd_status &= ~SD_WANT_RESERVE;
24823		}
24824		if (un->un_resvd_timeid == 0) {
24825			/* Schedule a timeout to handle the lost reservation */
24826			un->un_resvd_timeid = timeout(sd_mhd_resvd_recover,
24827			    (void *)dev,
24828			    drv_usectohz(sd_reinstate_resv_delay));
24829		}
24830	}
24831	mutex_exit(SD_MUTEX(un));
24832	return (0);
24833 }
24834
24835
24836 /*
24837  * Function: sd_mhd_watch_incomplete()
24838  *
24839  * Description: This function is used to find out why a scsi pkt sent by the
24840  *		scsi watch facility was not completed. Under some scenarios this
24841  *		routine will return. Otherwise it will send a bus reset to see
24842  *		if the drive is still online.
24843  *
24844  * Arguments: un - driver soft state (unit) structure
24845  *		pkt - incomplete scsi pkt
24846  */
24847
24848 static void
24849 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt)
24850 {
24851	int	be_chatty;
24852	int	perr;
24853
24854	ASSERT(pkt != NULL);
24855	ASSERT(un != NULL);
24856	be_chatty = (!(pkt->pkt_flags & FLAG_SILENT));
24857	perr = (pkt->pkt_statistics & STAT_PERR);
24858
24859	mutex_enter(SD_MUTEX(un));
24860	if (un->un_state == SD_STATE_DUMPING) {
24861		mutex_exit(SD_MUTEX(un));
24862		return;
24863	}
24864
24865	switch (pkt->pkt_reason) {
24866	case CMD_UNX_BUS_FREE:
24867		/*
24868		 * If we had a parity error that caused the target to drop BSY*,
24869		 * don't be chatty about it.
24870		 */
24871		if (perr && be_chatty) {
24872			be_chatty = 0;
24873		}
24874		break;
24875	case CMD_TAG_REJECT:
24876		/*
24877		 * The SCSI-2 spec states that a tag reject will be sent by the
24878		 * target if tagged queuing is not supported. A tag reject may
24879		 * also be sent during certain initialization periods or to
24880		 * control internal resources. For the latter case the target
24881		 * may also return Queue Full.
24882		 *
24883		 * If this driver receives a tag reject from a target that is
24884		 * going through an init period or controlling internal
24885		 * resources tagged queuing will be disabled. This is a less
24886		 * than optimal behavior but the driver is unable to determine
24887		 * the target state and assumes tagged queueing is not supported.
24888		 */
24889		pkt->pkt_flags = 0;
24890		un->un_tagflags = 0;
24891
24892		if (un->un_f_opt_queueing == TRUE) {
24893			un->un_throttle = min(un->un_throttle, 3);
24894		} else {
24895			un->un_throttle = 1;
24896		}
24897		mutex_exit(SD_MUTEX(un));
24898		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
24899		mutex_enter(SD_MUTEX(un));
24900		break;
24901	case CMD_INCOMPLETE:
24902		/*
24903		 * The transport stopped with an abnormal state; fall through
24904		 * and reset the target and/or bus unless selection did not
24905		 * complete (indicated by STATE_GOT_BUS), in which case we
24906		 * don't want to go through a target/bus reset.
24907		 */
24908		if (pkt->pkt_state == STATE_GOT_BUS) {
24909			break;
24910		}
24911		/*FALLTHROUGH*/
24912
24913	case CMD_TIMEOUT:
24914	default:
24915		/*
24916		 * The lun may still be running the command, so a lun reset
24917		 * should be attempted. If the lun reset fails or cannot be
24918		 * issued, then try a target reset. Lastly try a bus reset.
24919 */ 24920 if ((pkt->pkt_statistics & 24921 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 24922 int reset_retval = 0; 24923 mutex_exit(SD_MUTEX(un)); 24924 if (un->un_f_allow_bus_device_reset == TRUE) { 24925 if (un->un_f_lun_reset_enabled == TRUE) { 24926 reset_retval = 24927 scsi_reset(SD_ADDRESS(un), 24928 RESET_LUN); 24929 } 24930 if (reset_retval == 0) { 24931 reset_retval = 24932 scsi_reset(SD_ADDRESS(un), 24933 RESET_TARGET); 24934 } 24935 } 24936 if (reset_retval == 0) { 24937 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 24938 } 24939 mutex_enter(SD_MUTEX(un)); 24940 } 24941 break; 24942 } 24943 24944 /* A device/bus reset has occurred; update the reservation status. */ 24945 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 24946 (STAT_BUS_RESET | STAT_DEV_RESET))) { 24947 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24948 un->un_resvd_status |= 24949 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24950 SD_INFO(SD_LOG_IOCTL_MHD, un, 24951 "sd_mhd_watch_incomplete: Lost Reservation\n"); 24952 } 24953 } 24954 24955 /* 24956 * The disk has been turned off; Update the device state. 24957 * 24958 * Note: Should we be offlining the disk here? 24959 */ 24960 if (pkt->pkt_state == STATE_GOT_BUS) { 24961 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 24962 "Disk not responding to selection\n"); 24963 if (un->un_state != SD_STATE_OFFLINE) { 24964 New_state(un, SD_STATE_OFFLINE); 24965 } 24966 } else if (be_chatty) { 24967 /* 24968 * suppress messages if they are all the same pkt reason; 24969 * with TQ, many (up to 256) are returned with the same 24970 * pkt_reason 24971 */ 24972 if (pkt->pkt_reason != un->un_last_pkt_reason) { 24973 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24974 "sd_mhd_watch_incomplete: " 24975 "SCSI transport failed: reason '%s'\n", 24976 scsi_rname(pkt->pkt_reason)); 24977 } 24978 } 24979 un->un_last_pkt_reason = pkt->pkt_reason; 24980 mutex_exit(SD_MUTEX(un)); 24981 } 24982 24983 24984 /* 24985 * Function: sd_sname() 24986 * 24987 * Description: This is a simple little routine to return a string containing 24988 * a printable description of command status byte for use in 24989 * logging. 24990 * 24991 * Arguments: status - pointer to a status byte 24992 * 24993 * Return Code: char * - string containing status description. 24994 */ 24995 24996 static char * 24997 sd_sname(uchar_t status) 24998 { 24999 switch (status & STATUS_MASK) { 25000 case STATUS_GOOD: 25001 return ("good status"); 25002 case STATUS_CHECK: 25003 return ("check condition"); 25004 case STATUS_MET: 25005 return ("condition met"); 25006 case STATUS_BUSY: 25007 return ("busy"); 25008 case STATUS_INTERMEDIATE: 25009 return ("intermediate"); 25010 case STATUS_INTERMEDIATE_MET: 25011 return ("intermediate - condition met"); 25012 case STATUS_RESERVATION_CONFLICT: 25013 return ("reservation_conflict"); 25014 case STATUS_TERMINATED: 25015 return ("command terminated"); 25016 case STATUS_QFULL: 25017 return ("queue full"); 25018 default: 25019 return ("<unknown status>"); 25020 } 25021 } 25022 25023 25024 /* 25025 * Function: sd_mhd_resvd_recover() 25026 * 25027 * Description: This function adds a reservation entry to the 25028 * sd_resv_reclaim_request list and signals the reservation 25029 * reclaim thread that there is work pending. If the reservation 25030 * reclaim thread has not been previously created this function 25031 * will kick it off. 
25032 * 25033 * Arguments: arg - the device 'dev_t' is used for context to discriminate 25034 * among multiple watches that share this callback function 25035 * 25036 * Context: This routine is called by timeout() and is run in interrupt 25037 * context. It must not sleep or call other functions which may 25038 * sleep. 25039 */ 25040 25041 static void 25042 sd_mhd_resvd_recover(void *arg) 25043 { 25044 dev_t dev = (dev_t)arg; 25045 struct sd_lun *un; 25046 struct sd_thr_request *sd_treq = NULL; 25047 struct sd_thr_request *sd_cur = NULL; 25048 struct sd_thr_request *sd_prev = NULL; 25049 int already_there = 0; 25050 25051 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25052 return; 25053 } 25054 25055 mutex_enter(SD_MUTEX(un)); 25056 un->un_resvd_timeid = NULL; 25057 if (un->un_resvd_status & SD_WANT_RESERVE) { 25058 /* 25059 * There was a reset so don't issue the reserve, allow the 25060 * sd_mhd_watch_cb callback function to notice this and 25061 * reschedule the timeout for reservation. 25062 */ 25063 mutex_exit(SD_MUTEX(un)); 25064 return; 25065 } 25066 mutex_exit(SD_MUTEX(un)); 25067 25068 /* 25069 * Add this device to the sd_resv_reclaim_request list and the 25070 * sd_resv_reclaim_thread should take care of the rest. 25071 * 25072 * Note: We can't sleep in this context so if the memory allocation 25073 * fails allow the sd_mhd_watch_cb callback function to notice this and 25074 * reschedule the timeout for reservation. (4378460) 25075 */ 25076 sd_treq = (struct sd_thr_request *) 25077 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 25078 if (sd_treq == NULL) { 25079 return; 25080 } 25081 25082 sd_treq->sd_thr_req_next = NULL; 25083 sd_treq->dev = dev; 25084 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25085 if (sd_tr.srq_thr_req_head == NULL) { 25086 sd_tr.srq_thr_req_head = sd_treq; 25087 } else { 25088 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 25089 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 25090 if (sd_cur->dev == dev) { 25091 /* 25092 * already in Queue so don't log 25093 * another request for the device 25094 */ 25095 already_there = 1; 25096 break; 25097 } 25098 sd_prev = sd_cur; 25099 } 25100 if (!already_there) { 25101 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 25102 "logging request for %lx\n", dev); 25103 sd_prev->sd_thr_req_next = sd_treq; 25104 } else { 25105 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 25106 } 25107 } 25108 25109 /* 25110 * Create a kernel thread to do the reservation reclaim and free up this 25111 * thread. 
We cannot block this thread while we go away to do the 25112 * reservation reclaim 25113 */ 25114 if (sd_tr.srq_resv_reclaim_thread == NULL) 25115 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 25116 sd_resv_reclaim_thread, NULL, 25117 0, &p0, TS_RUN, v.v_maxsyspri - 2); 25118 25119 /* Tell the reservation reclaim thread that it has work to do */ 25120 cv_signal(&sd_tr.srq_resv_reclaim_cv); 25121 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25122 } 25123 25124 /* 25125 * Function: sd_resv_reclaim_thread() 25126 * 25127 * Description: This function implements the reservation reclaim operations 25128 * 25129 * Arguments: arg - the device 'dev_t' is used for context to discriminate 25130 * among multiple watches that share this callback function 25131 */ 25132 25133 static void 25134 sd_resv_reclaim_thread() 25135 { 25136 struct sd_lun *un; 25137 struct sd_thr_request *sd_mhreq; 25138 25139 /* Wait for work */ 25140 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25141 if (sd_tr.srq_thr_req_head == NULL) { 25142 cv_wait(&sd_tr.srq_resv_reclaim_cv, 25143 &sd_tr.srq_resv_reclaim_mutex); 25144 } 25145 25146 /* Loop while we have work */ 25147 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 25148 un = ddi_get_soft_state(sd_state, 25149 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 25150 if (un == NULL) { 25151 /* 25152 * softstate structure is NULL so just 25153 * dequeue the request and continue 25154 */ 25155 sd_tr.srq_thr_req_head = 25156 sd_tr.srq_thr_cur_req->sd_thr_req_next; 25157 kmem_free(sd_tr.srq_thr_cur_req, 25158 sizeof (struct sd_thr_request)); 25159 continue; 25160 } 25161 25162 /* dequeue the request */ 25163 sd_mhreq = sd_tr.srq_thr_cur_req; 25164 sd_tr.srq_thr_req_head = 25165 sd_tr.srq_thr_cur_req->sd_thr_req_next; 25166 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25167 25168 /* 25169 * Reclaim reservation only if SD_RESERVE is still set. There 25170 * may have been a call to MHIOCRELEASE before we got here. 25171 */ 25172 mutex_enter(SD_MUTEX(un)); 25173 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25174 /* 25175 * Note: The SD_LOST_RESERVE flag is cleared before 25176 * reclaiming the reservation. If this is done after the 25177 * call to sd_reserve_release a reservation loss in the 25178 * window between pkt completion of reserve cmd and 25179 * mutex_enter below may not be recognized 25180 */ 25181 un->un_resvd_status &= ~SD_LOST_RESERVE; 25182 mutex_exit(SD_MUTEX(un)); 25183 25184 if (sd_reserve_release(sd_mhreq->dev, 25185 SD_RESERVE) == 0) { 25186 mutex_enter(SD_MUTEX(un)); 25187 un->un_resvd_status |= SD_RESERVE; 25188 mutex_exit(SD_MUTEX(un)); 25189 SD_INFO(SD_LOG_IOCTL_MHD, un, 25190 "sd_resv_reclaim_thread: " 25191 "Reservation Recovered\n"); 25192 } else { 25193 mutex_enter(SD_MUTEX(un)); 25194 un->un_resvd_status |= SD_LOST_RESERVE; 25195 mutex_exit(SD_MUTEX(un)); 25196 SD_INFO(SD_LOG_IOCTL_MHD, un, 25197 "sd_resv_reclaim_thread: Failed " 25198 "Reservation Recovery\n"); 25199 } 25200 } else { 25201 mutex_exit(SD_MUTEX(un)); 25202 } 25203 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25204 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 25205 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25206 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 25207 /* 25208 * wakeup the destroy thread if anyone is waiting on 25209 * us to complete. 
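	 *
	 * (Queue protocol, for reference: sd_mhd_resvd_recover() enqueues
	 * requests under srq_resv_reclaim_mutex and signals
	 * srq_resv_reclaim_cv; this thread dequeues one request at a time
	 * into srq_thr_cur_req; sd_rmv_resv_reclaim_req() waits on
	 * srq_inprocess_cv while its device's request is in flight.)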
25210	 */
25211	cv_signal(&sd_tr.srq_inprocess_cv);
25212	SD_TRACE(SD_LOG_IOCTL_MHD, un,
25213	    "sd_resv_reclaim_thread: cv_signalling current request \n");
25214	}
25215
25216	/*
25217	 * cleanup the sd_tr structure now that this thread will not exist
25218	 */
25219	ASSERT(sd_tr.srq_thr_req_head == NULL);
25220	ASSERT(sd_tr.srq_thr_cur_req == NULL);
25221	sd_tr.srq_resv_reclaim_thread = NULL;
25222	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25223	thread_exit();
25224 }
25225
25226
25227 /*
25228  * Function: sd_rmv_resv_reclaim_req()
25229  *
25230  * Description: This function removes any pending reservation reclaim requests
25231  *		for the specified device.
25232  *
25233  * Arguments: dev - the device 'dev_t'
25234  */
25235
25236 static void
25237 sd_rmv_resv_reclaim_req(dev_t dev)
25238 {
25239	struct sd_thr_request *sd_mhreq;
25240	struct sd_thr_request *sd_prev;
25241
25242	/* Remove a reservation reclaim request from the list */
25243	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25244	if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) {
25245		/*
25246		 * We are attempting to reinstate reservation for
25247		 * this device. We wait for sd_reserve_release()
25248		 * to return before we return.
25249		 */
25250		cv_wait(&sd_tr.srq_inprocess_cv,
25251		    &sd_tr.srq_resv_reclaim_mutex);
25252	} else {
25253		sd_prev = sd_mhreq = sd_tr.srq_thr_req_head;
25254		if (sd_mhreq && sd_mhreq->dev == dev) {
25255			sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next;
25256			kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25257			mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25258			return;
25259		}
25260		for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) {
25261			if (sd_mhreq && sd_mhreq->dev == dev) {
25262				break;
25263			}
25264			sd_prev = sd_mhreq;
25265		}
25266		if (sd_mhreq != NULL) {
25267			sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next;
25268			kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25269		}
25270	}
25271	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25272 }
25273
25274
25275 /*
25276  * Function: sd_mhd_reset_notify_cb()
25277  *
25278  * Description: This is a call back function for scsi_reset_notify. This
25279  *		function updates the softstate reserved status and logs the
25280  *		reset. The driver scsi watch facility callback function
25281  *		(sd_mhd_watch_cb) and reservation reclaim thread functionality
25282  *		will reclaim the reservation.
25283  *
25284  * Arguments: arg - driver soft state (unit) structure
25285  */
25286
25287 static void
25288 sd_mhd_reset_notify_cb(caddr_t arg)
25289 {
25290	struct sd_lun *un = (struct sd_lun *)arg;
25291
25292	mutex_enter(SD_MUTEX(un));
25293	if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25294		un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
25295		SD_INFO(SD_LOG_IOCTL_MHD, un,
25296		    "sd_mhd_reset_notify_cb: Lost Reservation\n");
25297	}
25298	mutex_exit(SD_MUTEX(un));
25299 }
25300
25301
25302 /*
25303  * Function: sd_take_ownership()
25304  *
25305  * Description: This routine implements an algorithm to achieve a stable
25306  *		reservation on disks which don't implement priority reserve,
25307  *		and makes sure that other hosts' re-reservation attempts fail.
25308  *		This algorithm consists of a loop that keeps issuing the RESERVE
25309  *		for some period of time (min_ownership_delay, default 6 seconds).
25310  *		During that loop, it looks to see if there has been a bus device
25311  *		reset or bus reset (both of which cause an existing reservation
25312  *		to be lost).
If the reservation is lost issue RESERVE until a 25313 * period of min_ownership_delay with no resets has gone by, or 25314 * until max_ownership_delay has expired. This loop ensures that 25315 * the host really did manage to reserve the device, in spite of 25316 * resets. The looping for min_ownership_delay (default six 25317 * seconds) is important to early generation clustering products, 25318 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 25319 * MHIOCENFAILFAST periodic timer of two seconds. By having 25320 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 25321 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 25322 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 25323 * have already noticed, via the MHIOCENFAILFAST polling, that it 25324 * no longer "owns" the disk and will have panicked itself. Thus, 25325 * the host issuing the MHIOCTKOWN is assured (with timing 25326 * dependencies) that by the time it actually starts to use the 25327 * disk for real work, the old owner is no longer accessing it. 25328 * 25329 * min_ownership_delay is the minimum amount of time for which the 25330 * disk must be reserved continuously devoid of resets before the 25331 * MHIOCTKOWN ioctl will return success. 25332 * 25333 * max_ownership_delay indicates the amount of time by which the 25334 * take ownership should succeed or timeout with an error. 25335 * 25336 * Arguments: dev - the device 'dev_t' 25337 * *p - struct containing timing info. 25338 * 25339 * Return Code: 0 for success or error code 25340 */ 25341 25342 static int 25343 sd_take_ownership(dev_t dev, struct mhioctkown *p) 25344 { 25345 struct sd_lun *un; 25346 int rval; 25347 int err; 25348 int reservation_count = 0; 25349 int min_ownership_delay = 6000000; /* in usec */ 25350 int max_ownership_delay = 30000000; /* in usec */ 25351 clock_t start_time; /* starting time of this algorithm */ 25352 clock_t end_time; /* time limit for giving up */ 25353 clock_t ownership_time; /* time limit for stable ownership */ 25354 clock_t current_time; 25355 clock_t previous_current_time; 25356 25357 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25358 return (ENXIO); 25359 } 25360 25361 /* 25362 * Attempt a device reservation. A priority reservation is requested. 
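	 *
	 * (Timing example with the defaults above: the loop below re-issues
	 * RESERVE every 500 msec, so stable ownership requires roughly
	 * min_ownership_delay = 6 seconds of reset-free reservations, with
	 * at least 4 consecutive successful iterations; if resets keep
	 * restarting that window, the attempt fails with EACCES once
	 * max_ownership_delay = 30 seconds has elapsed.)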
25363 */ 25364 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 25365 != SD_SUCCESS) { 25366 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25367 "sd_take_ownership: return(1)=%d\n", rval); 25368 return (rval); 25369 } 25370 25371 /* Update the softstate reserved status to indicate the reservation */ 25372 mutex_enter(SD_MUTEX(un)); 25373 un->un_resvd_status |= SD_RESERVE; 25374 un->un_resvd_status &= 25375 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 25376 mutex_exit(SD_MUTEX(un)); 25377 25378 if (p != NULL) { 25379 if (p->min_ownership_delay != 0) { 25380 min_ownership_delay = p->min_ownership_delay * 1000; 25381 } 25382 if (p->max_ownership_delay != 0) { 25383 max_ownership_delay = p->max_ownership_delay * 1000; 25384 } 25385 } 25386 SD_INFO(SD_LOG_IOCTL_MHD, un, 25387 "sd_take_ownership: min, max delays: %d, %d\n", 25388 min_ownership_delay, max_ownership_delay); 25389 25390 start_time = ddi_get_lbolt(); 25391 current_time = start_time; 25392 ownership_time = current_time + drv_usectohz(min_ownership_delay); 25393 end_time = start_time + drv_usectohz(max_ownership_delay); 25394 25395 while (current_time - end_time < 0) { 25396 delay(drv_usectohz(500000)); 25397 25398 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 25399 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 25400 mutex_enter(SD_MUTEX(un)); 25401 rval = (un->un_resvd_status & 25402 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 25403 mutex_exit(SD_MUTEX(un)); 25404 break; 25405 } 25406 } 25407 previous_current_time = current_time; 25408 current_time = ddi_get_lbolt(); 25409 mutex_enter(SD_MUTEX(un)); 25410 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 25411 ownership_time = ddi_get_lbolt() + 25412 drv_usectohz(min_ownership_delay); 25413 reservation_count = 0; 25414 } else { 25415 reservation_count++; 25416 } 25417 un->un_resvd_status |= SD_RESERVE; 25418 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 25419 mutex_exit(SD_MUTEX(un)); 25420 25421 SD_INFO(SD_LOG_IOCTL_MHD, un, 25422 "sd_take_ownership: ticks for loop iteration=%ld, " 25423 "reservation=%s\n", (current_time - previous_current_time), 25424 reservation_count ? 
"ok" : "reclaimed"); 25425 25426 if (current_time - ownership_time >= 0 && 25427 reservation_count >= 4) { 25428 rval = 0; /* Achieved a stable ownership */ 25429 break; 25430 } 25431 if (current_time - end_time >= 0) { 25432 rval = EACCES; /* No ownership in max possible time */ 25433 break; 25434 } 25435 } 25436 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25437 "sd_take_ownership: return(2)=%d\n", rval); 25438 return (rval); 25439 } 25440 25441 25442 /* 25443 * Function: sd_reserve_release() 25444 * 25445 * Description: This function builds and sends scsi RESERVE, RELEASE, and 25446 * PRIORITY RESERVE commands based on a user specified command type 25447 * 25448 * Arguments: dev - the device 'dev_t' 25449 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 25450 * SD_RESERVE, SD_RELEASE 25451 * 25452 * Return Code: 0 or Error Code 25453 */ 25454 25455 static int 25456 sd_reserve_release(dev_t dev, int cmd) 25457 { 25458 struct uscsi_cmd *com = NULL; 25459 struct sd_lun *un = NULL; 25460 char cdb[CDB_GROUP0]; 25461 int rval; 25462 25463 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 25464 (cmd == SD_PRIORITY_RESERVE)); 25465 25466 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25467 return (ENXIO); 25468 } 25469 25470 /* instantiate and initialize the command and cdb */ 25471 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25472 bzero(cdb, CDB_GROUP0); 25473 com->uscsi_flags = USCSI_SILENT; 25474 com->uscsi_timeout = un->un_reserve_release_time; 25475 com->uscsi_cdblen = CDB_GROUP0; 25476 com->uscsi_cdb = cdb; 25477 if (cmd == SD_RELEASE) { 25478 cdb[0] = SCMD_RELEASE; 25479 } else { 25480 cdb[0] = SCMD_RESERVE; 25481 } 25482 25483 /* Send the command. */ 25484 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 25485 UIO_SYSSPACE, SD_PATH_STANDARD); 25486 25487 /* 25488 * "break" a reservation that is held by another host, by issuing a 25489 * reset if priority reserve is desired, and we could not get the 25490 * device. 25491 */ 25492 if ((cmd == SD_PRIORITY_RESERVE) && 25493 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25494 /* 25495 * First try to reset the LUN. If we cannot, then try a target 25496 * reset, followed by a bus reset if the target reset fails. 25497 */ 25498 int reset_retval = 0; 25499 if (un->un_f_lun_reset_enabled == TRUE) { 25500 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 25501 } 25502 if (reset_retval == 0) { 25503 /* The LUN reset either failed or was not issued */ 25504 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25505 } 25506 if ((reset_retval == 0) && 25507 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 25508 rval = EIO; 25509 kmem_free(com, sizeof (*com)); 25510 return (rval); 25511 } 25512 25513 bzero(com, sizeof (struct uscsi_cmd)); 25514 com->uscsi_flags = USCSI_SILENT; 25515 com->uscsi_cdb = cdb; 25516 com->uscsi_cdblen = CDB_GROUP0; 25517 com->uscsi_timeout = 5; 25518 25519 /* 25520 * Reissue the last reserve command, this time without request 25521 * sense. Assume that it is just a regular reserve command. 25522 */ 25523 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 25524 UIO_SYSSPACE, SD_PATH_STANDARD); 25525 } 25526 25527 /* Return an error if still getting a reservation conflict. 
*/ 25528 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25529 rval = EACCES; 25530 } 25531 25532 kmem_free(com, sizeof (*com)); 25533 return (rval); 25534 } 25535 25536 25537 #define SD_NDUMP_RETRIES 12 25538 /* 25539 * System Crash Dump routine 25540 */ 25541 25542 static int 25543 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 25544 { 25545 int instance; 25546 int partition; 25547 int i; 25548 int err; 25549 struct sd_lun *un; 25550 struct dk_map *lp; 25551 struct scsi_pkt *wr_pktp; 25552 struct buf *wr_bp; 25553 struct buf wr_buf; 25554 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 25555 daddr_t tgt_blkno; /* rmw - blkno for target */ 25556 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 25557 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 25558 size_t io_start_offset; 25559 int doing_rmw = FALSE; 25560 int rval; 25561 #if defined(__i386) || defined(__amd64) 25562 ssize_t dma_resid; 25563 daddr_t oblkno; 25564 #endif 25565 25566 instance = SDUNIT(dev); 25567 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 25568 (!un->un_f_geometry_is_valid) || ISCD(un)) { 25569 return (ENXIO); 25570 } 25571 25572 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 25573 25574 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 25575 25576 partition = SDPART(dev); 25577 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 25578 25579 /* Validate the blocks to dump against the partition size. */ 25580 lp = &un->un_map[partition]; 25581 if ((blkno + nblk) > lp->dkl_nblk) { 25582 SD_TRACE(SD_LOG_DUMP, un, 25583 "sddump: dump range larger than partition: " 25584 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 25585 blkno, nblk, lp->dkl_nblk); 25586 return (EINVAL); 25587 } 25588 25589 mutex_enter(&un->un_pm_mutex); 25590 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 25591 struct scsi_pkt *start_pktp; 25592 25593 mutex_exit(&un->un_pm_mutex); 25594 25595 /* 25596 * Use the pm framework to power on the HBA first. 25597 */ 25598 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 25599 25600 /* 25601 * Dump no longer uses sdpower to power on a device; it's 25602 * in-line here so it can be done in polled mode. 25603 */ 25604 25605 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 25606 25607 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 25608 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 25609 25610 if (start_pktp == NULL) { 25611 /* We were not given a SCSI packet, fail. */ 25612 return (EIO); 25613 } 25614 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 25615 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 25616 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 25617 start_pktp->pkt_flags = FLAG_NOINTR; 25618 25619 mutex_enter(SD_MUTEX(un)); 25620 SD_FILL_SCSI1_LUN(un, start_pktp); 25621 mutex_exit(SD_MUTEX(un)); 25622 /* 25623 * Scsi_poll returns 0 (success) if the command completes and 25624 * the status block is STATUS_GOOD. 25625 */ 25626 if (sd_scsi_poll(un, start_pktp) != 0) { 25627 scsi_destroy_pkt(start_pktp); 25628 return (EIO); 25629 } 25630 scsi_destroy_pkt(start_pktp); 25631 (void) sd_ddi_pm_resume(un); 25632 } else { 25633 mutex_exit(&un->un_pm_mutex); 25634 } 25635 25636 mutex_enter(SD_MUTEX(un)); 25637 un->un_throttle = 0; 25638 25639 /* 25640 * The first time through, reset the specific target device. 25641 * However, when cpr calls sddump we know that sd is in 25642 * a good state, so no bus reset is required. 25643 * Clear sense data via Request Sense cmd.
25644 * In sddump we don't care about allow_bus_device_reset anymore 25645 */ 25646 25647 if ((un->un_state != SD_STATE_SUSPENDED) && 25648 (un->un_state != SD_STATE_DUMPING)) { 25649 25650 New_state(un, SD_STATE_DUMPING); 25651 25652 if (un->un_f_is_fibre == FALSE) { 25653 mutex_exit(SD_MUTEX(un)); 25654 /* 25655 * Attempt a bus reset for parallel scsi. 25656 * 25657 * Note: A bus reset is required because on some host 25658 * systems (i.e. E420R) a bus device reset is 25659 * insufficient to reset the state of the target. 25660 * 25661 * Note: Don't issue the reset for fibre-channel, 25662 * because this tends to hang the bus (loop) for 25663 * too long while everyone is logging out and in 25664 * and the deadman timer for dumping will fire 25665 * before the dump is complete. 25666 */ 25667 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 25668 mutex_enter(SD_MUTEX(un)); 25669 Restore_state(un); 25670 mutex_exit(SD_MUTEX(un)); 25671 return (EIO); 25672 } 25673 25674 /* Delay to give the device some recovery time. */ 25675 drv_usecwait(10000); 25676 25677 if (sd_send_polled_RQS(un) == SD_FAILURE) { 25678 SD_INFO(SD_LOG_DUMP, un, 25679 "sddump: sd_send_polled_RQS failed\n"); 25680 } 25681 mutex_enter(SD_MUTEX(un)); 25682 } 25683 } 25684 25685 /* 25686 * Convert the partition-relative block number to a 25687 * disk physical block number. 25688 */ 25689 blkno += un->un_offset[partition]; 25690 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 25691 25692 25693 /* 25694 * Check if the device has a non-512 block size. 25695 */ 25696 wr_bp = NULL; 25697 if (NOT_DEVBSIZE(un)) { 25698 tgt_byte_offset = blkno * un->un_sys_blocksize; 25699 tgt_byte_count = nblk * un->un_sys_blocksize; 25700 if ((tgt_byte_offset % un->un_tgt_blocksize) || 25701 (tgt_byte_count % un->un_tgt_blocksize)) { 25702 doing_rmw = TRUE; 25703 /* 25704 * Calculate the block number and number of blocks 25705 * in terms of the media block size. 25706 */ 25707 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 25708 tgt_nblk = 25709 ((tgt_byte_offset + tgt_byte_count + 25710 (un->un_tgt_blocksize - 1)) / 25711 un->un_tgt_blocksize) - tgt_blkno; 25712 25713 /* 25714 * Invoke the routine which is going to do the read part 25715 * of the read-modify-write. 25716 * Note that this routine returns a pointer to 25717 * a valid bp in wr_bp. 25718 */ 25719 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 25720 &wr_bp); 25721 if (err) { 25722 mutex_exit(SD_MUTEX(un)); 25723 return (err); 25724 } 25725 /* 25726 * The offset is calculated as: 25727 * (original block # * system block size) - 25728 * (new block # * target block size) 25729 */ 25730 io_start_offset = 25731 ((uint64_t)(blkno * un->un_sys_blocksize)) - 25732 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 25733 25734 ASSERT((io_start_offset >= 0) && 25735 (io_start_offset < un->un_tgt_blocksize)); 25736 /* 25737 * Do the modify portion of read modify write.
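 *
 * A worked example (editor's sketch, not from the original source):
 * with un_sys_blocksize = 512 and un_tgt_blocksize = 2048, a request
 * of blkno = 5, nblk = 3 gives
 *
 *	tgt_byte_offset = 5 * 512 = 2560	(not 2048-aligned => RMW)
 *	tgt_byte_count	= 3 * 512 = 1536
 *	tgt_blkno	= 2560 / 2048 = 1
 *	tgt_nblk	= ((2560 + 1536 + 2047) / 2048) - 1 = 1
 *	io_start_offset	= (5 * 512) - (1 * 2048) = 512
 *
 * so the 1536 bytes of dump data land 512 bytes into the single
 * 2048-byte target block that was read back from the media: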
25738 */ 25739 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 25740 (size_t)nblk * un->un_sys_blocksize); 25741 } else { 25742 doing_rmw = FALSE; 25743 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 25744 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 25745 } 25746 25747 /* Convert blkno and nblk to target blocks */ 25748 blkno = tgt_blkno; 25749 nblk = tgt_nblk; 25750 } else { 25751 wr_bp = &wr_buf; 25752 bzero(wr_bp, sizeof (struct buf)); 25753 wr_bp->b_flags = B_BUSY; 25754 wr_bp->b_un.b_addr = addr; 25755 wr_bp->b_bcount = nblk << DEV_BSHIFT; 25756 wr_bp->b_resid = 0; 25757 } 25758 25759 mutex_exit(SD_MUTEX(un)); 25760 25761 /* 25762 * Obtain a SCSI packet for the write command. 25763 * It should be safe to call the allocator here without 25764 * worrying about being locked for DVMA mapping because 25765 * the address we're passed is already a DVMA mapping 25766 * 25767 * We are also not going to worry about semaphore ownership 25768 * in the dump buffer. Dumping is single threaded at present. 25769 */ 25770 25771 wr_pktp = NULL; 25772 25773 #if defined(__i386) || defined(__amd64) 25774 dma_resid = wr_bp->b_bcount; 25775 oblkno = blkno; 25776 while (dma_resid != 0) { 25777 #endif 25778 25779 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 25780 wr_bp->b_flags &= ~B_ERROR; 25781 25782 #if defined(__i386) || defined(__amd64) 25783 blkno = oblkno + 25784 ((wr_bp->b_bcount - dma_resid) / 25785 un->un_tgt_blocksize); 25786 nblk = dma_resid / un->un_tgt_blocksize; 25787 25788 if (wr_pktp) { 25789 /* Partial DMA transfers after initial transfer */ 25790 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 25791 blkno, nblk); 25792 } else { 25793 /* Initial transfer */ 25794 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 25795 un->un_pkt_flags, NULL_FUNC, NULL, 25796 blkno, nblk); 25797 } 25798 #else 25799 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 25800 0, NULL_FUNC, NULL, blkno, nblk); 25801 #endif 25802 25803 if (rval == 0) { 25804 /* We were given a SCSI packet, continue. 
*/ 25805 break; 25806 } 25807 25808 if (i == 0) { 25809 if (wr_bp->b_flags & B_ERROR) { 25810 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25811 "no resources for dumping; " 25812 "error code: 0x%x, retrying", 25813 geterror(wr_bp)); 25814 } else { 25815 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25816 "no resources for dumping; retrying"); 25817 } 25818 } else if (i != (SD_NDUMP_RETRIES - 1)) { 25819 if (wr_bp->b_flags & B_ERROR) { 25820 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25821 "no resources for dumping; error code: " 25822 "0x%x, retrying\n", geterror(wr_bp)); 25823 } 25824 } else { 25825 if (wr_bp->b_flags & B_ERROR) { 25826 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25827 "no resources for dumping; " 25828 "error code: 0x%x, retries failed, " 25829 "giving up.\n", geterror(wr_bp)); 25830 } else { 25831 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25832 "no resources for dumping; " 25833 "retries failed, giving up.\n"); 25834 } 25835 mutex_enter(SD_MUTEX(un)); 25836 Restore_state(un); 25837 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 25838 mutex_exit(SD_MUTEX(un)); 25839 scsi_free_consistent_buf(wr_bp); 25840 } else { 25841 mutex_exit(SD_MUTEX(un)); 25842 } 25843 return (EIO); 25844 } 25845 drv_usecwait(10000); 25846 } 25847 25848 #if defined(__i386) || defined(__amd64) 25849 /* 25850 * save the resid from PARTIAL_DMA 25851 */ 25852 dma_resid = wr_pktp->pkt_resid; 25853 if (dma_resid != 0) 25854 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 25855 wr_pktp->pkt_resid = 0; 25856 #endif 25857 25858 /* SunBug 1222170 */ 25859 wr_pktp->pkt_flags = FLAG_NOINTR; 25860 25861 err = EIO; 25862 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 25863 25864 /* 25865 * Scsi_poll returns 0 (success) if the command completes and 25866 * the status block is STATUS_GOOD. We should only check 25867 * errors if this condition is not true. Even then we should 25868 * send our own request sense packet only if we have a check 25869 * condition and auto request sense has not been performed by 25870 * the hba. 25871 */ 25872 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 25873 25874 if ((sd_scsi_poll(un, wr_pktp) == 0) && 25875 (wr_pktp->pkt_resid == 0)) { 25876 err = SD_SUCCESS; 25877 break; 25878 } 25879 25880 /* 25881 * Check CMD_DEV_GONE 1st, give up if device is gone. 25882 */ 25883 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 25884 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25885 "Device is gone\n"); 25886 break; 25887 } 25888 25889 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 25890 SD_INFO(SD_LOG_DUMP, un, 25891 "sddump: write failed with CHECK, try # %d\n", i); 25892 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 25893 (void) sd_send_polled_RQS(un); 25894 } 25895 25896 continue; 25897 } 25898 25899 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 25900 int reset_retval = 0; 25901 25902 SD_INFO(SD_LOG_DUMP, un, 25903 "sddump: write failed with BUSY, try # %d\n", i); 25904 25905 if (un->un_f_lun_reset_enabled == TRUE) { 25906 reset_retval = scsi_reset(SD_ADDRESS(un), 25907 RESET_LUN); 25908 } 25909 if (reset_retval == 0) { 25910 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25911 } 25912 (void) sd_send_polled_RQS(un); 25913 25914 } else { 25915 SD_INFO(SD_LOG_DUMP, un, 25916 "sddump: write failed with 0x%x, try # %d\n", 25917 SD_GET_PKT_STATUS(wr_pktp), i); 25918 mutex_enter(SD_MUTEX(un)); 25919 sd_reset_target(un, wr_pktp); 25920 mutex_exit(SD_MUTEX(un)); 25921 } 25922 25923 /* 25924 * If we are not getting anywhere with lun/target resets, 25925 * let's reset the bus. 
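 * (Editor's note: this completes the escalation ladder seen above --
 * LUN reset, then target reset for BUSY status, and at the halfway
 * point of the SD_NDUMP_RETRIES attempts a full bus reset followed
 * by a polled request sense.)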
25926 */ 25927 if (i == SD_NDUMP_RETRIES/2) { 25928 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 25929 (void) sd_send_polled_RQS(un); 25930 } 25931 25932 } 25933 #if defined(__i386) || defined(__amd64) 25934 } /* dma_resid */ 25935 #endif 25936 25937 scsi_destroy_pkt(wr_pktp); 25938 mutex_enter(SD_MUTEX(un)); 25939 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 25940 mutex_exit(SD_MUTEX(un)); 25941 scsi_free_consistent_buf(wr_bp); 25942 } else { 25943 mutex_exit(SD_MUTEX(un)); 25944 } 25945 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 25946 return (err); 25947 } 25948 25949 /* 25950 * Function: sd_scsi_poll() 25951 * 25952 * Description: This is a wrapper for the scsi_poll call. 25953 * 25954 * Arguments: sd_lun - The unit structure 25955 * scsi_pkt - The scsi packet being sent to the device. 25956 * 25957 * Return Code: 0 - Command completed successfully with good status 25958 * -1 - Command failed. This could indicate a check condition 25959 * or other status value requiring recovery action. 25960 * 25961 */ 25962 25963 static int 25964 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 25965 { 25966 int status; 25967 25968 ASSERT(un != NULL); 25969 ASSERT(!mutex_owned(SD_MUTEX(un))); 25970 ASSERT(pktp != NULL); 25971 25972 status = SD_SUCCESS; 25973 25974 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 25975 pktp->pkt_flags |= un->un_tagflags; 25976 pktp->pkt_flags &= ~FLAG_NODISCON; 25977 } 25978 25979 status = sd_ddi_scsi_poll(pktp); 25980 /* 25981 * Scsi_poll returns 0 (success) if the command completes and the 25982 * status block is STATUS_GOOD. We should only check errors if this 25983 * condition is not true. Even then we should send our own request 25984 * sense packet only if we have a check condition and auto 25985 * request sense has not been performed by the hba. 25986 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 25987 */ 25988 if ((status != SD_SUCCESS) && 25989 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 25990 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 25991 (pktp->pkt_reason != CMD_DEV_GONE)) 25992 (void) sd_send_polled_RQS(un); 25993 25994 return (status); 25995 } 25996 25997 /* 25998 * Function: sd_send_polled_RQS() 25999 * 26000 * Description: This sends the request sense command to a device. 26001 * 26002 * Arguments: sd_lun - The unit structure 26003 * 26004 * Return Code: 0 - Command completed successfully with good status 26005 * -1 - Command failed. 
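 *
 * A minimal calling sketch (editor's illustration of the pattern
 * already used by sddump() and sd_scsi_poll() in this file):
 *
 *	if ((SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) &&
 *	    ((pktp->pkt_state & STATE_ARQ_DONE) == 0)) {
 *		(void) sd_send_polled_RQS(un);
 *	}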
26006 * 26007 */ 26008 26009 static int 26010 sd_send_polled_RQS(struct sd_lun *un) 26011 { 26012 int ret_val; 26013 struct scsi_pkt *rqs_pktp; 26014 struct buf *rqs_bp; 26015 26016 ASSERT(un != NULL); 26017 ASSERT(!mutex_owned(SD_MUTEX(un))); 26018 26019 ret_val = SD_SUCCESS; 26020 26021 rqs_pktp = un->un_rqs_pktp; 26022 rqs_bp = un->un_rqs_bp; 26023 26024 mutex_enter(SD_MUTEX(un)); 26025 26026 if (un->un_sense_isbusy) { 26027 ret_val = SD_FAILURE; 26028 mutex_exit(SD_MUTEX(un)); 26029 return (ret_val); 26030 } 26031 26032 /* 26033 * If the request sense buffer (and packet) is not in use, 26034 * let's set the un_sense_isbusy and send our packet 26035 */ 26036 un->un_sense_isbusy = 1; 26037 rqs_pktp->pkt_resid = 0; 26038 rqs_pktp->pkt_reason = 0; 26039 rqs_pktp->pkt_flags |= FLAG_NOINTR; 26040 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 26041 26042 mutex_exit(SD_MUTEX(un)); 26043 26044 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 26045 " 0x%p\n", rqs_bp->b_un.b_addr); 26046 26047 /* 26048 * Can't send this to sd_scsi_poll, we wrap ourselves around the 26049 * axle - it has a call into us! 26050 */ 26051 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 26052 SD_INFO(SD_LOG_COMMON, un, 26053 "sd_send_polled_RQS: RQS failed\n"); 26054 } 26055 26056 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 26057 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 26058 26059 mutex_enter(SD_MUTEX(un)); 26060 un->un_sense_isbusy = 0; 26061 mutex_exit(SD_MUTEX(un)); 26062 26063 return (ret_val); 26064 } 26065 26066 /* 26067 * Defines needed for localized version of the scsi_poll routine. 26068 */ 26069 #define SD_CSEC 10000 /* usecs */ 26070 #define SD_SEC_TO_CSEC (1000000/SD_CSEC) 26071 26072 26073 /* 26074 * Function: sd_ddi_scsi_poll() 26075 * 26076 * Description: Localized version of the scsi_poll routine. The purpose is to 26077 * send a scsi_pkt to a device as a polled command. This version 26078 * is to ensure more robust handling of transport errors. 26079 * Specifically, this routine handles the not-ready to ready 26080 * transition seen on power up and reset of Sonoma devices. This can take 26081 * up to 45 seconds for power-on and 20 seconds for reset of a 26082 * Sonoma lun. 26083 * 26084 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 26085 * 26086 * Return Code: 0 - Command completed successfully with good status 26087 * -1 - Command failed. 26088 * 26089 */ 26090 26091 static int 26092 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 26093 { 26094 int busy_count; 26095 int timeout; 26096 int rval = SD_FAILURE; 26097 int savef; 26098 struct scsi_extended_sense *sensep; 26099 long savet; 26100 void (*savec)(); 26101 /* 26102 * The following is defined in machdep.c and is used in determining if 26103 * the scsi transport system will do polled I/O instead of interrupt 26104 * I/O when called from xx_dump(). 26105 */ 26106 extern int do_polled_io; 26107 26108 /* 26109 * save old flags in pkt, to restore at end 26110 */ 26111 savef = pkt->pkt_flags; 26112 savec = pkt->pkt_comp; 26113 savet = pkt->pkt_time; 26114 26115 pkt->pkt_flags |= FLAG_NOINTR; 26116 26117 /* 26118 * XXX there is nothing in the SCSA spec that states that we should not 26119 * do a callback for polled cmds; however, removing this will break sd 26120 * and probably other target drivers 26121 */ 26122 pkt->pkt_comp = NULL; 26123 26124 /* 26125 * we don't like a polled command without timeout. 26126 * 60 seconds seems long enough.
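 *
 * (Editor's note on the units used below: with SD_CSEC = 10000 usec,
 * SD_SEC_TO_CSEC is 100, so the retry loop runs pkt_time * 100
 * iterations and each transient-failure delay of 1 * SD_CSEC is
 * 10 msec -- i.e. roughly pkt_time seconds of polling in total.)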
26127 */ 26128 if (pkt->pkt_time == 0) { 26129 pkt->pkt_time = SCSI_POLL_TIMEOUT; 26130 } 26131 26132 /* 26133 * Send polled cmd. 26134 * 26135 * We do some error recovery for various errors. Tran_busy, 26136 * queue full, and non-dispatched commands are retried every 10 msec. 26137 * as they are typically transient failures. Busy status and Not 26138 * Ready are retried every second as this status takes a while to 26139 * change. Unit attention is retried for pkt_time (60) times 26140 * with no delay. 26141 */ 26142 timeout = pkt->pkt_time * SD_SEC_TO_CSEC; 26143 26144 for (busy_count = 0; busy_count < timeout; busy_count++) { 26145 int rc; 26146 int poll_delay; 26147 26148 /* 26149 * Initialize pkt status variables. 26150 */ 26151 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 26152 26153 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 26154 if (rc != TRAN_BUSY) { 26155 /* Transport failed - give up. */ 26156 break; 26157 } else { 26158 /* Transport busy - try again. */ 26159 poll_delay = 1 * SD_CSEC; /* 10 msec */ 26160 } 26161 } else { 26162 /* 26163 * Transport accepted - check pkt status. 26164 */ 26165 rc = (*pkt->pkt_scbp) & STATUS_MASK; 26166 if (pkt->pkt_reason == CMD_CMPLT && 26167 rc == STATUS_CHECK && 26168 pkt->pkt_state & STATE_ARQ_DONE) { 26169 struct scsi_arq_status *arqstat = 26170 (struct scsi_arq_status *)(pkt->pkt_scbp); 26171 26172 sensep = &arqstat->sts_sensedata; 26173 } else { 26174 sensep = NULL; 26175 } 26176 26177 if ((pkt->pkt_reason == CMD_CMPLT) && 26178 (rc == STATUS_GOOD)) { 26179 /* No error - we're done */ 26180 rval = SD_SUCCESS; 26181 break; 26182 26183 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 26184 /* Lost connection - give up */ 26185 break; 26186 26187 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 26188 (pkt->pkt_state == 0)) { 26189 /* Pkt not dispatched - try again. */ 26190 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 26191 26192 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26193 (rc == STATUS_QFULL)) { 26194 /* Queue full - try again. */ 26195 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 26196 26197 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26198 (rc == STATUS_BUSY)) { 26199 /* Busy - try again. */ 26200 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 26201 busy_count += (SD_SEC_TO_CSEC - 1); 26202 26203 } else if ((sensep != NULL) && 26204 (sensep->es_key == KEY_UNIT_ATTENTION)) { 26205 /* Unit Attention - try again */ 26206 busy_count += (SD_SEC_TO_CSEC - 1); /* 1 */ 26207 continue; 26208 26209 } else if ((sensep != NULL) && 26210 (sensep->es_key == KEY_NOT_READY) && 26211 (sensep->es_add_code == 0x04) && 26212 (sensep->es_qual_code == 0x01)) { 26213 /* Not ready -> ready - try again. */ 26214 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 26215 busy_count += (SD_SEC_TO_CSEC - 1); 26216 26217 } else { 26218 /* BAD status - give up. */ 26219 break; 26220 } 26221 } 26222 26223 if ((curthread->t_flag & T_INTR_THREAD) == 0 && 26224 !do_polled_io) { 26225 delay(drv_usectohz(poll_delay)); 26226 } else { 26227 /* we busy wait during cpr_dump or interrupt threads */ 26228 drv_usecwait(poll_delay); 26229 } 26230 } 26231 26232 pkt->pkt_flags = savef; 26233 pkt->pkt_comp = savec; 26234 pkt->pkt_time = savet; 26235 return (rval); 26236 } 26237 26238 26239 /* 26240 * Function: sd_persistent_reservation_in_read_keys 26241 * 26242 * Description: This routine is the driver entry point for handling CD-ROM 26243 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 26244 * by sending the SCSI-3 PRIN commands to the device. 
26245 * Processes the read keys command response by copying the 26246 * reservation key information into the user provided buffer. 26247 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 26248 * 26249 * Arguments: un - Pointer to soft state struct for the target. 26250 * usrp - user provided pointer to multihost Persistent In Read 26251 * Keys structure (mhioc_inkeys_t) 26252 * flag - this argument is a pass through to ddi_copyxxx() 26253 * directly from the mode argument of ioctl(). 26254 * 26255 * Return Code: 0 - Success 26256 * EACCES 26257 * ENOTSUP 26258 * errno return code from sd_send_scsi_cmd() 26259 * 26260 * Context: Can sleep. Does not return until command is completed. 26261 */ 26262 26263 static int 26264 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 26265 mhioc_inkeys_t *usrp, int flag) 26266 { 26267 #ifdef _MULTI_DATAMODEL 26268 struct mhioc_key_list32 li32; 26269 #endif 26270 sd_prin_readkeys_t *in; 26271 mhioc_inkeys_t *ptr; 26272 mhioc_key_list_t li; 26273 uchar_t *data_bufp; 26274 int data_len; 26275 int rval; 26276 size_t copysz; 26277 26278 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 26279 return (EINVAL); 26280 } 26281 bzero(&li, sizeof (mhioc_key_list_t)); 26282 26283 /* 26284 * Get the listsize from user 26285 */ 26286 #ifdef _MULTI_DATAMODEL 26287 26288 switch (ddi_model_convert_from(flag & FMODELS)) { 26289 case DDI_MODEL_ILP32: 26290 copysz = sizeof (struct mhioc_key_list32); 26291 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 26292 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26293 "sd_persistent_reservation_in_read_keys: " 26294 "failed ddi_copyin: mhioc_key_list32_t\n"); 26295 rval = EFAULT; 26296 goto done; 26297 } 26298 li.listsize = li32.listsize; 26299 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 26300 break; 26301 26302 case DDI_MODEL_NONE: 26303 copysz = sizeof (mhioc_key_list_t); 26304 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26305 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26306 "sd_persistent_reservation_in_read_keys: " 26307 "failed ddi_copyin: mhioc_key_list_t\n"); 26308 rval = EFAULT; 26309 goto done; 26310 } 26311 break; 26312 } 26313 26314 #else /* ! 
_MULTI_DATAMODEL */ 26315 copysz = sizeof (mhioc_key_list_t); 26316 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26317 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26318 "sd_persistent_reservation_in_read_keys: " 26319 "failed ddi_copyin: mhioc_key_list_t\n"); 26320 rval = EFAULT; 26321 goto done; 26322 } 26323 #endif 26324 26325 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 26326 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 26327 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26328 26329 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 26330 data_len, data_bufp)) != 0) { 26331 goto done; 26332 } 26333 in = (sd_prin_readkeys_t *)data_bufp; 26334 ptr->generation = BE_32(in->generation); 26335 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 26336 26337 /* 26338 * Return the min(listsize, listlen) keys 26339 */ 26340 #ifdef _MULTI_DATAMODEL 26341 26342 switch (ddi_model_convert_from(flag & FMODELS)) { 26343 case DDI_MODEL_ILP32: 26344 li32.listlen = li.listlen; 26345 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 26346 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26347 "sd_persistent_reservation_in_read_keys: " 26348 "failed ddi_copyout: mhioc_key_list32_t\n"); 26349 rval = EFAULT; 26350 goto done; 26351 } 26352 break; 26353 26354 case DDI_MODEL_NONE: 26355 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26356 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26357 "sd_persistent_reservation_in_read_keys: " 26358 "failed ddi_copyout: mhioc_key_list_t\n"); 26359 rval = EFAULT; 26360 goto done; 26361 } 26362 break; 26363 } 26364 26365 #else /* ! _MULTI_DATAMODEL */ 26366 26367 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26368 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26369 "sd_persistent_reservation_in_read_keys: " 26370 "failed ddi_copyout: mhioc_key_list_t\n"); 26371 rval = EFAULT; 26372 goto done; 26373 } 26374 26375 #endif /* _MULTI_DATAMODEL */ 26376 26377 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 26378 li.listsize * MHIOC_RESV_KEY_SIZE); 26379 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 26380 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26381 "sd_persistent_reservation_in_read_keys: " 26382 "failed ddi_copyout: keylist\n"); 26383 rval = EFAULT; 26384 } 26385 done: 26386 kmem_free(data_bufp, data_len); 26387 return (rval); 26388 } 26389 26390 26391 /* 26392 * Function: sd_persistent_reservation_in_read_resv 26393 * 26394 * Description: This routine is the driver entry point for handling CD-ROM 26395 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 26396 * by sending the SCSI-3 PRIN commands to the device. 26397 * Process the read persistent reservations command response by 26398 * copying the reservation information into the user provided 26399 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 26400 * 26401 * Arguments: un - Pointer to soft state struct for the target. 26402 * usrp - user provided pointer to multihost Persistent In Read 26403 * Keys structure (mhioc_inkeys_t) 26404 * flag - this argument is a pass through to ddi_copyxxx() 26405 * directly from the mode argument of ioctl(). 26406 * 26407 * Return Code: 0 - Success 26408 * EACCES 26409 * ENOTSUP 26410 * errno return code from sd_send_scsi_cmd() 26411 * 26412 * Context: Can sleep. Does not return until command is completed. 
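 *
 * Usage sketch (editor's illustration; 'fd' is assumed to be an open
 * descriptor on the device -- the MHIOCGRP_INKEYS handler above is
 * driven the same way with an mhioc_inkeys_t):
 *
 *	mhioc_resv_desc_t descs[4];
 *	mhioc_resv_desc_list_t dl;
 *	mhioc_inresvs_t ir;
 *
 *	dl.listsize = 4;
 *	dl.list = descs;
 *	ir.li = &dl;
 *	(void) ioctl(fd, MHIOCGRP_INRESV, &ir);
 *
 * On success, dl.listlen reports how many reservations the device
 * holds and at most dl.listsize descriptors are copied into descs.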
26413 */ 26414 26415 static int 26416 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 26417 mhioc_inresvs_t *usrp, int flag) 26418 { 26419 #ifdef _MULTI_DATAMODEL 26420 struct mhioc_resv_desc_list32 resvlist32; 26421 #endif 26422 sd_prin_readresv_t *in; 26423 mhioc_inresvs_t *ptr; 26424 sd_readresv_desc_t *readresv_ptr; 26425 mhioc_resv_desc_list_t resvlist; 26426 mhioc_resv_desc_t resvdesc; 26427 uchar_t *data_bufp; 26428 int data_len; 26429 int rval; 26430 int i; 26431 size_t copysz; 26432 mhioc_resv_desc_t *bufp; 26433 26434 if ((ptr = usrp) == NULL) { 26435 return (EINVAL); 26436 } 26437 26438 /* 26439 * Get the listsize from user 26440 */ 26441 #ifdef _MULTI_DATAMODEL 26442 switch (ddi_model_convert_from(flag & FMODELS)) { 26443 case DDI_MODEL_ILP32: 26444 copysz = sizeof (struct mhioc_resv_desc_list32); 26445 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 26446 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26447 "sd_persistent_reservation_in_read_resv: " 26448 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26449 rval = EFAULT; 26450 goto done; 26451 } 26452 resvlist.listsize = resvlist32.listsize; 26453 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 26454 break; 26455 26456 case DDI_MODEL_NONE: 26457 copysz = sizeof (mhioc_resv_desc_list_t); 26458 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26459 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26460 "sd_persistent_reservation_in_read_resv: " 26461 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26462 rval = EFAULT; 26463 goto done; 26464 } 26465 break; 26466 } 26467 #else /* ! _MULTI_DATAMODEL */ 26468 copysz = sizeof (mhioc_resv_desc_list_t); 26469 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26470 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26471 "sd_persistent_reservation_in_read_resv: " 26472 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26473 rval = EFAULT; 26474 goto done; 26475 } 26476 #endif /* ! _MULTI_DATAMODEL */ 26477 26478 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 26479 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 26480 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26481 26482 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 26483 data_len, data_bufp)) != 0) { 26484 goto done; 26485 } 26486 in = (sd_prin_readresv_t *)data_bufp; 26487 ptr->generation = BE_32(in->generation); 26488 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 26489 26490 /* 26491 * Return the min(listsize, listlen) keys 26492 */ 26493 #ifdef _MULTI_DATAMODEL 26494 26495 switch (ddi_model_convert_from(flag & FMODELS)) { 26496 case DDI_MODEL_ILP32: 26497 resvlist32.listlen = resvlist.listlen; 26498 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 26499 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26500 "sd_persistent_reservation_in_read_resv: " 26501 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26502 rval = EFAULT; 26503 goto done; 26504 } 26505 break; 26506 26507 case DDI_MODEL_NONE: 26508 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26509 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26510 "sd_persistent_reservation_in_read_resv: " 26511 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26512 rval = EFAULT; 26513 goto done; 26514 } 26515 break; 26516 } 26517 26518 #else /* ! _MULTI_DATAMODEL */ 26519 26520 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26521 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26522 "sd_persistent_reservation_in_read_resv: " 26523 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26524 rval = EFAULT; 26525 goto done; 26526 } 26527 26528 #endif /* !
_MULTI_DATAMODEL */ 26529 26530 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 26531 bufp = resvlist.list; 26532 copysz = sizeof (mhioc_resv_desc_t); 26533 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 26534 i++, readresv_ptr++, bufp++) { 26535 26536 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 26537 MHIOC_RESV_KEY_SIZE); 26538 resvdesc.type = readresv_ptr->type; 26539 resvdesc.scope = readresv_ptr->scope; 26540 resvdesc.scope_specific_addr = 26541 BE_32(readresv_ptr->scope_specific_addr); 26542 26543 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 26544 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26545 "sd_persistent_reservation_in_read_resv: " 26546 "failed ddi_copyout: resvlist\n"); 26547 rval = EFAULT; 26548 goto done; 26549 } 26550 } 26551 done: 26552 kmem_free(data_bufp, data_len); 26553 return (rval); 26554 } 26555 26556 26557 /* 26558 * Function: sr_change_blkmode() 26559 * 26560 * Description: This routine is the driver entry point for handling CD-ROM 26561 * block mode ioctl requests. Support for returning and changing 26562 * the current block size in use by the device is implemented. The 26563 * LBA size is changed via a MODE SELECT Block Descriptor. 26564 * 26565 * This routine issues a mode sense with an allocation length of 26566 * 12 bytes for the mode page header and a single block descriptor. 26567 * 26568 * Arguments: dev - the device 'dev_t' 26569 * cmd - the request type; one of CDROMGBLKMODE (get) or 26570 * CDROMSBLKMODE (set) 26571 * data - current block size or requested block size 26572 * flag - this argument is a pass through to ddi_copyxxx() directly 26573 * from the mode argument of ioctl(). 26574 * 26575 * Return Code: the code returned by sd_send_scsi_cmd() 26576 * EINVAL if invalid arguments are provided 26577 * EFAULT if ddi_copyxxx() fails 26578 * ENXIO if fail ddi_get_soft_state 26579 * EIO if invalid mode sense block descriptor length 26580 * 26581 */ 26582 26583 static int 26584 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 26585 { 26586 struct sd_lun *un = NULL; 26587 struct mode_header *sense_mhp, *select_mhp; 26588 struct block_descriptor *sense_desc, *select_desc; 26589 int current_bsize; 26590 int rval = EINVAL; 26591 uchar_t *sense = NULL; 26592 uchar_t *select = NULL; 26593 26594 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 26595 26596 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26597 return (ENXIO); 26598 } 26599 26600 /* 26601 * The block length is changed via the Mode Select block descriptor, the 26602 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 26603 * required as part of this routine. Therefore the mode sense allocation 26604 * length is specified to be the length of a mode page header and a 26605 * block descriptor. 
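 *
 * (Editor's sketch of the 12-byte parameter list this implies,
 * assuming the usual 4-byte mode header and 8-byte block descriptor:
 *
 *	byte 0		mode data length
 *	byte 1		medium type
 *	byte 2		device-specific parameter
 *	byte 3		block descriptor length (8)
 *	byte 4		density code
 *	bytes 5-7	number of blocks
 *	byte 8		reserved
 *	bytes 9-11	block length (hi, mid, lo)
 *
 * The current block size below is assembled from bytes 9-11 via
 * blksize_hi, blksize_mid and blksize_lo.)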
26606 */ 26607 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26608 26609 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26610 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 26611 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26612 "sr_change_blkmode: Mode Sense Failed\n"); 26613 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26614 return (rval); 26615 } 26616 26617 /* Check the block descriptor len to handle only 1 block descriptor */ 26618 sense_mhp = (struct mode_header *)sense; 26619 if ((sense_mhp->bdesc_length == 0) || 26620 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 26621 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26622 "sr_change_blkmode: Mode Sense returned invalid block" 26623 " descriptor length\n"); 26624 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26625 return (EIO); 26626 } 26627 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 26628 current_bsize = ((sense_desc->blksize_hi << 16) | 26629 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 26630 26631 /* Process command */ 26632 switch (cmd) { 26633 case CDROMGBLKMODE: 26634 /* Return the block size obtained during the mode sense */ 26635 if (ddi_copyout(&current_bsize, (void *)data, 26636 sizeof (int), flag) != 0) 26637 rval = EFAULT; 26638 break; 26639 case CDROMSBLKMODE: 26640 /* Validate the requested block size */ 26641 switch (data) { 26642 case CDROM_BLK_512: 26643 case CDROM_BLK_1024: 26644 case CDROM_BLK_2048: 26645 case CDROM_BLK_2056: 26646 case CDROM_BLK_2336: 26647 case CDROM_BLK_2340: 26648 case CDROM_BLK_2352: 26649 case CDROM_BLK_2368: 26650 case CDROM_BLK_2448: 26651 case CDROM_BLK_2646: 26652 case CDROM_BLK_2647: 26653 break; 26654 default: 26655 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26656 "sr_change_blkmode: " 26657 "Block Size '%ld' Not Supported\n", data); 26658 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26659 return (EINVAL); 26660 } 26661 26662 /* 26663 * The current block size matches the requested block size, so 26664 * there is no need to send the mode select to change the size. 26665 */ 26666 if (current_bsize == data) { 26667 break; 26668 } 26669 26670 /* Build the select data for the requested block size */ 26671 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26672 select_mhp = (struct mode_header *)select; 26673 select_desc = 26674 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 26675 /* 26676 * The LBA size is changed via the block descriptor, so the 26677 * descriptor is built according to the user data. 26678 */ 26679 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 26680 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 26681 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 26682 select_desc->blksize_lo = (char)((data) & 0x000000ff); 26683 26684 /* Send the mode select for the requested block size */ 26685 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 26686 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26687 SD_PATH_STANDARD)) != 0) { 26688 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26689 "sr_change_blkmode: Mode Select Failed\n"); 26690 /* 26691 * The mode select failed for the requested block size, 26692 * so reset the data for the original block size and 26693 * send it to the target. The error is indicated by the 26694 * return value for the failed mode select.
26695 */ 26696 select_desc->blksize_hi = sense_desc->blksize_hi; 26697 select_desc->blksize_mid = sense_desc->blksize_mid; 26698 select_desc->blksize_lo = sense_desc->blksize_lo; 26699 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 26700 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26701 SD_PATH_STANDARD); 26702 } else { 26703 ASSERT(!mutex_owned(SD_MUTEX(un))); 26704 mutex_enter(SD_MUTEX(un)); 26705 sd_update_block_info(un, (uint32_t)data, 0); 26706 26707 mutex_exit(SD_MUTEX(un)); 26708 } 26709 break; 26710 default: 26711 /* should not reach here, but check anyway */ 26712 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26713 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 26714 rval = EINVAL; 26715 break; 26716 } 26717 26718 if (select) { 26719 kmem_free(select, BUFLEN_CHG_BLK_MODE); 26720 } 26721 if (sense) { 26722 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26723 } 26724 return (rval); 26725 } 26726 26727 26728 /* 26729 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 26730 * implement driver support for getting and setting the CD speed. The command 26731 * set used will be based on the device type. If the device has not been 26732 * identified as MMC the Toshiba vendor specific mode page will be used. If 26733 * the device is MMC but does not support the Real Time Streaming feature 26734 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 26735 * be used to read the speed. 26736 */ 26737 26738 /* 26739 * Function: sr_change_speed() 26740 * 26741 * Description: This routine is the driver entry point for handling CD-ROM 26742 * drive speed ioctl requests for devices supporting the Toshiba 26743 * vendor specific drive speed mode page. Support for returning 26744 * and changing the current drive speed in use by the device is 26745 * implemented. 26746 * 26747 * Arguments: dev - the device 'dev_t' 26748 * cmd - the request type; one of CDROMGDRVSPEED (get) or 26749 * CDROMSDRVSPEED (set) 26750 * data - current drive speed or requested drive speed 26751 * flag - this argument is a pass through to ddi_copyxxx() directly 26752 * from the mode argument of ioctl(). 26753 * 26754 * Return Code: the code returned by sd_send_scsi_cmd() 26755 * EINVAL if invalid arguments are provided 26756 * EFAULT if ddi_copyxxx() fails 26757 * ENXIO if fail ddi_get_soft_state 26758 * EIO if invalid mode sense block descriptor length 26759 */ 26760 26761 static int 26762 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26763 { 26764 struct sd_lun *un = NULL; 26765 struct mode_header *sense_mhp, *select_mhp; 26766 struct mode_speed *sense_page, *select_page; 26767 int current_speed; 26768 int rval = EINVAL; 26769 int bd_len; 26770 uchar_t *sense = NULL; 26771 uchar_t *select = NULL; 26772 26773 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26774 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26775 return (ENXIO); 26776 } 26777 26778 /* 26779 * Note: The drive speed is being modified here according to a Toshiba 26780 * vendor specific mode page (0x31). 
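 * (Editor's note: as the get/set cases below show, the page's speed
 * byte mostly carries the CDROM_*_SPEED codes directly, with the
 * quirk that the raw value 0x2 denotes twelve-speed on this page.)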
26781 */ 26782 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26783 26784 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26785 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 26786 SD_PATH_STANDARD)) != 0) { 26787 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26788 "sr_change_speed: Mode Sense Failed\n"); 26789 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26790 return (rval); 26791 } 26792 sense_mhp = (struct mode_header *)sense; 26793 26794 /* Check the block descriptor len to handle only 1 block descriptor */ 26795 bd_len = sense_mhp->bdesc_length; 26796 if (bd_len > MODE_BLK_DESC_LENGTH) { 26797 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26798 "sr_change_speed: Mode Sense returned invalid block " 26799 "descriptor length\n"); 26800 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26801 return (EIO); 26802 } 26803 26804 sense_page = (struct mode_speed *) 26805 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 26806 current_speed = sense_page->speed; 26807 26808 /* Process command */ 26809 switch (cmd) { 26810 case CDROMGDRVSPEED: 26811 /* Return the drive speed obtained during the mode sense */ 26812 if (current_speed == 0x2) { 26813 current_speed = CDROM_TWELVE_SPEED; 26814 } 26815 if (ddi_copyout(&current_speed, (void *)data, 26816 sizeof (int), flag) != 0) { 26817 rval = EFAULT; 26818 } 26819 break; 26820 case CDROMSDRVSPEED: 26821 /* Validate the requested drive speed */ 26822 switch ((uchar_t)data) { 26823 case CDROM_TWELVE_SPEED: 26824 data = 0x2; 26825 /*FALLTHROUGH*/ 26826 case CDROM_NORMAL_SPEED: 26827 case CDROM_DOUBLE_SPEED: 26828 case CDROM_QUAD_SPEED: 26829 case CDROM_MAXIMUM_SPEED: 26830 break; 26831 default: 26832 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26833 "sr_change_speed: " 26834 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 26835 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26836 return (EINVAL); 26837 } 26838 26839 /* 26840 * The current drive speed matches the requested drive speed so 26841 * there is no need to send the mode select to change the speed 26842 */ 26843 if (current_speed == data) { 26844 break; 26845 } 26846 26847 /* Build the select data for the requested drive speed */ 26848 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26849 select_mhp = (struct mode_header *)select; 26850 select_mhp->bdesc_length = 0; 26851 select_page = 26852 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 26855 select_page->mode_page.code = CDROM_MODE_SPEED; 26856 select_page->mode_page.length = 2; 26857 select_page->speed = (uchar_t)data; 26858 26859 /* Send the mode select for the requested drive speed */ 26860 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26861 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26862 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 26863 /* 26864 * The mode select failed for the requested drive speed, 26865 * so reset the data for the original drive speed and 26866 * send it to the target. The error is indicated by the 26867 * return value for the failed mode select.
26868 */ 26869 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26870 "sr_change_speed: Mode Select Failed\n"); 26871 select_page->speed = sense_page->speed; 26872 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26873 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26874 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26875 } 26876 break; 26877 default: 26878 /* should not reach here, but check anyway */ 26879 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26880 "sr_change_speed: Command '%x' Not Supported\n", cmd); 26881 rval = EINVAL; 26882 break; 26883 } 26884 26885 if (select) { 26886 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 26887 } 26888 if (sense) { 26889 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26890 } 26891 26892 return (rval); 26893 } 26894 26895 26896 /* 26897 * Function: sr_atapi_change_speed() 26898 * 26899 * Description: This routine is the driver entry point for handling CD-ROM 26900 * drive speed ioctl requests for MMC devices that do not support 26901 * the Real Time Streaming feature (0x107). 26902 * 26903 * Note: This routine will use the SET SPEED command which may not 26904 * be supported by all devices. 26905 * 26906 * Arguments: dev - the device 'dev_t' 26907 * cmd - the request type; one of CDROMGDRVSPEED (get) or 26908 * CDROMSDRVSPEED (set) 26909 * data - current drive speed or requested drive speed 26910 * flag - this argument is a pass through to ddi_copyxxx() directly 26911 * from the mode argument of ioctl(). 26912 * 26913 * Return Code: the code returned by sd_send_scsi_cmd() 26914 * EINVAL if invalid arguments are provided 26915 * EFAULT if ddi_copyxxx() fails 26916 * ENXIO if fail ddi_get_soft_state 26917 * EIO if invalid mode sense block descriptor length 26918 */ 26919 26920 static int 26921 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26922 { 26923 struct sd_lun *un; 26924 struct uscsi_cmd *com = NULL; 26925 struct mode_header_grp2 *sense_mhp; 26926 uchar_t *sense_page; 26927 uchar_t *sense = NULL; 26928 char cdb[CDB_GROUP5]; 26929 int bd_len; 26930 int current_speed = 0; 26931 int max_speed = 0; 26932 int rval; 26933 26934 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26935 26936 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26937 return (ENXIO); 26938 } 26939 26940 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 26941 26942 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 26943 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 26944 SD_PATH_STANDARD)) != 0) { 26945 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26946 "sr_atapi_change_speed: Mode Sense Failed\n"); 26947 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26948 return (rval); 26949 } 26950 26951 /* Check the block descriptor len to handle only 1 block descriptor */ 26952 sense_mhp = (struct mode_header_grp2 *)sense; 26953 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 26954 if (bd_len > MODE_BLK_DESC_LENGTH) { 26955 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26956 "sr_atapi_change_speed: Mode Sense returned invalid " 26957 "block descriptor length\n"); 26958 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26959 return (EIO); 26960 } 26961 26962 /* Calculate the current and maximum drive speeds */ 26963 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26964 current_speed = (sense_page[14] << 8) | sense_page[15]; 26965 max_speed = (sense_page[8] << 8) | sense_page[9]; 26966 26967 /* Process the command */ 26968 switch (cmd) { 26969 case CDROMGDRVSPEED: 26970 current_speed /= SD_SPEED_1X; 26971 if
(ddi_copyout(&current_speed, (void *)data, 26972 sizeof (int), flag) != 0) 26973 rval = EFAULT; 26974 break; 26975 case CDROMSDRVSPEED: 26976 /* Convert the speed code to KB/sec */ 26977 switch ((uchar_t)data) { 26978 case CDROM_NORMAL_SPEED: 26979 current_speed = SD_SPEED_1X; 26980 break; 26981 case CDROM_DOUBLE_SPEED: 26982 current_speed = 2 * SD_SPEED_1X; 26983 break; 26984 case CDROM_QUAD_SPEED: 26985 current_speed = 4 * SD_SPEED_1X; 26986 break; 26987 case CDROM_TWELVE_SPEED: 26988 current_speed = 12 * SD_SPEED_1X; 26989 break; 26990 case CDROM_MAXIMUM_SPEED: 26991 current_speed = 0xffff; 26992 break; 26993 default: 26994 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26995 "sr_atapi_change_speed: invalid drive speed %d\n", 26996 (uchar_t)data); 26997 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26998 return (EINVAL); 26999 } 27000 27001 /* Check the request against the drive's max speed. */ 27002 if (current_speed != 0xffff) { 27003 if (current_speed > max_speed) { 27004 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27005 return (EINVAL); 27006 } 27007 } 27008 27009 /* 27010 * Build and send the SET SPEED command 27011 * 27012 * Note: The SET SPEED (0xBB) command used in this routine is 27013 * obsolete per the SCSI MMC spec but still supported in the 27014 * MT FUJI vendor spec. Most equipment adheres to MT FUJI; 27015 * therefore the command is still implemented in this routine. 27016 */ 27017 bzero(cdb, sizeof (cdb)); 27018 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 27019 cdb[2] = (uchar_t)(current_speed >> 8); 27020 cdb[3] = (uchar_t)current_speed; 27021 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27022 com->uscsi_cdb = (caddr_t)cdb; 27023 com->uscsi_cdblen = CDB_GROUP5; 27024 com->uscsi_bufaddr = NULL; 27025 com->uscsi_buflen = 0; 27026 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27027 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, 0, 27028 UIO_SYSSPACE, SD_PATH_STANDARD); 27029 break; 27030 default: 27031 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27032 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 27033 rval = EINVAL; 27034 } 27035 27036 if (sense) { 27037 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27038 } 27039 if (com) { 27040 kmem_free(com, sizeof (*com)); 27041 } 27042 return (rval); 27043 } 27044 27045 27046 /* 27047 * Function: sr_pause_resume() 27048 * 27049 * Description: This routine is the driver entry point for handling CD-ROM 27050 * pause/resume ioctl requests. This only affects the audio play 27051 * operation. 27052 * 27053 * Arguments: dev - the device 'dev_t' 27054 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 27055 * for setting the resume bit of the cdb.
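 * (Editor's note: the resume bit is bit 0 of CDB byte 8 of the
 * PAUSE/RESUME command -- 1 resumes audio play, 0 pauses it, as
 * the switch in the body below encodes.)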
27056 * 27057 * Return Code: the code returned by sd_send_scsi_cmd() 27058 * EINVAL if invalid mode specified 27059 * 27060 */ 27061 27062 static int 27063 sr_pause_resume(dev_t dev, int cmd) 27064 { 27065 struct sd_lun *un; 27066 struct uscsi_cmd *com; 27067 char cdb[CDB_GROUP1]; 27068 int rval; 27069 27070 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27071 return (ENXIO); 27072 } 27073 27074 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27075 bzero(cdb, CDB_GROUP1); 27076 cdb[0] = SCMD_PAUSE_RESUME; 27077 switch (cmd) { 27078 case CDROMRESUME: 27079 cdb[8] = 1; 27080 break; 27081 case CDROMPAUSE: 27082 cdb[8] = 0; 27083 break; 27084 default: 27085 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 27086 " Command '%x' Not Supported\n", cmd); 27087 rval = EINVAL; 27088 goto done; 27089 } 27090 27091 com->uscsi_cdb = cdb; 27092 com->uscsi_cdblen = CDB_GROUP1; 27093 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27094 27095 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27096 UIO_SYSSPACE, SD_PATH_STANDARD); 27097 27098 done: 27099 kmem_free(com, sizeof (*com)); 27100 return (rval); 27101 } 27102 27103 27104 /* 27105 * Function: sr_play_msf() 27106 * 27107 * Description: This routine is the driver entry point for handling CD-ROM 27108 * ioctl requests to output the audio signals at the specified 27109 * starting address and continue the audio play until the specified 27110 * ending address (CDROMPLAYMSF). The address is in Minute Second 27111 * Frame (MSF) format. 27112 * 27113 * Arguments: dev - the device 'dev_t' 27114 * data - pointer to user provided audio msf structure, 27115 * specifying start/end addresses. 27116 * flag - this argument is a pass through to ddi_copyxxx() 27117 * directly from the mode argument of ioctl().
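 *
 * Usage sketch (editor's illustration; 'fd' is assumed to be an
 * open descriptor on the raw CD device -- play from 02:00.00 to
 * 04:30.00):
 *
 *	struct cdrom_msf msf;
 *
 *	msf.cdmsf_min0 = 2;  msf.cdmsf_sec0 = 0;  msf.cdmsf_frame0 = 0;
 *	msf.cdmsf_min1 = 4;  msf.cdmsf_sec1 = 30; msf.cdmsf_frame1 = 0;
 *	(void) ioctl(fd, CDROMPLAYMSF, &msf);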
27118 * 27119 * Return Code: the code returned by sd_send_scsi_cmd() 27120 * EFAULT if ddi_copyxxx() fails 27121 * ENXIO if fail ddi_get_soft_state 27122 * EINVAL if data pointer is NULL 27123 */ 27124 27125 static int 27126 sr_play_msf(dev_t dev, caddr_t data, int flag) 27127 { 27128 struct sd_lun *un; 27129 struct uscsi_cmd *com; 27130 struct cdrom_msf msf_struct; 27131 struct cdrom_msf *msf = &msf_struct; 27132 char cdb[CDB_GROUP1]; 27133 int rval; 27134 27135 if (data == NULL) { 27136 return (EINVAL); 27137 } 27138 27139 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27140 return (ENXIO); 27141 } 27142 27143 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 27144 return (EFAULT); 27145 } 27146 27147 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27148 bzero(cdb, CDB_GROUP1); 27149 cdb[0] = SCMD_PLAYAUDIO_MSF; 27150 if (un->un_f_cfg_playmsf_bcd == TRUE) { 27151 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 27152 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 27153 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 27154 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 27155 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 27156 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 27157 } else { 27158 cdb[3] = msf->cdmsf_min0; 27159 cdb[4] = msf->cdmsf_sec0; 27160 cdb[5] = msf->cdmsf_frame0; 27161 cdb[6] = msf->cdmsf_min1; 27162 cdb[7] = msf->cdmsf_sec1; 27163 cdb[8] = msf->cdmsf_frame1; 27164 } 27165 com->uscsi_cdb = cdb; 27166 com->uscsi_cdblen = CDB_GROUP1; 27167 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27168 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27169 UIO_SYSSPACE, SD_PATH_STANDARD); 27170 kmem_free(com, sizeof (*com)); 27171 return (rval); 27172 } 27173 27174 27175 /* 27176 * Function: sr_play_trkind() 27177 * 27178 * Description: This routine is the driver entry point for handling CD-ROM 27179 * ioctl requests to output the audio signals at the specified 27180 * starting address and continue the audio play until the specified 27181 * ending address (CDROMPLAYTRKIND). The address is in Track Index 27182 * format. 27183 * 27184 * Arguments: dev - the device 'dev_t' 27185 * data - pointer to user provided audio track/index structure, 27186 * specifying start/end addresses. 27187 * flag - this argument is a pass through to ddi_copyxxx() 27188 * directly from the mode argument of ioctl(). 
27189 * 27190 * Return Code: the code returned by sd_send_scsi_cmd() 27191 * EFAULT if ddi_copyxxx() fails 27192 * ENXIO if fail ddi_get_soft_state 27193 * EINVAL if data pointer is NULL 27194 */ 27195 27196 static int 27197 sr_play_trkind(dev_t dev, caddr_t data, int flag) 27198 { 27199 struct cdrom_ti ti_struct; 27200 struct cdrom_ti *ti = &ti_struct; 27201 struct uscsi_cmd *com = NULL; 27202 char cdb[CDB_GROUP1]; 27203 int rval; 27204 27205 if (data == NULL) { 27206 return (EINVAL); 27207 } 27208 27209 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 27210 return (EFAULT); 27211 } 27212 27213 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27214 bzero(cdb, CDB_GROUP1); 27215 cdb[0] = SCMD_PLAYAUDIO_TI; 27216 cdb[4] = ti->cdti_trk0; 27217 cdb[5] = ti->cdti_ind0; 27218 cdb[7] = ti->cdti_trk1; 27219 cdb[8] = ti->cdti_ind1; 27220 com->uscsi_cdb = cdb; 27221 com->uscsi_cdblen = CDB_GROUP1; 27222 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27223 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27224 UIO_SYSSPACE, SD_PATH_STANDARD); 27225 kmem_free(com, sizeof (*com)); 27226 return (rval); 27227 } 27228 27229 27230 /* 27231 * Function: sr_read_all_subcodes() 27232 * 27233 * Description: This routine is the driver entry point for handling CD-ROM 27234 * ioctl requests to return raw subcode data while the target is 27235 * playing audio (CDROMSUBCODE). 27236 * 27237 * Arguments: dev - the device 'dev_t' 27238 * data - pointer to user provided cdrom subcode structure, 27239 * specifying the transfer length and address. 27240 * flag - this argument is a pass through to ddi_copyxxx() 27241 * directly from the mode argument of ioctl(). 27242 * 27243 * Return Code: the code returned by sd_send_scsi_cmd() 27244 * EFAULT if ddi_copyxxx() fails 27245 * ENXIO if fail ddi_get_soft_state 27246 * EINVAL if data pointer is NULL 27247 */ 27248 27249 static int 27250 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 27251 { 27252 struct sd_lun *un = NULL; 27253 struct uscsi_cmd *com = NULL; 27254 struct cdrom_subcode *subcode = NULL; 27255 int rval; 27256 size_t buflen; 27257 char cdb[CDB_GROUP5]; 27258 27259 #ifdef _MULTI_DATAMODEL 27260 /* To support ILP32 applications in an LP64 world */ 27261 struct cdrom_subcode32 cdrom_subcode32; 27262 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 27263 #endif 27264 if (data == NULL) { 27265 return (EINVAL); 27266 } 27267 27268 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27269 return (ENXIO); 27270 } 27271 27272 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 27273 27274 #ifdef _MULTI_DATAMODEL 27275 switch (ddi_model_convert_from(flag & FMODELS)) { 27276 case DDI_MODEL_ILP32: 27277 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 27278 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27279 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27280 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27281 return (EFAULT); 27282 } 27283 /* Convert the ILP32 uscsi data from the application to LP64 */ 27284 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 27285 break; 27286 case DDI_MODEL_NONE: 27287 if (ddi_copyin(data, subcode, 27288 sizeof (struct cdrom_subcode), flag)) { 27289 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27290 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27291 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27292 return (EFAULT); 27293 } 27294 break; 27295 } 27296 #else /* ! 
_MULTI_DATAMODEL */ 27297 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 27298 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27299 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27300 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27301 return (EFAULT); 27302 } 27303 #endif /* _MULTI_DATAMODEL */ 27304 27305 /* 27306 * Since MMC-2 expects max 3 bytes for length, check if the 27307 * length input is greater than 3 bytes 27308 */ 27309 if ((subcode->cdsc_length & 0xFF000000) != 0) { 27310 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27311 "sr_read_all_subcodes: " 27312 "cdrom transfer length too large: %d (limit %d)\n", 27313 subcode->cdsc_length, 0xFFFFFF); 27314 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27315 return (EINVAL); 27316 } 27317 27318 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 27319 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27320 bzero(cdb, CDB_GROUP5); 27321 27322 if (un->un_f_mmc_cap == TRUE) { 27323 cdb[0] = (char)SCMD_READ_CD; 27324 cdb[2] = (char)0xff; 27325 cdb[3] = (char)0xff; 27326 cdb[4] = (char)0xff; 27327 cdb[5] = (char)0xff; 27328 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27329 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27330 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 27331 cdb[10] = 1; 27332 } else { 27333 /* 27334 * Note: A vendor specific command (0xDF) is being used here to 27335 * request a read of all subcodes. 27336 */ 27337 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 27338 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 27339 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27340 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27341 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 27342 } 27343 com->uscsi_cdb = cdb; 27344 com->uscsi_cdblen = CDB_GROUP5; 27345 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 27346 com->uscsi_buflen = buflen; 27347 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27348 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 27349 UIO_SYSSPACE, SD_PATH_STANDARD); 27350 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27351 kmem_free(com, sizeof (*com)); 27352 return (rval); 27353 } 27354 27355 27356 /* 27357 * Function: sr_read_subchannel() 27358 * 27359 * Description: This routine is the driver entry point for handling CD-ROM 27360 * ioctl requests to return the Q sub-channel data of the CD 27361 * current position block (CDROMSUBCHNL). The data includes the 27362 * track number, index number, absolute CD-ROM address (LBA or MSF 27363 * format per the user), track relative CD-ROM address (LBA or MSF 27364 * format per the user), control data and audio status. 27365 * 27366 * Arguments: dev - the device 'dev_t' 27367 * data - pointer to user provided cdrom sub-channel structure 27368 * flag - this argument is a pass through to ddi_copyxxx() 27369 * directly from the mode argument of ioctl().
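 *
 * Usage sketch (editor's illustration; 'fd' is assumed to be an
 * open descriptor on the raw CD device):
 *
 *	struct cdrom_subchnl sc;
 *	uchar_t status, trk;
 *
 *	sc.cdsc_format = CDROM_MSF;	(use CDROM_LBA for LBA addresses)
 *	if (ioctl(fd, CDROMSUBCHNL, &sc) == 0) {
 *		status = sc.cdsc_audiostatus;
 *		trk = sc.cdsc_trk;
 *	}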
27370 * 27371 * Return Code: the code returned by sd_send_scsi_cmd() 27372 * EFAULT if ddi_copyxxx() fails 27373 * ENXIO if fail ddi_get_soft_state 27374 * EINVAL if data pointer is NULL 27375 */ 27376 27377 static int 27378 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 27379 { 27380 struct sd_lun *un; 27381 struct uscsi_cmd *com; 27382 struct cdrom_subchnl subchanel; 27383 struct cdrom_subchnl *subchnl = &subchanel; 27384 char cdb[CDB_GROUP1]; 27385 caddr_t buffer; 27386 int rval; 27387 27388 if (data == NULL) { 27389 return (EINVAL); 27390 } 27391 27392 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27393 (un->un_state == SD_STATE_OFFLINE)) { 27394 return (ENXIO); 27395 } 27396 27397 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 27398 return (EFAULT); 27399 } 27400 27401 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 27402 bzero(cdb, CDB_GROUP1); 27403 cdb[0] = SCMD_READ_SUBCHANNEL; 27404 /* Set the MSF bit based on the user requested address format */ 27405 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 27406 /* 27407 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 27408 * returned 27409 */ 27410 cdb[2] = 0x40; 27411 /* 27412 * Set byte 3 to specify the return data format. A value of 0x01 27413 * indicates that the CD-ROM current position should be returned. 27414 */ 27415 cdb[3] = 0x01; 27416 cdb[8] = 0x10; 27417 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27418 com->uscsi_cdb = cdb; 27419 com->uscsi_cdblen = CDB_GROUP1; 27420 com->uscsi_bufaddr = buffer; 27421 com->uscsi_buflen = 16; 27422 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27423 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27424 UIO_SYSSPACE, SD_PATH_STANDARD); 27425 if (rval != 0) { 27426 kmem_free(buffer, 16); 27427 kmem_free(com, sizeof (*com)); 27428 return (rval); 27429 } 27430 27431 /* Process the returned Q sub-channel data */ 27432 subchnl->cdsc_audiostatus = buffer[1]; 27433 subchnl->cdsc_adr = (buffer[5] & 0xF0); 27434 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 27435 subchnl->cdsc_trk = buffer[6]; 27436 subchnl->cdsc_ind = buffer[7]; 27437 if (subchnl->cdsc_format & CDROM_LBA) { 27438 subchnl->cdsc_absaddr.lba = 27439 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27440 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27441 subchnl->cdsc_reladdr.lba = 27442 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 27443 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 27444 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 27445 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 27446 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 27447 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 27448 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 27449 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 27450 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 27451 } else { 27452 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 27453 subchnl->cdsc_absaddr.msf.second = buffer[10]; 27454 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 27455 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 27456 subchnl->cdsc_reladdr.msf.second = buffer[14]; 27457 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 27458 } 27459 kmem_free(buffer, 16); 27460 kmem_free(com, sizeof (*com)); 27461 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 27462 != 0) { 27463 return (EFAULT); 27464 } 27465 return (rval); 27466 } 27467 27468 27469 /* 27470 * Function: 
sr_read_tocentry() 27471 * 27472 * Description: This routine is the driver entry point for handling CD-ROM 27473 * ioctl requests to read from the Table of Contents (TOC) 27474 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 27475 * fields, the starting address (LBA or MSF format per the user) 27476 * and the data mode if the user specified track is a data track. 27477 * 27478 * Note: The READ HEADER (0x44) command used in this routine is 27479 * obsolete per the SCSI MMC spec but still supported in the 27480 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI; 27481 * therefore the command is still implemented in this routine. 27482 * 27483 * Arguments: dev - the device 'dev_t' 27484 * data - pointer to user provided toc entry structure, 27485 * specifying the track # and the address format 27486 * (LBA or MSF). 27487 * flag - this argument is a pass through to ddi_copyxxx() 27488 * directly from the mode argument of ioctl(). 27489 * 27490 * Return Code: the code returned by sd_send_scsi_cmd() 27491 * EFAULT if ddi_copyxxx() fails 27492 * ENXIO if fail ddi_get_soft_state 27493 * EINVAL if data pointer is NULL 27494 */ 27495 27496 static int 27497 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 27498 { 27499 struct sd_lun *un = NULL; 27500 struct uscsi_cmd *com; 27501 struct cdrom_tocentry toc_entry; 27502 struct cdrom_tocentry *entry = &toc_entry; 27503 caddr_t buffer; 27504 int rval; 27505 char cdb[CDB_GROUP1]; 27506 27507 if (data == NULL) { 27508 return (EINVAL); 27509 } 27510 27511 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27512 (un->un_state == SD_STATE_OFFLINE)) { 27513 return (ENXIO); 27514 } 27515 27516 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 27517 return (EFAULT); 27518 } 27519 27520 /* Validate the requested track and address format */ 27521 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 27522 return (EINVAL); 27523 } 27524 27525 if (entry->cdte_track == 0) { 27526 return (EINVAL); 27527 } 27528 27529 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 27530 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27531 bzero(cdb, CDB_GROUP1); 27532 27533 cdb[0] = SCMD_READ_TOC; 27534 /* Set the MSF bit based on the user requested address format */ 27535 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 27536 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 27537 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 27538 } else { 27539 cdb[6] = entry->cdte_track; 27540 } 27541 27542 /* 27543 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
27544 * (4 byte TOC response header + 8 byte track descriptor) 27545 */ 27546 cdb[8] = 12; 27547 com->uscsi_cdb = cdb; 27548 com->uscsi_cdblen = CDB_GROUP1; 27549 com->uscsi_bufaddr = buffer; 27550 com->uscsi_buflen = 0x0C; 27551 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 27552 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27553 UIO_SYSSPACE, SD_PATH_STANDARD); 27554 if (rval != 0) { 27555 kmem_free(buffer, 12); 27556 kmem_free(com, sizeof (*com)); 27557 return (rval); 27558 } 27559 27560 /* Process the toc entry */ 27561 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 27562 entry->cdte_ctrl = (buffer[5] & 0x0F); 27563 if (entry->cdte_format & CDROM_LBA) { 27564 entry->cdte_addr.lba = 27565 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27566 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27567 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 27568 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 27569 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 27570 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 27571 /* 27572 * Send a READ TOC command using the LBA address format to get 27573 * the LBA for the track requested so it can be used in the 27574 * READ HEADER request 27575 * 27576 * Note: The MSF bit of the READ HEADER command specifies the 27577 * output format. The block address specified in that command 27578 * must be in LBA format. 27579 */ 27580 cdb[1] = 0; 27581 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27582 UIO_SYSSPACE, SD_PATH_STANDARD); 27583 if (rval != 0) { 27584 kmem_free(buffer, 12); 27585 kmem_free(com, sizeof (*com)); 27586 return (rval); 27587 } 27588 } else { 27589 entry->cdte_addr.msf.minute = buffer[9]; 27590 entry->cdte_addr.msf.second = buffer[10]; 27591 entry->cdte_addr.msf.frame = buffer[11]; 27592 /* 27593 * Send a READ TOC command using the LBA address format to get 27594 * the LBA for the track requested so it can be used in the 27595 * READ HEADER request 27596 * 27597 * Note: The MSF bit of the READ HEADER command specifies the 27598 * output format. The block address specified in that command 27599 * must be in LBA format. 27600 */ 27601 cdb[1] = 0; 27602 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27603 UIO_SYSSPACE, SD_PATH_STANDARD); 27604 if (rval != 0) { 27605 kmem_free(buffer, 12); 27606 kmem_free(com, sizeof (*com)); 27607 return (rval); 27608 } 27609 } 27610 27611 /* 27612 * Build and send the READ HEADER command to determine the data mode of 27613 * the user specified track. 27614 */ 27615 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 27616 (entry->cdte_track != CDROM_LEADOUT)) { 27617 bzero(cdb, CDB_GROUP1); 27618 cdb[0] = SCMD_READ_HEADER; 27619 cdb[2] = buffer[8]; 27620 cdb[3] = buffer[9]; 27621 cdb[4] = buffer[10]; 27622 cdb[5] = buffer[11]; 27623 cdb[8] = 0x08; 27624 com->uscsi_buflen = 0x08; 27625 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27626 UIO_SYSSPACE, SD_PATH_STANDARD); 27627 if (rval == 0) { 27628 entry->cdte_datamode = buffer[0]; 27629 } else { 27630 /* 27631 * The READ HEADER command failed; since this is 27632 * obsoleted in one spec, it's better to return 27633 * -1 for an invalid track so that we can still 27634 * receive the rest of the TOC data.
27635 */ 27636 entry->cdte_datamode = (uchar_t)-1; 27637 } 27638 } else { 27639 entry->cdte_datamode = (uchar_t)-1; 27640 } 27641 27642 kmem_free(buffer, 12); 27643 kmem_free(com, sizeof (*com)); 27644 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 27645 return (EFAULT); 27646 27647 return (rval); 27648 } 27649 27650 27651 /* 27652 * Function: sr_read_tochdr() 27653 * 27654 * Description: This routine is the driver entry point for handling CD-ROM 27655 * ioctl requests to read the Table of Contents (TOC) header 27656 * (CDROMREADTOCHDR). The TOC header consists of the disk starting 27657 * and ending track numbers. 27658 * 27659 * Arguments: dev - the device 'dev_t' 27660 * data - pointer to user provided toc header structure, 27661 * specifying the starting and ending track numbers. 27662 * flag - this argument is a pass through to ddi_copyxxx() 27663 * directly from the mode argument of ioctl(). 27664 * 27665 * Return Code: the code returned by sd_send_scsi_cmd() 27666 * EFAULT if ddi_copyxxx() fails 27667 * ENXIO if fail ddi_get_soft_state 27668 * EINVAL if data pointer is NULL 27669 */ 27670 27671 static int 27672 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 27673 { 27674 struct sd_lun *un; 27675 struct uscsi_cmd *com; 27676 struct cdrom_tochdr toc_header; 27677 struct cdrom_tochdr *hdr = &toc_header; 27678 char cdb[CDB_GROUP1]; 27679 int rval; 27680 caddr_t buffer; 27681 27682 if (data == NULL) { 27683 return (EINVAL); 27684 } 27685 27686 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27687 (un->un_state == SD_STATE_OFFLINE)) { 27688 return (ENXIO); 27689 } 27690 27691 buffer = kmem_zalloc(4, KM_SLEEP); 27692 bzero(cdb, CDB_GROUP1); 27693 cdb[0] = SCMD_READ_TOC; 27694 /* 27695 * Specifying a track number of 0x00 in the READ TOC command indicates 27696 * that the TOC header should be returned 27697 */ 27698 cdb[6] = 0x00; 27699 /* 27700 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 27701 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 27702 */ 27703 cdb[8] = 0x04; 27704 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27705 com->uscsi_cdb = cdb; 27706 com->uscsi_cdblen = CDB_GROUP1; 27707 com->uscsi_bufaddr = buffer; 27708 com->uscsi_buflen = 0x04; 27709 com->uscsi_timeout = 300; 27710 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27711 27712 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27713 UIO_SYSSPACE, SD_PATH_STANDARD); 27714 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 27715 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 27716 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 27717 } else { 27718 hdr->cdth_trk0 = buffer[2]; 27719 hdr->cdth_trk1 = buffer[3]; 27720 } 27721 kmem_free(buffer, 4); 27722 kmem_free(com, sizeof (*com)); 27723 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 27724 return (EFAULT); 27725 } 27726 return (rval); 27727 } 27728 27729 27730 /* 27731 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 27732 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for 27733 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 27734 * digital audio and extended architecture digital audio. These modes are 27735 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 27736 * MMC specs.
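 *
 * As an illustrative summary (inferred from the routines below rather
 * than stated in any one spec): MMC capable targets are driven with
 * READ CD (0xBE), first generation ATAPI targets may use the older
 * READ CD (0xD4), and targets that support neither fall back to the
 * direct access READ commands or to the vendor unique 0xD8/0xDB
 * commands.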
27737 * 27738 * In addition to support for the various data formats these routines also 27739 * include support for devices that implement only the direct access READ 27740 * commands (0x08, 0x28), devices that implement the READ_CD commands 27741 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 27742 * READ CDXA commands (0xD8, 0xDB) 27743 */ 27744 27745 /* 27746 * Function: sr_read_mode1() 27747 * 27748 * Description: This routine is the driver entry point for handling CD-ROM 27749 * ioctl read mode1 requests (CDROMREADMODE1). 27750 * 27751 * Arguments: dev - the device 'dev_t' 27752 * data - pointer to user provided cd read structure specifying 27753 * the lba buffer address and length. 27754 * flag - this argument is a pass through to ddi_copyxxx() 27755 * directly from the mode argument of ioctl(). 27756 * 27757 * Return Code: the code returned by sd_send_scsi_cmd() 27758 * EFAULT if ddi_copyxxx() fails 27759 * ENXIO if fail ddi_get_soft_state 27760 * EINVAL if data pointer is NULL 27761 */ 27762 27763 static int 27764 sr_read_mode1(dev_t dev, caddr_t data, int flag) 27765 { 27766 struct sd_lun *un; 27767 struct cdrom_read mode1_struct; 27768 struct cdrom_read *mode1 = &mode1_struct; 27769 int rval; 27770 #ifdef _MULTI_DATAMODEL 27771 /* To support ILP32 applications in an LP64 world */ 27772 struct cdrom_read32 cdrom_read32; 27773 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27774 #endif /* _MULTI_DATAMODEL */ 27775 27776 if (data == NULL) { 27777 return (EINVAL); 27778 } 27779 27780 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27781 (un->un_state == SD_STATE_OFFLINE)) { 27782 return (ENXIO); 27783 } 27784 27785 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27786 "sd_read_mode1: entry: un:0x%p\n", un); 27787 27788 #ifdef _MULTI_DATAMODEL 27789 switch (ddi_model_convert_from(flag & FMODELS)) { 27790 case DDI_MODEL_ILP32: 27791 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27792 return (EFAULT); 27793 } 27794 /* Convert the ILP32 uscsi data from the application to LP64 */ 27795 cdrom_read32tocdrom_read(cdrd32, mode1); 27796 break; 27797 case DDI_MODEL_NONE: 27798 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27799 return (EFAULT); 27800 } 27801 } 27802 #else /* ! _MULTI_DATAMODEL */ 27803 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27804 return (EFAULT); 27805 } 27806 #endif /* _MULTI_DATAMODEL */ 27807 27808 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 27809 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 27810 27811 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27812 "sd_read_mode1: exit: un:0x%p\n", un); 27813 27814 return (rval); 27815 } 27816 27817 27818 /* 27819 * Function: sr_read_cd_mode2() 27820 * 27821 * Description: This routine is the driver entry point for handling CD-ROM 27822 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27823 * support the READ CD (0xBE) command or the 1st generation 27824 * READ CD (0xD4) command. 27825 * 27826 * Arguments: dev - the device 'dev_t' 27827 * data - pointer to user provided cd read structure specifying 27828 * the lba buffer address and length. 27829 * flag - this argument is a pass through to ddi_copyxxx() 27830 * directly from the mode argument of ioctl(). 
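 *
 *		Sizing example (illustrative only, mirroring the transfer
 *		length computation in the code below): to read 16 mode 2
 *		sectors starting at LBA 100, a caller would supply
 *
 *			cdread_lba = 100;
 *			cdread_buflen = 16 * 2336;	(37376 bytes)
 *
 *		and the routine derives the CDB transfer length as
 *		cdread_buflen / 2336 blocks.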
27831 * 27832 * Return Code: the code returned by sd_send_scsi_cmd() 27833 * EFAULT if ddi_copyxxx() fails 27834 * ENXIO if fail ddi_get_soft_state 27835 * EINVAL if data pointer is NULL 27836 */ 27837 27838 static int 27839 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 27840 { 27841 struct sd_lun *un; 27842 struct uscsi_cmd *com; 27843 struct cdrom_read mode2_struct; 27844 struct cdrom_read *mode2 = &mode2_struct; 27845 uchar_t cdb[CDB_GROUP5]; 27846 int nblocks; 27847 int rval; 27848 #ifdef _MULTI_DATAMODEL 27849 /* To support ILP32 applications in an LP64 world */ 27850 struct cdrom_read32 cdrom_read32; 27851 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27852 #endif /* _MULTI_DATAMODEL */ 27853 27854 if (data == NULL) { 27855 return (EINVAL); 27856 } 27857 27858 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27859 (un->un_state == SD_STATE_OFFLINE)) { 27860 return (ENXIO); 27861 } 27862 27863 #ifdef _MULTI_DATAMODEL 27864 switch (ddi_model_convert_from(flag & FMODELS)) { 27865 case DDI_MODEL_ILP32: 27866 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27867 return (EFAULT); 27868 } 27869 /* Convert the ILP32 uscsi data from the application to LP64 */ 27870 cdrom_read32tocdrom_read(cdrd32, mode2); 27871 break; 27872 case DDI_MODEL_NONE: 27873 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27874 return (EFAULT); 27875 } 27876 break; 27877 } 27878 27879 #else /* ! _MULTI_DATAMODEL */ 27880 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27881 return (EFAULT); 27882 } 27883 #endif /* _MULTI_DATAMODEL */ 27884 27885 bzero(cdb, sizeof (cdb)); 27886 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 27887 /* Read command supported by 1st generation ATAPI drives */ 27888 cdb[0] = SCMD_READ_CDD4; 27889 } else { 27890 /* Universal CD Access Command */ 27891 cdb[0] = SCMD_READ_CD; 27892 } 27893 27894 /* 27895 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book 27896 */ 27897 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 27898 27899 /* set the start address */ 27900 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF); 27901 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF); 27902 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 27903 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 27904 27905 /* set the transfer length */ 27906 nblocks = mode2->cdread_buflen / 2336; 27907 cdb[6] = (uchar_t)(nblocks >> 16); 27908 cdb[7] = (uchar_t)(nblocks >> 8); 27909 cdb[8] = (uchar_t)nblocks; 27910 27911 /* set the filter bits */ 27912 cdb[9] = CDROM_READ_CD_USERDATA; 27913 27914 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27915 com->uscsi_cdb = (caddr_t)cdb; 27916 com->uscsi_cdblen = sizeof (cdb); 27917 com->uscsi_bufaddr = mode2->cdread_bufaddr; 27918 com->uscsi_buflen = mode2->cdread_buflen; 27919 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27920 27921 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 27922 UIO_SYSSPACE, SD_PATH_STANDARD); 27923 kmem_free(com, sizeof (*com)); 27924 return (rval); 27925 } 27926 27927 27928 /* 27929 * Function: sr_read_mode2() 27930 * 27931 * Description: This routine is the driver entry point for handling CD-ROM 27932 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27933 * do not support the READ CD (0xBE) command. 27934 * 27935 * Arguments: dev - the device 'dev_t' 27936 * data - pointer to user provided cd read structure specifying 27937 * the lba buffer address and length.
27938 * flag - this argument is a pass through to ddi_copyxxx() 27939 * directly from the mode argument of ioctl(). 27940 * 27941 * Return Code: the code returned by sd_send_scsi_cmd() 27942 * EFAULT if ddi_copyxxx() fails 27943 * ENXIO if fail ddi_get_soft_state 27944 * EINVAL if data pointer is NULL 27945 * EIO if fail to reset block size 27946 * EAGAIN if commands are in progress in the driver 27947 */ 27948 27949 static int 27950 sr_read_mode2(dev_t dev, caddr_t data, int flag) 27951 { 27952 struct sd_lun *un; 27953 struct cdrom_read mode2_struct; 27954 struct cdrom_read *mode2 = &mode2_struct; 27955 int rval; 27956 uint32_t restore_blksize; 27957 struct uscsi_cmd *com; 27958 uchar_t cdb[CDB_GROUP0]; 27959 int nblocks; 27960 27961 #ifdef _MULTI_DATAMODEL 27962 /* To support ILP32 applications in an LP64 world */ 27963 struct cdrom_read32 cdrom_read32; 27964 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27965 #endif /* _MULTI_DATAMODEL */ 27966 27967 if (data == NULL) { 27968 return (EINVAL); 27969 } 27970 27971 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27972 (un->un_state == SD_STATE_OFFLINE)) { 27973 return (ENXIO); 27974 } 27975 27976 /* 27977 * Because this routine will update the device and driver block size 27978 * being used we want to make sure there are no commands in progress. 27979 * If commands are in progress the user will have to try again. 27980 * 27981 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 27982 * in sdioctl to protect commands from sdioctl through to the top of 27983 * sd_uscsi_strategy. See sdioctl for details. 27984 */ 27985 mutex_enter(SD_MUTEX(un)); 27986 if (un->un_ncmds_in_driver != 1) { 27987 mutex_exit(SD_MUTEX(un)); 27988 return (EAGAIN); 27989 } 27990 mutex_exit(SD_MUTEX(un)); 27991 27992 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27993 "sd_read_mode2: entry: un:0x%p\n", un); 27994 27995 #ifdef _MULTI_DATAMODEL 27996 switch (ddi_model_convert_from(flag & FMODELS)) { 27997 case DDI_MODEL_ILP32: 27998 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27999 return (EFAULT); 28000 } 28001 /* Convert the ILP32 uscsi data from the application to LP64 */ 28002 cdrom_read32tocdrom_read(cdrd32, mode2); 28003 break; 28004 case DDI_MODEL_NONE: 28005 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 28006 return (EFAULT); 28007 } 28008 break; 28009 } 28010 #else /* ! 
_MULTI_DATAMODEL */ 28011 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 28012 return (EFAULT); 28013 } 28014 #endif /* _MULTI_DATAMODEL */ 28015 28016 /* Store the current target block size for restoration later */ 28017 restore_blksize = un->un_tgt_blocksize; 28018 28019 /* Change the device and soft state target block size to 2336 */ 28020 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 28021 rval = EIO; 28022 goto done; 28023 } 28024 28025 28026 bzero(cdb, sizeof (cdb)); 28027 28028 /* set READ operation */ 28029 cdb[0] = SCMD_READ; 28030 28031 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 28032 mode2->cdread_lba >>= 2; 28033 28034 /* set the start address */ 28035 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F); 28036 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 28037 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 28038 28039 /* set the transfer length */ 28040 nblocks = mode2->cdread_buflen / 2336; 28041 cdb[4] = (uchar_t)nblocks & 0xFF; 28042 28043 /* build command */ 28044 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28045 com->uscsi_cdb = (caddr_t)cdb; 28046 com->uscsi_cdblen = sizeof (cdb); 28047 com->uscsi_bufaddr = mode2->cdread_bufaddr; 28048 com->uscsi_buflen = mode2->cdread_buflen; 28049 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28050 28051 /* 28052 * Issue SCSI command with user space address for read buffer. 28053 * 28054 * This sends the command through the main channel in the driver. 28055 * 28056 * Since this is accessed via an IOCTL call, we go through the 28057 * standard path, so that if the device was powered down, then 28058 * it would be 'awakened' to handle the command. 28059 */ 28060 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 28061 UIO_SYSSPACE, SD_PATH_STANDARD); 28062 28063 kmem_free(com, sizeof (*com)); 28064 28065 /* Restore the device and soft state target block size */ 28066 if (sr_sector_mode(dev, restore_blksize) != 0) { 28067 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28068 "cannot switch back to mode 1\n"); 28069 /* 28070 * If the READ succeeded we still need to report 28071 * an error because we failed to reset the block size 28072 */ 28073 if (rval == 0) { 28074 rval = EIO; 28075 } 28076 } 28077 28078 done: 28079 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28080 "sd_read_mode2: exit: un:0x%p\n", un); 28081 28082 return (rval); 28083 } 28084 28085 28086 /* 28087 * Function: sr_sector_mode() 28088 * 28089 * Description: This utility function is used by sr_read_mode2 to set the target 28090 * block size based on the user specified size. This is a legacy 28091 * implementation based upon a vendor specific mode page. 28092 * 28093 * Arguments: dev - the device 'dev_t' 28094 * blksize - flag indicating if block size is being set to 2336 or 28095 * 512.
28096 * 28097 * Return Code: the code returned by sd_send_scsi_MODE_SENSE() or 28098 * sd_send_scsi_MODE_SELECT() 28099 * ENXIO if fail ddi_get_soft_state 28100 * 28101 */ 28102 28103 static int 28104 sr_sector_mode(dev_t dev, uint32_t blksize) 28105 { 28106 struct sd_lun *un; 28107 uchar_t *sense; 28108 uchar_t *select; 28109 int rval; 28110 28111 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28112 (un->un_state == SD_STATE_OFFLINE)) { 28113 return (ENXIO); 28114 } 28115 28116 sense = kmem_zalloc(20, KM_SLEEP); 28117 28118 /* Note: This is a vendor specific mode page (0x81) */ 28119 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 28120 SD_PATH_STANDARD)) != 0) { 28121 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28122 "sr_sector_mode: Mode Sense failed\n"); 28123 kmem_free(sense, 20); 28124 return (rval); 28125 } 28126 select = kmem_zalloc(20, KM_SLEEP); 28127 select[3] = 0x08; 28128 select[10] = ((blksize >> 8) & 0xff); 28129 select[11] = (blksize & 0xff); 28130 select[12] = 0x01; 28131 select[13] = 0x06; 28132 select[14] = sense[14]; 28133 select[15] = sense[15]; 28134 if (blksize == SD_MODE2_BLKSIZE) { 28135 select[14] |= 0x01; 28136 } 28137 28138 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 28139 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 28140 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28141 "sr_sector_mode: Mode Select failed\n"); 28142 } else { 28143 /* 28144 * Only update the softstate block size if we successfully 28145 * changed the device block mode. 28146 */ 28147 mutex_enter(SD_MUTEX(un)); 28148 sd_update_block_info(un, blksize, 0); 28149 mutex_exit(SD_MUTEX(un)); 28150 } 28151 kmem_free(sense, 20); 28152 kmem_free(select, 20); 28153 return (rval); 28154 } 28155 28156 28157 /* 28158 * Function: sr_read_cdda() 28159 * 28160 * Description: This routine is the driver entry point for handling CD-ROM 28161 * ioctl requests to return CD-DA or subcode data. (CDROMCDDA) If 28162 * the target supports CDDA these requests are handled via a vendor 28163 * specific command (0xD8). If the target does not support CDDA 28164 * these requests are handled via the READ CD command (0xBE). 28165 * 28166 * Arguments: dev - the device 'dev_t' 28167 * data - pointer to user provided CD-DA structure specifying 28168 * the track starting address, transfer length, and 28169 * subcode options. 28170 * flag - this argument is a pass through to ddi_copyxxx() 28171 * directly from the mode argument of ioctl().
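 *
 *		Buffer sizing sketch (an illustrative reading of the
 *		switch statement below, not an independent specification):
 *		each transferred block is 2352 bytes of audio data, plus
 *		16 bytes of Q subcode for CDROM_DA_SUBQ (2368 total), or
 *		plus 96 bytes of raw subcode for CDROM_DA_ALL_SUBCODE
 *		(2448 total); CDROM_DA_SUBCODE_ONLY transfers just the
 *		96 subcode bytes per block.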
28172 * 28173 * Return Code: the code returned by sd_send_scsi_cmd() 28174 * EFAULT if ddi_copyxxx() fails 28175 * ENXIO if fail ddi_get_soft_state 28176 * EINVAL if invalid arguments are provided 28177 * ENOTTY 28178 */ 28179 28180 static int 28181 sr_read_cdda(dev_t dev, caddr_t data, int flag) 28182 { 28183 struct sd_lun *un; 28184 struct uscsi_cmd *com; 28185 struct cdrom_cdda *cdda; 28186 int rval; 28187 size_t buflen; 28188 char cdb[CDB_GROUP5]; 28189 28190 #ifdef _MULTI_DATAMODEL 28191 /* To support ILP32 applications in an LP64 world */ 28192 struct cdrom_cdda32 cdrom_cdda32; 28193 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 28194 #endif /* _MULTI_DATAMODEL */ 28195 28196 if (data == NULL) { 28197 return (EINVAL); 28198 } 28199 28200 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28201 return (ENXIO); 28202 } 28203 28204 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 28205 28206 #ifdef _MULTI_DATAMODEL 28207 switch (ddi_model_convert_from(flag & FMODELS)) { 28208 case DDI_MODEL_ILP32: 28209 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 28210 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28211 "sr_read_cdda: ddi_copyin Failed\n"); 28212 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28213 return (EFAULT); 28214 } 28215 /* Convert the ILP32 uscsi data from the application to LP64 */ 28216 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 28217 break; 28218 case DDI_MODEL_NONE: 28219 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28220 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28221 "sr_read_cdda: ddi_copyin Failed\n"); 28222 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28223 return (EFAULT); 28224 } 28225 break; 28226 } 28227 #else /* ! _MULTI_DATAMODEL */ 28228 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28229 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28230 "sr_read_cdda: ddi_copyin Failed\n"); 28231 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28232 return (EFAULT); 28233 } 28234 #endif /* _MULTI_DATAMODEL */ 28235 28236 /* 28237 * Since MMC-2 expects max 3 bytes for length, check if the 28238 * length input is greater than 3 bytes 28239 */ 28240 if ((cdda->cdda_length & 0xFF000000) != 0) { 28241 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 28242 "cdrom transfer length too large: %d (limit %d)\n", 28243 cdda->cdda_length, 0xFFFFFF); 28244 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28245 return (EINVAL); 28246 } 28247 28248 switch (cdda->cdda_subcode) { 28249 case CDROM_DA_NO_SUBCODE: 28250 buflen = CDROM_BLK_2352 * cdda->cdda_length; 28251 break; 28252 case CDROM_DA_SUBQ: 28253 buflen = CDROM_BLK_2368 * cdda->cdda_length; 28254 break; 28255 case CDROM_DA_ALL_SUBCODE: 28256 buflen = CDROM_BLK_2448 * cdda->cdda_length; 28257 break; 28258 case CDROM_DA_SUBCODE_ONLY: 28259 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 28260 break; 28261 default: 28262 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28263 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 28264 cdda->cdda_subcode); 28265 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28266 return (EINVAL); 28267 } 28268 28269 /* Build and send the command */ 28270 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28271 bzero(cdb, CDB_GROUP5); 28272 28273 if (un->un_f_cfg_cdda == TRUE) { 28274 cdb[0] = (char)SCMD_READ_CD; 28275 cdb[1] = 0x04; 28276 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28277 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28278 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28279 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28280 
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28281 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28282 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 28283 cdb[9] = 0x10; 28284 switch (cdda->cdda_subcode) { 28285 case CDROM_DA_NO_SUBCODE : 28286 cdb[10] = 0x0; 28287 break; 28288 case CDROM_DA_SUBQ : 28289 cdb[10] = 0x2; 28290 break; 28291 case CDROM_DA_ALL_SUBCODE : 28292 cdb[10] = 0x1; 28293 break; 28294 case CDROM_DA_SUBCODE_ONLY : 28295 /* FALLTHROUGH */ 28296 default : 28297 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28298 kmem_free(com, sizeof (*com)); 28299 return (ENOTTY); 28300 } 28301 } else { 28302 cdb[0] = (char)SCMD_READ_CDDA; 28303 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28304 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28305 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28306 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28307 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 28308 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28309 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28310 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 28311 cdb[10] = cdda->cdda_subcode; 28312 } 28313 28314 com->uscsi_cdb = cdb; 28315 com->uscsi_cdblen = CDB_GROUP5; 28316 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 28317 com->uscsi_buflen = buflen; 28318 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28319 28320 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 28321 UIO_SYSSPACE, SD_PATH_STANDARD); 28322 28323 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28324 kmem_free(com, sizeof (*com)); 28325 return (rval); 28326 } 28327 28328 28329 /* 28330 * Function: sr_read_cdxa() 28331 * 28332 * Description: This routine is the driver entry point for handling CD-ROM 28333 * ioctl requests to return CD-XA (Extended Architecture) data. 28334 * (CDROMCDXA). 28335 * 28336 * Arguments: dev - the device 'dev_t' 28337 * data - pointer to user provided CD-XA structure specifying 28338 * the data starting address, transfer length, and format 28339 * flag - this argument is a pass through to ddi_copyxxx() 28340 * directly from the mode argument of ioctl(). 28341 * 28342 * Return Code: the code returned by sd_send_scsi_cmd() 28343 * EFAULT if ddi_copyxxx() fails 28344 * ENXIO if fail ddi_get_soft_state 28345 * EINVAL if data pointer is NULL 28346 */ 28347 28348 static int 28349 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 28350 { 28351 struct sd_lun *un; 28352 struct uscsi_cmd *com; 28353 struct cdrom_cdxa *cdxa; 28354 int rval; 28355 size_t buflen; 28356 char cdb[CDB_GROUP5]; 28357 uchar_t read_flags; 28358 28359 #ifdef _MULTI_DATAMODEL 28360 /* To support ILP32 applications in an LP64 world */ 28361 struct cdrom_cdxa32 cdrom_cdxa32; 28362 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 28363 #endif /* _MULTI_DATAMODEL */ 28364 28365 if (data == NULL) { 28366 return (EINVAL); 28367 } 28368 28369 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28370 return (ENXIO); 28371 } 28372 28373 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 28374 28375 #ifdef _MULTI_DATAMODEL 28376 switch (ddi_model_convert_from(flag & FMODELS)) { 28377 case DDI_MODEL_ILP32: 28378 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 28379 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28380 return (EFAULT); 28381 } 28382 /* 28383 * Convert the ILP32 uscsi data from the 28384 * application to LP64 for internal use. 
28385 */ 28386 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 28387 break; 28388 case DDI_MODEL_NONE: 28389 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 28390 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28391 return (EFAULT); 28392 } 28393 break; 28394 } 28395 #else /* ! _MULTI_DATAMODEL */ 28396 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 28397 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28398 return (EFAULT); 28399 } 28400 #endif /* _MULTI_DATAMODEL */ 28401 28402 /* 28403 * Since MMC-2 expects max 3 bytes for length, check if the 28404 * length input is greater than 3 bytes 28405 */ 28406 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 28407 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 28408 "cdrom transfer length too large: %d (limit %d)\n", 28409 cdxa->cdxa_length, 0xFFFFFF); 28410 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28411 return (EINVAL); 28412 } 28413 28414 switch (cdxa->cdxa_format) { 28415 case CDROM_XA_DATA: 28416 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 28417 read_flags = 0x10; 28418 break; 28419 case CDROM_XA_SECTOR_DATA: 28420 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 28421 read_flags = 0xf8; 28422 break; 28423 case CDROM_XA_DATA_W_ERROR: 28424 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 28425 read_flags = 0xfc; 28426 break; 28427 default: 28428 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28429 "sr_read_cdxa: Format '0x%x' Not Supported\n", 28430 cdxa->cdxa_format); 28431 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28432 return (EINVAL); 28433 } 28434 28435 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28436 bzero(cdb, CDB_GROUP5); 28437 if (un->un_f_mmc_cap == TRUE) { 28438 cdb[0] = (char)SCMD_READ_CD; 28439 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28440 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28441 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28442 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28443 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28444 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28445 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 28446 cdb[9] = (char)read_flags; 28447 } else { 28448 /* 28449 * Note: A vendor specific command (0xDB) is being used here to 28450 * request a read of the CD-XA data.
28451 */ 28452 cdb[0] = (char)SCMD_READ_CDXA; 28453 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28454 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28455 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28456 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28457 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 28458 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28459 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28460 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 28461 cdb[10] = cdxa->cdxa_format; 28462 } 28463 com->uscsi_cdb = cdb; 28464 com->uscsi_cdblen = CDB_GROUP5; 28465 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 28466 com->uscsi_buflen = buflen; 28467 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28468 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 28469 UIO_SYSSPACE, SD_PATH_STANDARD); 28470 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28471 kmem_free(com, sizeof (*com)); 28472 return (rval); 28473 } 28474 28475 28476 /* 28477 * Function: sr_eject() 28478 * 28479 * Description: This routine is the driver entry point for handling CD-ROM 28480 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 28481 * 28482 * Arguments: dev - the device 'dev_t' 28483 * 28484 * Return Code: the code returned by sd_send_scsi_cmd() 28485 */ 28486 28487 static int 28488 sr_eject(dev_t dev) 28489 { 28490 struct sd_lun *un; 28491 int rval; 28492 28493 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28494 (un->un_state == SD_STATE_OFFLINE)) { 28495 return (ENXIO); 28496 } 28497 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 28498 SD_PATH_STANDARD)) != 0) { 28499 return (rval); 28500 } 28501 28502 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 28503 SD_PATH_STANDARD); 28504 28505 if (rval == 0) { 28506 mutex_enter(SD_MUTEX(un)); 28507 sr_ejected(un); 28508 un->un_mediastate = DKIO_EJECTED; 28509 cv_broadcast(&un->un_state_cv); 28510 mutex_exit(SD_MUTEX(un)); 28511 } 28512 return (rval); 28513 } 28514 28515 28516 /* 28517 * Function: sr_ejected() 28518 * 28519 * Description: This routine updates the soft state structure to invalidate the 28520 * geometry information after the media has been ejected or a 28521 * media eject has been detected. 28522 * 28523 * Arguments: un - driver soft state (unit) structure 28524 */ 28525 28526 static void 28527 sr_ejected(struct sd_lun *un) 28528 { 28529 struct sd_errstats *stp; 28530 28531 ASSERT(un != NULL); 28532 ASSERT(mutex_owned(SD_MUTEX(un))); 28533 28534 un->un_f_blockcount_is_valid = FALSE; 28535 un->un_f_tgt_blocksize_is_valid = FALSE; 28536 un->un_f_geometry_is_valid = FALSE; 28537 28538 if (un->un_errstats != NULL) { 28539 stp = (struct sd_errstats *)un->un_errstats->ks_data; 28540 stp->sd_capacity.value.ui64 = 0; 28541 } 28542 } 28543 28544 28545 /* 28546 * Function: sr_check_wp() 28547 * 28548 * Description: This routine checks the write protection of a removable media 28549 * disk via the write protect bit of the Mode Page Header device 28550 * specific field. This routine has been implemented to use the 28551 * error recovery mode page for all device types. 28552 * Note: In the future use a sd_send_scsi_MODE_SENSE() routine 28553 * 28554 * Arguments: dev - the device 'dev_t' 28555 * 28556 * Return Code: int indicating if the device is write protected (1) or not (0) 28557 * 28558 * Context: Kernel thread. 
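 *
 *		Illustrative reduction (a sketch of the check performed
 *		below, not an additional interface): the result is simply
 *		the WRITE_PROTECT bit of the mode header's device specific
 *		byte, e.g. for the non-ATAPI case:
 *
 *			device_specific =
 *			    ((struct mode_header *)sense)->device_specific;
 *			rval = (device_specific & WRITE_PROTECT) ?
 *			    TRUE : FALSE;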
28559 * 28560 */ 28561 28562 static int 28563 sr_check_wp(dev_t dev) 28564 { 28565 struct sd_lun *un; 28566 uchar_t device_specific; 28567 uchar_t *sense; 28568 int hdrlen; 28569 int rval; 28570 int retry_flag = FALSE; 28571 28572 /* 28573 * Note: The return codes for this routine should be reworked to 28574 * properly handle the case of a NULL softstate. 28575 */ 28576 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28577 return (FALSE); 28578 } 28579 28580 if (un->un_f_cfg_is_atapi == TRUE) { 28581 retry_flag = TRUE; 28582 } 28583 28584 retry: 28585 if (un->un_f_cfg_is_atapi == TRUE) { 28586 /* 28587 * The mode page contents are not required; set the allocation 28588 * length for the mode page header only 28589 */ 28590 hdrlen = MODE_HEADER_LENGTH_GRP2; 28591 sense = kmem_zalloc(hdrlen, KM_SLEEP); 28592 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen, 28593 MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 28594 device_specific = 28595 ((struct mode_header_grp2 *)sense)->device_specific; 28596 } else { 28597 hdrlen = MODE_HEADER_LENGTH; 28598 sense = kmem_zalloc(hdrlen, KM_SLEEP); 28599 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen, 28600 MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 28601 device_specific = 28602 ((struct mode_header *)sense)->device_specific; 28603 } 28604 28605 if (rval != 0) { 28606 if ((un->un_f_cfg_is_atapi == TRUE) && (retry_flag)) { 28607 /* 28608 * An ATAPI Zip drive has been observed to report a 28609 * check condition on the first attempt, with sense 28610 * data indicating a power on or bus device reset. 28611 * Hence, in case of failure, retry at least once 28612 * for ATAPI devices. 28613 */ 28614 retry_flag = FALSE; 28615 kmem_free(sense, hdrlen); 28616 goto retry; 28617 } else { 28618 /* 28619 * Write protect mode sense failed; not all disks 28620 * understand this query. Return FALSE assuming that 28621 * these devices are not writable. 28622 */ 28623 rval = FALSE; 28624 } 28625 } else { 28626 if (device_specific & WRITE_PROTECT) { 28627 rval = TRUE; 28628 } else { 28629 rval = FALSE; 28630 } 28631 } 28632 kmem_free(sense, hdrlen); 28633 return (rval); 28634 } 28635 28636 28637 /* 28638 * Function: sr_volume_ctrl() 28639 * 28640 * Description: This routine is the driver entry point for handling CD-ROM 28641 * audio output volume ioctl requests. (CDROMVOLCTRL) 28642 * 28643 * Arguments: dev - the device 'dev_t' 28644 * data - pointer to user audio volume control structure 28645 * flag - this argument is a pass through to ddi_copyxxx() 28646 * directly from the mode argument of ioctl().
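 *
 *		Example (an illustrative sketch only; 'fd' stands for a
 *		hypothetical open descriptor on the raw CD-ROM device):
 *		setting both supported output ports to full volume:
 *
 *			struct cdrom_volctrl v;
 *
 *			v.channel0 = 0xFF;
 *			v.channel1 = 0xFF;
 *			(void) ioctl(fd, CDROMVOLCTRL, (caddr_t)&v);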
28647 * 28648 * Return Code: the code returned by sd_send_scsi_cmd() 28649 * EFAULT if ddi_copyxxx() fails 28650 * ENXIO if fail ddi_get_soft_state 28651 * EINVAL if data pointer is NULL 28652 * 28653 */ 28654 28655 static int 28656 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 28657 { 28658 struct sd_lun *un; 28659 struct cdrom_volctrl volume; 28660 struct cdrom_volctrl *vol = &volume; 28661 uchar_t *sense_page; 28662 uchar_t *select_page; 28663 uchar_t *sense; 28664 uchar_t *select; 28665 int sense_buflen; 28666 int select_buflen; 28667 int rval; 28668 28669 if (data == NULL) { 28670 return (EINVAL); 28671 } 28672 28673 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28674 (un->un_state == SD_STATE_OFFLINE)) { 28675 return (ENXIO); 28676 } 28677 28678 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 28679 return (EFAULT); 28680 } 28681 28682 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28683 struct mode_header_grp2 *sense_mhp; 28684 struct mode_header_grp2 *select_mhp; 28685 int bd_len; 28686 28687 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 28688 select_buflen = MODE_HEADER_LENGTH_GRP2 + 28689 MODEPAGE_AUDIO_CTRL_LEN; 28690 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28691 select = kmem_zalloc(select_buflen, KM_SLEEP); 28692 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 28693 sense_buflen, MODEPAGE_AUDIO_CTRL, 28694 SD_PATH_STANDARD)) != 0) { 28695 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28696 "sr_volume_ctrl: Mode Sense Failed\n"); 28697 kmem_free(sense, sense_buflen); 28698 kmem_free(select, select_buflen); 28699 return (rval); 28700 } 28701 sense_mhp = (struct mode_header_grp2 *)sense; 28702 select_mhp = (struct mode_header_grp2 *)select; 28703 bd_len = (sense_mhp->bdesc_length_hi << 8) | 28704 sense_mhp->bdesc_length_lo; 28705 if (bd_len > MODE_BLK_DESC_LENGTH) { 28706 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28707 "sr_volume_ctrl: Mode Sense returned invalid " 28708 "block descriptor length\n"); 28709 kmem_free(sense, sense_buflen); 28710 kmem_free(select, select_buflen); 28711 return (EIO); 28712 } 28713 sense_page = (uchar_t *) 28714 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 28715 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 28716 select_mhp->length_msb = 0; 28717 select_mhp->length_lsb = 0; 28718 select_mhp->bdesc_length_hi = 0; 28719 select_mhp->bdesc_length_lo = 0; 28720 } else { 28721 struct mode_header *sense_mhp, *select_mhp; 28722 28723 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28724 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28725 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28726 select = kmem_zalloc(select_buflen, KM_SLEEP); 28727 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 28728 sense_buflen, MODEPAGE_AUDIO_CTRL, 28729 SD_PATH_STANDARD)) != 0) { 28730 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28731 "sr_volume_ctrl: Mode Sense Failed\n"); 28732 kmem_free(sense, sense_buflen); 28733 kmem_free(select, select_buflen); 28734 return (rval); 28735 } 28736 sense_mhp = (struct mode_header *)sense; 28737 select_mhp = (struct mode_header *)select; 28738 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 28739 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28740 "sr_volume_ctrl: Mode Sense returned invalid " 28741 "block descriptor length\n"); 28742 kmem_free(sense, sense_buflen); 28743 kmem_free(select, select_buflen); 28744 return (EIO); 28745 } 28746 sense_page = (uchar_t *) 28747 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 28748 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 28749 select_mhp->length = 0; 28750 select_mhp->bdesc_length = 0; 28751 } 28752 /* 28753 * Note: An audio control data structure could be created and overlayed 28754 * on the following in place of the array indexing method implemented. 28755 */ 28756 28757 /* Build the select data for the user volume data */ 28758 select_page[0] = MODEPAGE_AUDIO_CTRL; 28759 select_page[1] = 0xE; 28760 /* Set the immediate bit */ 28761 select_page[2] = 0x04; 28762 /* Zero out reserved fields */ 28763 select_page[3] = 0x00; 28764 select_page[4] = 0x00; 28765 /* Return sense data for fields not to be modified */ 28766 select_page[5] = sense_page[5]; 28767 select_page[6] = sense_page[6]; 28768 select_page[7] = sense_page[7]; 28769 /* Set the user specified volume levels for channel 0 and 1 */ 28770 select_page[8] = 0x01; 28771 select_page[9] = vol->channel0; 28772 select_page[10] = 0x02; 28773 select_page[11] = vol->channel1; 28774 /* Channel 2 and 3 are currently unsupported so return the sense data */ 28775 select_page[12] = sense_page[12]; 28776 select_page[13] = sense_page[13]; 28777 select_page[14] = sense_page[14]; 28778 select_page[15] = sense_page[15]; 28779 28780 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28781 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 28782 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28783 } else { 28784 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 28785 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28786 } 28787 28788 kmem_free(sense, sense_buflen); 28789 kmem_free(select, select_buflen); 28790 return (rval); 28791 } 28792 28793 28794 /* 28795 * Function: sr_read_sony_session_offset() 28796 * 28797 * Description: This routine is the driver entry point for handling CD-ROM 28798 * ioctl requests for session offset information. (CDROMREADOFFSET) 28799 * The address of the first track in the last session of a 28800 * multi-session CD-ROM is returned 28801 * 28802 * Note: This routine uses a vendor specific key value in the 28803 * command control field without implementing any vendor check here 28804 * or in the ioctl routine. 28805 * 28806 * Arguments: dev - the device 'dev_t' 28807 * data - pointer to an int to hold the requested address 28808 * flag - this argument is a pass through to ddi_copyxxx() 28809 * directly from the mode argument of ioctl(). 28810 * 28811 * Return Code: the code returned by sd_send_scsi_cmd() 28812 * EFAULT if ddi_copyxxx() fails 28813 * ENXIO if fail ddi_get_soft_state 28814 * EINVAL if data pointer is NULL 28815 */ 28816 28817 static int 28818 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 28819 { 28820 struct sd_lun *un; 28821 struct uscsi_cmd *com; 28822 caddr_t buffer; 28823 char cdb[CDB_GROUP1]; 28824 int session_offset = 0; 28825 int rval; 28826 28827 if (data == NULL) { 28828 return (EINVAL); 28829 } 28830 28831 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28832 (un->un_state == SD_STATE_OFFLINE)) { 28833 return (ENXIO); 28834 } 28835 28836 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 28837 bzero(cdb, CDB_GROUP1); 28838 cdb[0] = SCMD_READ_TOC; 28839 /* 28840 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 28841 * (4 byte TOC response header + 8 byte response data) 28842 */ 28843 cdb[8] = SONY_SESSION_OFFSET_LEN; 28844 /* Byte 9 is the control byte. 
A vendor specific value is used */ 28845 cdb[9] = SONY_SESSION_OFFSET_KEY; 28846 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28847 com->uscsi_cdb = cdb; 28848 com->uscsi_cdblen = CDB_GROUP1; 28849 com->uscsi_bufaddr = buffer; 28850 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN; 28851 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28852 28853 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 28854 UIO_SYSSPACE, SD_PATH_STANDARD); 28855 if (rval != 0) { 28856 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 28857 kmem_free(com, sizeof (*com)); 28858 return (rval); 28859 } 28860 if (buffer[1] == SONY_SESSION_OFFSET_VALID) { 28861 session_offset = 28862 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 28863 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 28864 /* 28865 * The offset is returned in current lbasize blocks. Convert it 28866 * to 2k blocks before returning it to the user. 28867 */ 28868 if (un->un_tgt_blocksize == CDROM_BLK_512) { 28869 session_offset >>= 2; 28870 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) { 28871 session_offset >>= 1; 28872 } 28873 } 28874 28875 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) { 28876 rval = EFAULT; 28877 } 28878 28879 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 28880 kmem_free(com, sizeof (*com)); 28881 return (rval); 28882 } 28883 28884 28885 /* 28886 * Function: sd_wm_cache_constructor() 28887 * 28888 * Description: Cache Constructor for the wmap cache for the read/modify/write 28889 * devices. 28890 * 28891 * Arguments: wm - A pointer to the sd_w_map to be initialized. 28892 * un - sd_lun structure for the device. 28893 * flags - the KM flags passed to the constructor 28894 * 28895 * Return Code: 0 on success. 28896 * -1 on failure. 28897 */ 28898 28899 /*ARGSUSED*/ 28900 static int 28901 sd_wm_cache_constructor(void *wm, void *un, int flags) 28902 { 28903 bzero(wm, sizeof (struct sd_w_map)); 28904 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL); 28905 return (0); 28906 } 28907 28908 28909 /* 28910 * Function: sd_wm_cache_destructor() 28911 * 28912 * Description: Cache destructor for the wmap cache for the read/modify/write 28913 * devices. 28914 * 28915 * Arguments: wm - A pointer to the sd_w_map to be destroyed. 28916 * un - sd_lun structure for the device. 28917 */ 28918 /*ARGSUSED*/ 28919 static void 28920 sd_wm_cache_destructor(void *wm, void *un) 28921 { 28922 cv_destroy(&((struct sd_w_map *)wm)->wm_avail); 28923 } 28924 28925 28926 /* 28927 * Function: sd_range_lock() 28928 * 28929 * Description: Lock the range of blocks specified as parameters to ensure 28930 * that a read-modify-write operation is atomic and no other 28931 * I/O writes to the same location. The range is specified in 28932 * terms of start and end blocks. Block numbers are the actual 28933 * media block numbers and not system block numbers. 28934 * 28935 * Arguments: un - sd_lun structure for the device. 28936 * startb - The starting block number 28937 * endb - The end block number 28938 * typ - type of i/o - simple/read_modify_write 28939 * 28940 * Return Code: wm - pointer to the wmap structure. 28941 * 28942 * Context: This routine can sleep.
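 *
 *		Usage sketch (an illustrative pairing with
 *		sd_range_unlock() below, not a new interface): a
 *		read-modify-write path would typically bracket its I/O as
 *
 *			wm = sd_range_lock(un, startb, endb, SD_WTYPE_RMW);
 *			(read the enclosing blocks, merge in the new
 *			data, write the blocks back)
 *			sd_range_unlock(un, wm);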
28943 */ 28944 28945 static struct sd_w_map * 28946 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 28947 { 28948 struct sd_w_map *wmp = NULL; 28949 struct sd_w_map *sl_wmp = NULL; 28950 struct sd_w_map *tmp_wmp; 28951 wm_state state = SD_WM_CHK_LIST; 28952 28953 28954 ASSERT(un != NULL); 28955 ASSERT(!mutex_owned(SD_MUTEX(un))); 28956 28957 mutex_enter(SD_MUTEX(un)); 28958 28959 while (state != SD_WM_DONE) { 28960 28961 switch (state) { 28962 case SD_WM_CHK_LIST: 28963 /* 28964 * This is the starting state. Check the wmap list 28965 * to see if the range is currently available. 28966 */ 28967 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 28968 /* 28969 * If this is a simple write and no rmw 28970 * i/o is pending then try to lock the 28971 * range as the range should be available. 28972 */ 28973 state = SD_WM_LOCK_RANGE; 28974 } else { 28975 tmp_wmp = sd_get_range(un, startb, endb); 28976 if (tmp_wmp != NULL) { 28977 if ((wmp != NULL) && ONLIST(un, wmp)) { 28978 /* 28979 * Should not keep onlist wmps 28980 * while waiting this macro 28981 * will also do wmp = NULL; 28982 */ 28983 FREE_ONLIST_WMAP(un, wmp); 28984 } 28985 /* 28986 * sl_wmp is the wmap on which wait 28987 * is done, since the tmp_wmp points 28988 * to the inuse wmap, set sl_wmp to 28989 * tmp_wmp and change the state to sleep 28990 */ 28991 sl_wmp = tmp_wmp; 28992 state = SD_WM_WAIT_MAP; 28993 } else { 28994 state = SD_WM_LOCK_RANGE; 28995 } 28996 28997 } 28998 break; 28999 29000 case SD_WM_LOCK_RANGE: 29001 ASSERT(un->un_wm_cache); 29002 /* 29003 * The range need to be locked, try to get a wmap. 29004 * First attempt it with NO_SLEEP, want to avoid a sleep 29005 * if possible as we will have to release the sd mutex 29006 * if we have to sleep. 29007 */ 29008 if (wmp == NULL) 29009 wmp = kmem_cache_alloc(un->un_wm_cache, 29010 KM_NOSLEEP); 29011 if (wmp == NULL) { 29012 mutex_exit(SD_MUTEX(un)); 29013 _NOTE(DATA_READABLE_WITHOUT_LOCK 29014 (sd_lun::un_wm_cache)) 29015 wmp = kmem_cache_alloc(un->un_wm_cache, 29016 KM_SLEEP); 29017 mutex_enter(SD_MUTEX(un)); 29018 /* 29019 * we released the mutex so recheck and go to 29020 * check list state. 29021 */ 29022 state = SD_WM_CHK_LIST; 29023 } else { 29024 /* 29025 * We exit out of state machine since we 29026 * have the wmap. Do the housekeeping first. 29027 * place the wmap on the wmap list if it is not 29028 * on it already and then set the state to done. 29029 */ 29030 wmp->wm_start = startb; 29031 wmp->wm_end = endb; 29032 wmp->wm_flags = typ | SD_WM_BUSY; 29033 if (typ & SD_WTYPE_RMW) { 29034 un->un_rmw_count++; 29035 } 29036 /* 29037 * If not already on the list then link 29038 */ 29039 if (!ONLIST(un, wmp)) { 29040 wmp->wm_next = un->un_wm; 29041 wmp->wm_prev = NULL; 29042 if (wmp->wm_next) 29043 wmp->wm_next->wm_prev = wmp; 29044 un->un_wm = wmp; 29045 } 29046 state = SD_WM_DONE; 29047 } 29048 break; 29049 29050 case SD_WM_WAIT_MAP: 29051 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 29052 /* 29053 * Wait is done on sl_wmp, which is set in the 29054 * check_list state. 29055 */ 29056 sl_wmp->wm_wanted_count++; 29057 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 29058 sl_wmp->wm_wanted_count--; 29059 if (!(sl_wmp->wm_flags & SD_WM_BUSY)) { 29060 if (wmp != NULL) 29061 CHK_N_FREEWMP(un, wmp); 29062 wmp = sl_wmp; 29063 } 29064 sl_wmp = NULL; 29065 /* 29066 * After waking up, need to recheck for availability of 29067 * range. 
29068 */ 29069 state = SD_WM_CHK_LIST; 29070 break; 29071 29072 default: 29073 panic("sd_range_lock: " 29074 "Unknown state %d in sd_range_lock", state); 29075 /*NOTREACHED*/ 29076 } /* switch(state) */ 29077 29078 } /* while(state != SD_WM_DONE) */ 29079 29080 mutex_exit(SD_MUTEX(un)); 29081 29082 ASSERT(wmp != NULL); 29083 29084 return (wmp); 29085 } 29086 29087 29088 /* 29089 * Function: sd_get_range() 29090 * 29091 * Description: Find if there any overlapping I/O to this one 29092 * Returns the write-map of 1st such I/O, NULL otherwise. 29093 * 29094 * Arguments: un - sd_lun structure for the device. 29095 * startb - The starting block number 29096 * endb - The end block number 29097 * 29098 * Return Code: wm - pointer to the wmap structure. 29099 */ 29100 29101 static struct sd_w_map * 29102 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 29103 { 29104 struct sd_w_map *wmp; 29105 29106 ASSERT(un != NULL); 29107 29108 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 29109 if (!(wmp->wm_flags & SD_WM_BUSY)) { 29110 continue; 29111 } 29112 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 29113 break; 29114 } 29115 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 29116 break; 29117 } 29118 } 29119 29120 return (wmp); 29121 } 29122 29123 29124 /* 29125 * Function: sd_free_inlist_wmap() 29126 * 29127 * Description: Unlink and free a write map struct. 29128 * 29129 * Arguments: un - sd_lun structure for the device. 29130 * wmp - sd_w_map which needs to be unlinked. 29131 */ 29132 29133 static void 29134 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 29135 { 29136 ASSERT(un != NULL); 29137 29138 if (un->un_wm == wmp) { 29139 un->un_wm = wmp->wm_next; 29140 } else { 29141 wmp->wm_prev->wm_next = wmp->wm_next; 29142 } 29143 29144 if (wmp->wm_next) { 29145 wmp->wm_next->wm_prev = wmp->wm_prev; 29146 } 29147 29148 wmp->wm_next = wmp->wm_prev = NULL; 29149 29150 kmem_cache_free(un->un_wm_cache, wmp); 29151 } 29152 29153 29154 /* 29155 * Function: sd_range_unlock() 29156 * 29157 * Description: Unlock the range locked by wm. 29158 * Free write map if nobody else is waiting on it. 29159 * 29160 * Arguments: un - sd_lun structure for the device. 29161 * wmp - sd_w_map which needs to be unlinked. 29162 */ 29163 29164 static void 29165 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 29166 { 29167 ASSERT(un != NULL); 29168 ASSERT(wm != NULL); 29169 ASSERT(!mutex_owned(SD_MUTEX(un))); 29170 29171 mutex_enter(SD_MUTEX(un)); 29172 29173 if (wm->wm_flags & SD_WTYPE_RMW) { 29174 un->un_rmw_count--; 29175 } 29176 29177 if (wm->wm_wanted_count) { 29178 wm->wm_flags = 0; 29179 /* 29180 * Broadcast that the wmap is available now. 29181 */ 29182 cv_broadcast(&wm->wm_avail); 29183 } else { 29184 /* 29185 * If no one is waiting on the map, it should be free'ed. 29186 */ 29187 sd_free_inlist_wmap(un, wm); 29188 } 29189 29190 mutex_exit(SD_MUTEX(un)); 29191 } 29192 29193 29194 /* 29195 * Function: sd_read_modify_write_task 29196 * 29197 * Description: Called from a taskq thread to initiate the write phase of 29198 * a read-modify-write request. This is used for targets where 29199 * un->un_sys_blocksize != un->un_tgt_blocksize. 29200 * 29201 * Arguments: arg - a pointer to the buf(9S) struct for the write command. 29202 * 29203 * Context: Called under taskq thread context. 
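 *
 *		Dispatch sketch (illustrative; the actual dispatch site is
 *		in the iodone path and is not shown in this excerpt): the
 *		read-completion code would queue the write phase roughly as
 *
 *			(void) taskq_dispatch(sd_wmr_tq,
 *			    sd_read_modify_write_task, bp, KM_NOSLEEP);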
/*
 * Function: sd_read_modify_write_task
 *
 * Description: Called from a taskq thread to initiate the write phase of
 *		a read-modify-write request. This is used for targets where
 *		un->un_sys_blocksize != un->un_tgt_blocksize.
 *
 *   Arguments: arg - a pointer to the buf(9S) struct for the write command.
 *
 *     Context: Called under taskq thread context.
 */

static void
sd_read_modify_write_task(void *arg)
{
	struct sd_mapblocksize_info *bsp;
	struct buf	*bp;
	struct sd_xbuf	*xp;
	struct sd_lun	*un;

	bp = arg;	/* The bp is given in arg */
	ASSERT(bp != NULL);

	/* Get the pointer to the layer-private data struct */
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: entry: buf:0x%p\n", bp);

	/*
	 * This is the write phase of a read-modify-write request, called
	 * under the context of a taskq thread in response to the read
	 * portion of the rmw request completing under interrupt context.
	 * The write request must be sent from here down the iostart chain
	 * as if it were being sent from sd_mapblocksize_iostart(), so we
	 * use the layer index saved in the layer-private data area.
	 */
	SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
}
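
/*
 * Illustrative sketch (an assumption, not a quote of the driver's iodone
 * path): because the read phase completes in interrupt context, where the
 * write phase cannot sleep, the completion handler would hand the bp to a
 * taskq thread along these lines. taskq_dispatch(9F) is the standard DDI
 * interface; whether sd_wmr_tq is the queue actually used for this purpose
 * is an assumption here.
 *
 *	if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task,
 *	    bp, TQ_NOSLEEP) == 0) {
 *		... dispatch failed: fail the command rather than
 *		    sleep in interrupt context ...
 *	}
 */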
/*
 * Function: sddump_do_read_of_rmw()
 *
 * Description: This routine is called from sddump(). If sddump() is called
 *		with an I/O that is not aligned on a device blocksize
 *		boundary, then the write has to be converted to a
 *		read-modify-write. Do the read part here in order to keep
 *		sddump() simple. Note that the sd_mutex is held across the
 *		call to this routine.
 *
 *   Arguments: un	- sd_lun
 *		blkno	- block number in terms of media block size.
 *		nblk	- number of blocks.
 *		bpp	- pointer to pointer to the buf structure. On return
 *			from this function, *bpp points to the valid buffer
 *			to which the write has to be done.
 *
 * Return Code: 0 for success or errno-type return code
 */

static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
    struct buf **bpp)
{
	int err;
	int i;
	int rval;
	struct buf *bp;
	struct scsi_pkt *pkt = NULL;
	uint32_t target_blocksize;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	target_blocksize = un->un_tgt_blocksize;

	mutex_exit(SD_MUTEX(un));

	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
	if (bp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
	    blkno, nblk);
	if (rval != 0) {
		scsi_free_consistent_buf(bp);
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	pkt->pkt_flags |= FLAG_NOINTR;

	err = EIO;
	for (i = 0; i < SD_NDUMP_RETRIES; i++) {

		/*
		 * sd_scsi_poll() returns 0 (success) if the command completes
		 * and the status block is STATUS_GOOD. We should only check
		 * for errors if this condition is not true. Even then we
		 * should send our own request sense packet only if we have a
		 * check condition and auto request sense has not been
		 * performed by the HBA.
		 */
		SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");

		if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
			err = 0;
			break;
		}

		/*
		 * Check for CMD_DEV_GONE first; give up if the device is
		 * gone, as there is no need to read RQS data.
		 */
		if (pkt->pkt_reason == CMD_DEV_GONE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
			    "Device is gone\n");
			break;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with CHECK, try # %d\n", i);
			if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) {
				(void) sd_send_polled_RQS(un);
			}

			continue;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
			int reset_retval = 0;

			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with BUSY, try # %d\n", i);

			if (un->un_f_lun_reset_enabled == TRUE) {
				reset_retval = scsi_reset(SD_ADDRESS(un),
				    RESET_LUN);
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un),
				    RESET_TARGET);
			}
			(void) sd_send_polled_RQS(un);

		} else {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with 0x%x, try # %d\n",
			    SD_GET_PKT_STATUS(pkt), i);
			mutex_enter(SD_MUTEX(un));
			sd_reset_target(un, pkt);
			mutex_exit(SD_MUTEX(un));
		}

		/*
		 * If we are not getting anywhere with lun/target resets,
		 * let's reset the bus.
		 */
		if (i > SD_NDUMP_RETRIES/2) {
			(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			(void) sd_send_polled_RQS(un);
		}

	}
	scsi_destroy_pkt(pkt);

	if (err != 0) {
		scsi_free_consistent_buf(bp);
		*bpp = NULL;
	} else {
		*bpp = bp;
	}

done:
	mutex_enter(SD_MUTEX(un));
	return (err);
}
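
/*
 * A minimal sketch (hypothetical helper, not part of the driver) of the
 * alignment arithmetic behind the read-modify-write conversion: an
 * unaligned byte range is widened to the enclosing target-block range,
 * which is what gets read before the modified blocks are written back.
 *
 *	static void
 *	rmw_cover(uint64_t byte_off, uint64_t byte_len, uint32_t tgt_bs,
 *	    uint64_t *blkno, uint64_t *nblk)
 *	{
 *		*blkno = byte_off / tgt_bs;
 *		*nblk = ((byte_off + byte_len + tgt_bs - 1) / tgt_bs) -
 *		    *blkno;
 *	}
 *
 * For example, a 1024-byte write at byte offset 1536 on a 2048-byte-block
 * device covers blocks 0..1 (blkno 0, nblk 2), so both blocks must be
 * read first even though only part of each is modified.
 */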
/*
 * Function: sd_failfast_flushq
 *
 * Description: Take all bp's on the wait queue that have B_FAILFAST set
 *		in b_flags and move them onto the failfast queue, then kick
 *		off a thread to return all bp's on the failfast queue to
 *		their owners with an error set.
 *
 *   Arguments: un - pointer to the soft state struct for the instance.
 *
 *     Context: may execute in interrupt context.
 */

static void
sd_failfast_flushq(struct sd_lun *un)
{
	struct buf *bp;
	struct buf *next_waitq_bp;
	struct buf *prev_waitq_bp = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
	ASSERT(un->un_failfast_bp == NULL);

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: entry: un:0x%p\n", un);

	/*
	 * Check if we should flush all bufs when entering failfast state,
	 * or just those with B_FAILFAST set.
	 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
		/*
		 * Move *all* bp's on the wait queue to the failfast flush
		 * queue, including those that do NOT have B_FAILFAST set.
		 */
		if (un->un_failfast_headp == NULL) {
			ASSERT(un->un_failfast_tailp == NULL);
			un->un_failfast_headp = un->un_waitq_headp;
		} else {
			ASSERT(un->un_failfast_tailp != NULL);
			un->un_failfast_tailp->av_forw = un->un_waitq_headp;
		}

		un->un_failfast_tailp = un->un_waitq_tailp;

		/* update kstat for each bp moved out of the waitq */
		for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) {
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
		}

		/* empty the waitq */
		un->un_waitq_headp = un->un_waitq_tailp = NULL;

	} else {
		/*
		 * Go through the wait queue, pick off all entries with
		 * B_FAILFAST set, and move these onto the failfast queue.
		 */
		for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
			/*
			 * Save the pointer to the next bp on the wait queue,
			 * so we get to it on the next iteration of this loop.
			 */
			next_waitq_bp = bp->av_forw;

			/*
			 * If this bp from the wait queue does NOT have
			 * B_FAILFAST set, just move on to the next element
			 * in the wait queue. Note, this is the only place
			 * where it is correct to set prev_waitq_bp.
			 */
			if ((bp->b_flags & B_FAILFAST) == 0) {
				prev_waitq_bp = bp;
				continue;
			}

			/*
			 * Remove the bp from the wait queue.
			 */
			if (bp == un->un_waitq_headp) {
				/* The bp is the first element of the waitq. */
				un->un_waitq_headp = next_waitq_bp;
				if (un->un_waitq_headp == NULL) {
					/* The wait queue is now empty */
					un->un_waitq_tailp = NULL;
				}
			} else {
				/*
				 * The bp is either somewhere in the middle
				 * or at the end of the wait queue.
				 */
				ASSERT(un->un_waitq_headp != NULL);
				ASSERT(prev_waitq_bp != NULL);
				ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
				    == 0);
				if (bp == un->un_waitq_tailp) {
					/* bp is the last entry on the waitq. */
					ASSERT(next_waitq_bp == NULL);
					un->un_waitq_tailp = prev_waitq_bp;
				}
				prev_waitq_bp->av_forw = next_waitq_bp;
			}
			bp->av_forw = NULL;

			/*
			 * Update the kstat since the bp is moved out of
			 * the waitq.
			 */
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);

			/*
			 * Now put the bp onto the failfast queue.
			 */
			if (un->un_failfast_headp == NULL) {
				/* failfast queue is currently empty */
				ASSERT(un->un_failfast_tailp == NULL);
				un->un_failfast_headp =
				    un->un_failfast_tailp = bp;
			} else {
				/* Add the bp to the end of the failfast q */
				ASSERT(un->un_failfast_tailp != NULL);
				ASSERT(un->un_failfast_tailp->b_flags &
				    B_FAILFAST);
				un->un_failfast_tailp->av_forw = bp;
				un->un_failfast_tailp = bp;
			}
		}
	}

	/*
	 * Now return all bp's on the failfast queue to their owners.
	 */
	while ((bp = un->un_failfast_headp) != NULL) {

		un->un_failfast_headp = bp->av_forw;
		if (un->un_failfast_headp == NULL) {
			un->un_failfast_tailp = NULL;
		}

		/*
		 * We want to return the bp with a failure error code, but
		 * we do not want a call to sd_start_cmds() to occur here,
		 * so use sd_return_failed_command_no_restart() instead of
		 * sd_return_failed_command().
		 */
		sd_return_failed_command_no_restart(un, bp, EIO);
	}

	/* Flush the xbuf queues if required. */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}
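
/*
 * Illustrative note (an assumption, not documented behavior): since
 * sd_failfast_flushctl is a variable rather than a compile-time constant,
 * the flush policy can in principle be patched by an administrator, e.g.
 * with an /etc/system directive of the form
 *
 *	set sd:sd_failfast_flushctl = <value>
 *
 * where <value> is some OR of SD_FAILFAST_FLUSH_ALL_BUFS and
 * SD_FAILFAST_FLUSH_ALL_QUEUES as defined in this driver. The numeric
 * values are not shown here; consult the definitions before changing
 * anything.
 */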
/*
 * Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *		flushing. Used with ddi_xbuf_flushq(9F).
 *
 *   Arguments: bp - ptr to buf struct to be examined.
 *
 *     Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
	/*
	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
	 * state is entered; OR (2) the given bp has B_FAILFAST set.
	 */
	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}



#if defined(__i386) || defined(__amd64)
/*
 * Function: sd_setup_next_xfer
 *
 * Description: Prepare the next I/O operation using DMA_PARTIAL
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t	num_blks_not_xfered;
	daddr_t	strt_blk_num;
	ssize_t	bytes_not_xfered;
	int	rval;

	ASSERT(pkt->pkt_resid == 0);

	/*
	 * Calculate the next block number and the amount to be transferred.
	 *
	 * How much data has NOT been transferred to the HBA yet.
	 */
	bytes_not_xfered = xp->xb_dma_resid;

	/*
	 * Figure out how many blocks have NOT been transferred to the
	 * HBA yet.
	 */
	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/*
	 * Set the starting block number to the end of what WAS transferred.
	 */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	/*
	 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
	 * will call scsi_init_pkt with NULL_FUNC, so we do not have to
	 * release the disk mutex here.
	 */
	rval = sd_setup_next_rw_pkt(un, pkt, bp,
	    strt_blk_num, num_blks_not_xfered);

	if (rval == 0) {

		/*
		 * Success.
		 *
		 * Adjust things if there are still more blocks to be
		 * transferred.
		 */
		xp->xb_dma_resid = pkt->pkt_resid;
		pkt->pkt_resid = 0;

		return (1);
	}

	/*
	 * There's really only one possible failing return value from
	 * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt
	 * returns NULL.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	bp->b_resid = bp->b_bcount;
	bp->b_flags |= B_ERROR;

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "Error setting up next portion of DMA transfer\n");

	return (0);
}
#endif
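
/*
 * A worked example of the DMA_PARTIAL arithmetic above (numbers are
 * illustrative only). Suppose b_bcount is 1048576 bytes (1 MB), the
 * target block size is 512 bytes, xb_blkno is 100, and after the previous
 * window xb_dma_resid is 393216 bytes:
 *
 *	bytes_not_xfered    = 393216
 *	num_blks_not_xfered = 393216 / 512 = 768 blocks
 *	strt_blk_num        = 100 + (1048576 - 393216) / 512
 *			    = 100 + 1280 = 1380
 *
 * so the next window is a 768-block transfer starting at target block
 * 1380, and pkt_resid after the setup becomes the residual for the window
 * after that.
 */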
/*
 * Note: The following sd_faultinjection_ioctl() routines implement
 * driver support for handling fault injection for error analysis,
 * causing faults in multiple layers of the driver.
 */

#ifdef SD_FAULT_INJECTION
static uint_t sd_fault_injection_on = 0;

/*
 * Function: sd_faultinjection_ioctl()
 *
 * Description: This routine is the driver entry point for handling
 *		fault injection ioctls used to inject errors into the
 *		layer model.
 *
 *   Arguments: cmd	- the ioctl cmd received
 *		arg	- the arguments from the user, also used for returns
 */

static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un) {

	uint_t i;
	uint_t rval;

	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

	mutex_enter(SD_MUTEX(un));

	switch (cmd) {
	case SDIOCRUN:
		/* Allow pushed faults to be injected */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Run\n");

		sd_fault_injection_on = 1;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: run finished\n");
		break;

	case SDIOCSTART:
		/* Start Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Start\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0xFFFFFFFF;
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		mutex_enter(&(un->un_fi_mutex));
		un->sd_fi_log[0] = '\0';
		un->sd_fi_buf_len = 0;
		mutex_exit(&(un->un_fi_mutex));

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: start finished\n");
		break;

	case SDIOCSTOP:
		/* Stop Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0x0;

		/* Empty stray or unused structs from the fifo */
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			if (un->sd_fi_fifo_pkt[i] != NULL) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
			}
			if (un->sd_fi_fifo_xb[i] != NULL) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
			}
			if (un->sd_fi_fifo_un[i] != NULL) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
			}
			if (un->sd_fi_fifo_arq[i] != NULL) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
			}
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: stop finished\n");
		break;

	case SDIOCINSERTPKT:
		/* Store a packet struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		/* No more than SD_FI_MAX_ERROR allowed in the queue */
		if (un->sd_fi_fifo_pkt[i] != NULL) {
			kmem_free(un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt));
		}
		if (arg != NULL) {
			un->sd_fi_fifo_pkt[i] =
			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
			if (un->sd_fi_fifo_pkt[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
				un->sd_fi_fifo_pkt[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: pkt null\n");
		}
		break;

	case SDIOCINSERTXB:
		/* Store an xb struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_xb[i] != NULL) {
			kmem_free(un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb));
			un->sd_fi_fifo_xb[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_xb[i] =
			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
			if (un->sd_fi_fifo_xb[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb), 0);

			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
				un->sd_fi_fifo_xb[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: xb null\n");
		}
		break;

	case SDIOCINSERTUN:
		/* Store a un struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_un[i] != NULL) {
			kmem_free(un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un));
			un->sd_fi_fifo_un[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_un[i] =
			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
			if (un->sd_fi_fifo_un[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
				un->sd_fi_fifo_un[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: un null\n");
		}

		break;

	case SDIOCINSERTARQ:
		/* Store an arq struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_arq[i] != NULL) {
			kmem_free(un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq));
			un->sd_fi_fifo_arq[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_arq[i] =
			    kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/* Push stored xb, pkt, un, and arq onto the fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			if (un->sd_fi_fifo_end + 1 < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return the log buffer from the injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len+1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid; set it to NULL
			 * for the return.
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
	    " exit\n");
}


/*
 * Function: sd_injection_log()
 *
 * Description: This routine appends buf to the existing injection log,
 *		for later retrieval via sd_faultinjection_ioctl() for use
 *		in fault detection and recovery.
 *
 *   Arguments: buf - the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	/* Add the logged value to the injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t offset = strlen((char *)un->sd_fi_log);
		char *destp = (char *)un->sd_fi_log + offset;
		int i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}
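
/*
 * A hedged sketch of how a userland test harness might drive these ioctls
 * (not shipped with the driver; the struct layouts come from sddef.h and
 * the device path and field name are placeholders):
 *
 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDWR);
 *	struct sd_fi_pkt fi_pkt;
 *	uint_t n = 1;
 *
 *	... fill in fi_pkt, e.g. a pkt_reason to be injected ...
 *	(void) ioctl(fd, SDIOCSTART, NULL);
 *	(void) ioctl(fd, SDIOCINSERTPKT, &fi_pkt);
 *	(void) ioctl(fd, SDIOCPUSH, &n);
 *	(void) ioctl(fd, SDIOCRUN, NULL);
 *	... run I/O against the device, then collect the log ...
 *	(void) ioctl(fd, SDIOCRETRIEVE, logbuf);
 *
 * i.e., faults are staged with the SDIOCINSERT* calls, made live with
 * SDIOCPUSH/SDIOCRUN, and the session log is read back with
 * SDIOCRETRIEVE.
 */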
/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
 *
 *   Arguments: pktp - packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* pull the bp, xb and un from the pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* if injection is off, return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}


	/* take the next set off the fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];


	/* set variables accordingly */
	/* set pkt if it was on the fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}

	/* set xb if it was on the fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* copy in block data from sense */
		if (fi_xb->xb_sense_data[0] != -1) {
			bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
			    SENSE_LENGTH);
		}

		/* copy in extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code,
		    "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key,
		    "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code,
		    "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb,
		    es_qual_code, "es_qual_code");
	}

	/* set un if it was on the fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_geometry_is_valid,
		    "un_f_geometry_is_valid");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* copy in auto request sense if it was on the fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* free structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free() does not guarantee that the pointers are set to
	 * NULL. Since we use these pointers to determine whether or not
	 * values were set, make sure they are always NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */
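
/*
 * A small sketch of the fifo indexing used above (illustrative only):
 * sd_fi_fifo_start and sd_fi_fifo_end are free-running counters, and a
 * slot is simply the counter modulo SD_FI_MAX_ERROR, which turns the four
 * parallel arrays into one circular buffer of staged faults:
 *
 *	uint_t consume_slot = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;
 *	uint_t insert_slot  = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
 *
 * Each injected fault consumes one set of (pkt, xb, un, arq) entries at
 * the consume slot and advances sd_fi_fifo_start; the SDIOCINSERT* ioctls
 * stage entries at the insert slot, and SDIOCPUSH advances
 * sd_fi_fifo_end to make them eligible for injection.
 */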