/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */

#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>
#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[]	= "misc/scsi drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[]	= "misc/scsi";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property.  The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd).  (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it.
 * Using INTERCONNECT_FABRIC as the default would confuse that code, and
 * besides things should work fine anyway if the FC HBA already reports
 * INTERCONNECT_FABRIC for the "interconnect-type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
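/*
 * Editor's illustrative sketch (not part of the driver): one way the
 * "interconnect-type" check described above could look, assuming the HBA
 * exports the property on the target's devinfo node.  ddi_prop_get_int()
 * and the INTERCONNECT_* values are the standard DDI/SCSA interfaces; the
 * function name and the SD_EXAMPLE guard are hypothetical, and the
 * SD_INTERCONNECT_* values are defined later in this file.
 */
#ifdef	SD_EXAMPLE
static int
sd_example_interconnect_type(dev_info_t *devi)
{
	switch (ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "interconnect-type", -1)) {
	case INTERCONNECT_FIBRE:
	case INTERCONNECT_SSA:
	case INTERCONNECT_FABRIC:
		/* Assume Fibre Channel behaviors (as per the old ssd) */
		return (SD_INTERCONNECT_FIBRE);
	default:
		/* Property absent or unrecognized: parallel SCSI behaviors */
		return (SD_INTERCONNECT_PARALLEL);
	}
}
#endif	/* SD_EXAMPLE */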
/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_qfull_throttle_timeout	ssd_qfull_throttle_timeout
#define	sd_qfull_throttle_enable	ssd_qfull_throttle_enable
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind

#endif


#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */
int	sd_qfull_throttle_enable	= TRUE;

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY or STATUS_QFULL)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout	= SD_QFULL_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;


/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identify device only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
    sd_scsi_probe_cache_head))
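/*
 * Editor's illustrative sketch (not part of the driver): how a cached
 * scsi_probe() result could be looked up by parent HBA node and target
 * number using the structures above.  The helper name and the hit/miss
 * convention are hypothetical; the real lookup is performed by
 * sd_scsi_probe_with_cache() with sd_scsi_probe_cache_mutex held.
 */
#ifdef	SD_EXAMPLE
static int
sd_example_probe_cache_lookup(dev_info_t *pdip, int tgt, int *resultp)
{
	struct sd_scsi_probe_cache *cp;

	ASSERT(mutex_owned(&sd_scsi_probe_cache_mutex));
	ASSERT((tgt >= 0) && (tgt < NTARGETS_WIDE));

	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == pdip) {
			*resultp = cp->cache[tgt];	/* prior probe result */
			return (1);
		}
	}
	return (0);	/* no cache entry for this HBA node yet */
}
#endif	/* SD_EXAMPLE */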
/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};


static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif

#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))


static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};



#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.)  device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure.  Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure.  Device type is searched as far as the device_id string is
 * defined.  Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI",               SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_RSV_REL_TIME |
			SD_CONF_BSET_MIN_THROTTLE |
			SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_MIN_THROTTLE |
			SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_MIN_THROTTLE |
			SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_NRR_COUNT |
			SD_CONF_BSET_BSY_RETRY_COUNT |
			SD_CONF_BSET_RST_RETRIES |
			SD_CONF_BSET_MIN_THROTTLE |
			SD_CONF_BSET_DISKSORT_DISABLED |
			SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties },
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table) / sizeof (sd_disk_config_t);
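/*
 * Editor's illustrative sketch (not part of the driver): the multiple-blank
 * matching rule described above the table, i.e. a run of consecutive blanks
 * in either string compares equal to a single blank, with leading and
 * trailing blanks ignored.  The real comparison is sd_blank_cmp(); this
 * helper name and its exact semantics are a simplified approximation.
 */
#ifdef	SD_EXAMPLE
static int
sd_example_blank_match(const char *entry, const char *devid)
{
	while (*entry == ' ')
		entry++;
	while (*devid == ' ')
		devid++;

	while ((*entry != '\0') && (*devid != '\0')) {
		if ((*entry == ' ') || (*devid == ' ')) {
			/* Both strings must have a blank run here */
			if ((*entry != ' ') || (*devid != ' '))
				return (0);
			while (*entry == ' ')
				entry++;
			while (*devid == ' ')
				devid++;
			continue;
		}
		if (*entry++ != *devid++)
			return (0);
	}
	/* Trailing blanks are ignored */
	while (*entry == ' ')
		entry++;
	while (*devid == ' ')
		devid++;
	return ((*entry == '\0') && (*devid == '\0'));
}
#endif	/* SD_EXAMPLE */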
/*
 * Return codes of sd_uselabel().
 */
#define	SD_LABEL_IS_VALID		0
#define	SD_LABEL_IS_INVALID		1

#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */
#define	WD_NODE			7	/* the whole disk minor */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }

static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF,  },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};

/*
 * Specifies the number of seconds that must have elapsed since the last
 * command has completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties
#define	sd_validate_geometry		ssd_validate_geometry

#if defined(_SUNOS_VTOC_16)
#define	sd_convert_geometry		ssd_convert_geometry
#endif

#define	sd_resync_geom_caches		ssd_resync_geom_caches
#define	sd_read_fdisk			ssd_read_fdisk
#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_swap_efi_gpt			ssd_swap_efi_gpt
#define	sd_swap_efi_gpe			ssd_swap_efi_gpe
#define	sd_validate_efi			ssd_validate_efi
#define	sd_use_efi			ssd_use_efi
#define	sd_uselabel			ssd_uselabel
#define	sd_build_default_label		ssd_build_default_label
#define	sd_has_max_chs_vals		ssd_has_max_chs_vals
#define	sd_inq_fill			ssd_inq_fill
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid_block		ssd_get_devid_block
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_create_minor_nodes		ssd_create_minor_nodes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_disable_caching		ssd_disable_caching
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_extract_sense_info_descr	ssd_extract_sense_info_descr
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_uscsi_ioctl			ssd_uscsi_ioctl
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_dkio_get_geometry		ssd_dkio_get_geometry
#define	sd_dkio_set_geometry		ssd_dkio_set_geometry
#define	sd_dkio_get_partition		ssd_dkio_get_partition
#define	sd_dkio_set_partition		ssd_dkio_set_partition
#define	sd_dkio_partition		ssd_dkio_partition
#define	sd_dkio_get_vtoc		ssd_dkio_get_vtoc
#define	sd_dkio_get_efi			ssd_dkio_get_efi
#define	sd_build_user_vtoc		ssd_build_user_vtoc
#define	sd_dkio_set_vtoc		ssd_dkio_set_vtoc
#define	sd_dkio_set_efi			ssd_dkio_set_efi
#define	sd_build_label_vtoc		ssd_build_label_vtoc
#define	sd_write_label			ssd_write_label
#define	sd_clear_vtoc			ssd_clear_vtoc
#define	sd_clear_efi			ssd_clear_efi
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_dkio_get_mboot		ssd_dkio_get_mboot
#define	sd_dkio_set_mboot		ssd_dkio_set_mboot
#define	sd_setup_default_geometry	ssd_setup_default_geometry
#define	sd_update_fdisk_and_vtoc	ssd_update_fdisk_and_vtoc
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#define	sd_is_lsi			ssd_is_lsi

#endif	/* #if (defined(__fibre)) */

int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int  sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

static int sd_spin_up_unit(struct sd_lun *un);
#ifdef _LP64
static void sd_enable_descr_sense(struct sd_lun *un);
#endif /* _LP64 */
static void sd_set_mmc_caps(struct sd_lun *un);

static void sd_read_unit_properties(struct sd_lun *un);
static int  sd_process_sdconf_file(struct sd_lun *un);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int  sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int  sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int  sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
	int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);
static int  sd_validate_geometry(struct sd_lun *un, int path_flag);

#if defined(_SUNOS_VTOC_16)
static void sd_convert_geometry(uint64_t capacity, struct dk_geom *un_g);
#endif

static void sd_resync_geom_caches(struct sd_lun *un, int capacity, int lbasize,
	int path_flag);
static int  sd_read_fdisk(struct sd_lun *un, uint_t capacity, int lbasize,
	int path_flag);
static void sd_get_physical_geometry(struct sd_lun *un,
	struct geom_cache *pgeom_p, int capacity, int lbasize, int path_flag);
static void sd_get_virtual_geometry(struct sd_lun *un, int capacity,
	int lbasize);
static int  sd_uselabel(struct sd_lun *un, struct dk_label *l, int path_flag);
static void sd_swap_efi_gpt(efi_gpt_t *);
static void sd_swap_efi_gpe(int nparts, efi_gpe_t *);
static int sd_validate_efi(efi_gpt_t *);
static int sd_use_efi(struct sd_lun *, int);
static void sd_build_default_label(struct sd_lun *un);

#if defined(_FIRMWARE_NEEDS_FDISK)
static int  sd_has_max_chs_vals(struct ipart *fdp);
#endif
static void sd_inq_fill(char *p, int l, char *s);


static void sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static daddr_t  sd_get_devid_block(struct sd_lun *un);
static int  sd_get_devid(struct sd_lun *un);
static int  sd_get_serialnum(struct sd_lun *un, uchar_t *wwn, int *len);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int  sd_write_deviceid(struct sd_lun *un);
static int  sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int  sd_check_vpd_page_support(struct sd_lun *un);

static void sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int  sd_ddi_suspend(dev_info_t *devi);
static int  sd_ddi_pm_suspend(struct sd_lun *un);
static int  sd_ddi_resume(dev_info_t *devi);
static int  sd_ddi_pm_resume(struct sd_lun *un);
static int  sdpower(dev_info_t *devi, int component, int level);

static int  sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int  sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int  sd_unit_attach(dev_info_t *devi);
static int  sd_unit_detach(dev_info_t *devi);

static int  sd_create_minor_nodes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int  sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int  sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int  sd_send_polled_RQS(struct sd_lun *un);
static int  sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif


static int sd_disable_caching(struct sd_lun *un);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
	uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int  sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int  sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int  sd_ready_and_valid(struct sd_lun *un);

static void sdmin(struct buf *bp);
static int  sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int  sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int  sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int  sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);
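/*
 * Editor's illustrative sketch (not part of the driver): each layering
 * function above takes a chain index and hands the buf to the next layer
 * by invoking the entry at index + 1 in a chain of function pointers.  The
 * array contents and names here are hypothetical; the real chains are the
 * sd_iostart_chain/sd_iodone_chain tables built later in this file from
 * these layering functions.
 */
#ifdef	SD_EXAMPLE
typedef void (*sd_example_iostart_t)(int index, struct sd_lun *un,
	struct buf *bp);

static sd_example_iostart_t sd_example_chain[] = {
	sd_mapblockaddr_iostart,	/* partition-relative block mapping */
	sd_pm_iostart,			/* power management checks */
	sd_core_iostart			/* queue/transport the command */
};

static void
sd_example_next_layer(int index, struct sd_lun *un, struct buf *bp)
{
	/* Hand the buf to the next layer in the chain */
	(*(sd_example_chain[index + 1]))(index + 1, un, bp);
}
#endif	/* SD_EXAMPLE */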
/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
	struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
	struct buf *bp, int flags,
	int (*callback)(caddr_t), caddr_t callback_arg,
	diskaddr_t lba, uint32_t blockcount);
#if defined(__i386) || defined(__amd64)
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
	struct buf *bp, diskaddr_t lba, uint32_t blockcount);
#endif /* defined(__i386) || defined(__amd64) */

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
	uchar_t chain_type, void *pktinfop);

static int  sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd,
	enum uio_seg cdbspace, enum uio_seg dataspace, enum uio_seg rqbufspace,
	int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
	daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
	uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
	struct sd_xbuf *xp, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
	int retry_check_flag,
	void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
		int c),
	void *user_arg, int failure_code, clock_t retry_delay,
	void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
	clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
	struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
	int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
	struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int code);
static diskaddr_t sd_extract_sense_info_descr(
	struct scsi_descr_sense_hdr *sdsp);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
	uint8_t asc,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
	uint8_t asc, uint8_t ascq,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
	int sense_key, uint8_t asc,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
	uint8_t asc,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
	int sense_key,
	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
	void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
	struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
	uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
	uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
	int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
	size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
	uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
	uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
	struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
	uchar_t *bufaddr, uint_t buflen);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
	struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
	uchar_t *bufaddr, uint_t buflen, char feature);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
	uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
	uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
	size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,\
	path_flag)

static int sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr,
	uint16_t buflen, uchar_t page_code, uchar_t page_control,
	uint16_t param_ptr, int path_flag);

static int  sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);

static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
	uchar_t *data, int len, int fmt);

/*
 * Disk Ioctl Function Prototypes
 */
static int sd_uscsi_ioctl(dev_t dev, caddr_t arg, int flag);
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_geometry(dev_t dev, caddr_t arg, int flag,
	int geom_validated);
static int sd_dkio_set_geometry(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_partition(dev_t dev, caddr_t arg, int flag,
	int geom_validated);
static int sd_dkio_set_partition(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_vtoc(dev_t dev, caddr_t arg, int flag,
	int geom_validated);
static int sd_dkio_get_efi(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_partition(dev_t dev, caddr_t arg, int flag);
static void sd_build_user_vtoc(struct sd_lun *un, struct vtoc *user_vtoc);
static int sd_dkio_set_vtoc(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_set_efi(dev_t dev, caddr_t arg, int flag);
static int sd_build_label_vtoc(struct sd_lun *un, struct vtoc *user_vtoc);
static int sd_write_label(dev_t dev);
static int sd_set_vtoc(struct sd_lun *un, struct dk_label *dkl);
static void sd_clear_vtoc(struct sd_lun *un);
static void sd_clear_efi(struct sd_lun *un);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_mboot(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_set_mboot(dev_t dev, caddr_t arg, int flag);
static void sd_setup_default_geometry(struct sd_lun *un);
defined(__i386) || defined(__amd64) 1437 static int sd_update_fdisk_and_vtoc(struct sd_lun *un); 1438 #endif 1439 1440 /* 1441 * Multi-host Ioctl Prototypes 1442 */ 1443 static int sd_check_mhd(dev_t dev, int interval); 1444 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1445 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt); 1446 static char *sd_sname(uchar_t status); 1447 static void sd_mhd_resvd_recover(void *arg); 1448 static void sd_resv_reclaim_thread(); 1449 static int sd_take_ownership(dev_t dev, struct mhioctkown *p); 1450 static int sd_reserve_release(dev_t dev, int cmd); 1451 static void sd_rmv_resv_reclaim_req(dev_t dev); 1452 static void sd_mhd_reset_notify_cb(caddr_t arg); 1453 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un, 1454 mhioc_inkeys_t *usrp, int flag); 1455 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un, 1456 mhioc_inresvs_t *usrp, int flag); 1457 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag); 1458 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag); 1459 static int sd_mhdioc_release(dev_t dev); 1460 static int sd_mhdioc_register_devid(dev_t dev); 1461 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag); 1462 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag); 1463 1464 /* 1465 * SCSI removable prototypes 1466 */ 1467 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag); 1468 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1469 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag); 1470 static int sr_pause_resume(dev_t dev, int mode); 1471 static int sr_play_msf(dev_t dev, caddr_t data, int flag); 1472 static int sr_play_trkind(dev_t dev, caddr_t data, int flag); 1473 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag); 1474 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag); 1475 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag); 1476 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag); 1477 static int sr_read_cdda(dev_t dev, caddr_t data, int flag); 1478 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag); 1479 static int sr_read_mode1(dev_t dev, caddr_t data, int flag); 1480 static int sr_read_mode2(dev_t dev, caddr_t data, int flag); 1481 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag); 1482 static int sr_sector_mode(dev_t dev, uint32_t blksize); 1483 static int sr_eject(dev_t dev); 1484 static void sr_ejected(register struct sd_lun *un); 1485 static int sr_check_wp(dev_t dev); 1486 static int sd_check_media(dev_t dev, enum dkio_state state); 1487 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp); 1488 static void sd_delayed_cv_broadcast(void *arg); 1489 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag); 1490 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag); 1491 1492 static int sd_log_page_supported(struct sd_lun *un, int log_page); 1493 1494 /* 1495 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions. 
1496 */ 1497 static void sd_check_for_writable_cd(struct sd_lun *un); 1498 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1499 static void sd_wm_cache_destructor(void *wm, void *un); 1500 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1501 daddr_t endb, ushort_t typ); 1502 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1503 daddr_t endb); 1504 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1505 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1506 static void sd_read_modify_write_task(void * arg); 1507 static int 1508 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1509 struct buf **bpp); 1510 1511 1512 /* 1513 * Function prototypes for failfast support. 1514 */ 1515 static void sd_failfast_flushq(struct sd_lun *un); 1516 static int sd_failfast_flushq_callback(struct buf *bp); 1517 1518 /* 1519 * Function prototypes to check for lsi devices 1520 */ 1521 static void sd_is_lsi(struct sd_lun *un); 1522 1523 /* 1524 * Function prototypes for x86 support 1525 */ 1526 #if defined(__i386) || defined(__amd64) 1527 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1528 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1529 #endif 1530 1531 /* 1532 * Constants for failfast support: 1533 * 1534 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1535 * failfast processing being performed. 1536 * 1537 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1538 * failfast processing on all bufs with B_FAILFAST set. 1539 */ 1540 1541 #define SD_FAILFAST_INACTIVE 0 1542 #define SD_FAILFAST_ACTIVE 1 1543 1544 /* 1545 * Bitmask to control behavior of buf(9S) flushes when a transition to 1546 * the failfast state occurs. Optional bits include: 1547 * 1548 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1549 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1550 * be flushed. 1551 * 1552 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1553 * driver, in addition to the regular wait queue. This includes the xbuf 1554 * queues. When clear, only the driver's wait queue will be flushed. 1555 */ 1556 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1557 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1558 1559 /* 1560 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1561 * to flush all queues within the driver. 
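 *
 * For example (editorial sketch, not part of the original source): an
 * administrator who also wanted non-B_FAILFAST bufs flushed could, in
 * principle, set both bits from /etc/system, assuming the tunable can be
 * patched by name in the usual way:
 *
 *	set sd:sd_failfast_flushctl = 0x3
 *
 * where 0x3 == (SD_FAILFAST_FLUSH_ALL_BUFS | SD_FAILFAST_FLUSH_ALL_QUEUES).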
1562 */ 1563 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES; 1564 1565 1566 /* 1567 * SD Testing Fault Injection 1568 */ 1569 #ifdef SD_FAULT_INJECTION 1570 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un); 1571 static void sd_faultinjection(struct scsi_pkt *pktp); 1572 static void sd_injection_log(char *buf, struct sd_lun *un); 1573 #endif 1574 1575 /* 1576 * Device driver ops vector 1577 */ 1578 static struct cb_ops sd_cb_ops = { 1579 sdopen, /* open */ 1580 sdclose, /* close */ 1581 sdstrategy, /* strategy */ 1582 nodev, /* print */ 1583 sddump, /* dump */ 1584 sdread, /* read */ 1585 sdwrite, /* write */ 1586 sdioctl, /* ioctl */ 1587 nodev, /* devmap */ 1588 nodev, /* mmap */ 1589 nodev, /* segmap */ 1590 nochpoll, /* poll */ 1591 sd_prop_op, /* cb_prop_op */ 1592 0, /* streamtab */ 1593 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */ 1594 CB_REV, /* cb_rev */ 1595 sdaread, /* async I/O read entry point */ 1596 sdawrite /* async I/O write entry point */ 1597 }; 1598 1599 static struct dev_ops sd_ops = { 1600 DEVO_REV, /* devo_rev, */ 1601 0, /* refcnt */ 1602 sdinfo, /* info */ 1603 nulldev, /* identify */ 1604 sdprobe, /* probe */ 1605 sdattach, /* attach */ 1606 sddetach, /* detach */ 1607 nodev, /* reset */ 1608 &sd_cb_ops, /* driver operations */ 1609 NULL, /* bus operations */ 1610 sdpower /* power */ 1611 }; 1612 1613 1614 /* 1615 * This is the loadable module wrapper. 1616 */ 1617 #include <sys/modctl.h> 1618 1619 static struct modldrv modldrv = { 1620 &mod_driverops, /* Type of module. This one is a driver */ 1621 SD_MODULE_NAME, /* Module name. */ 1622 &sd_ops /* driver ops */ 1623 }; 1624 1625 1626 static struct modlinkage modlinkage = { 1627 MODREV_1, 1628 &modldrv, 1629 NULL 1630 }; 1631 1632 1633 static struct scsi_asq_key_strings sd_additional_codes[] = { 1634 0x81, 0, "Logical Unit is Reserved", 1635 0x85, 0, "Audio Address Not Valid", 1636 0xb6, 0, "Media Load Mechanism Failed", 1637 0xB9, 0, "Audio Play Operation Aborted", 1638 0xbf, 0, "Buffer Overflow for Read All Subcodes Command", 1639 0x53, 2, "Medium removal prevented", 1640 0x6f, 0, "Authentication failed during key exchange", 1641 0x6f, 1, "Key not present", 1642 0x6f, 2, "Key not established", 1643 0x6f, 3, "Read without proper authentication", 1644 0x6f, 4, "Mismatched region to this logical unit", 1645 0x6f, 5, "Region reset count error", 1646 0xffff, 0x0, NULL 1647 }; 1648 1649 1650 /* 1651 * Struct for passing printing information for sense data messages 1652 */ 1653 struct sd_sense_info { 1654 int ssi_severity; 1655 int ssi_pfa_flag; 1656 }; 1657 1658 /* 1659 * Table of function pointers for iostart-side routines. Separate "chains" 1660 * of layered function calls are formed by placing the function pointers 1661 * sequentially in the desired order. Functions are called according to an 1662 * incrementing table index ordering. The last function in each chain must 1663 * be sd_core_iostart(). The corresponding iodone-side routines are expected 1664 * in the sd_iodone_chain[] array. 1665 * 1666 * Note: It may seem more natural to organize both the iostart and iodone 1667 * functions together, into an array of structures (or some similar 1668 * organization) with a common index, rather than two separate arrays which 1669 * must be maintained in synchronization. The purpose of this division is 1670 * to achieve improved performance: individual arrays allow for more 1671 * effective cache line utilization on certain platforms.
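 *
 * As an illustration (editorial, not from the original source): a buf
 * entering the disk chain with PM enabled (see sd_iostart_chain[] below)
 * starts at index 0 and is handed down the chain with an incrementing
 * index, roughly:
 *
 *	sd_mapblockaddr_iostart(0, un, bp)	then SD_NEXT_IOSTART to
 *	sd_pm_iostart(1, un, bp)		then SD_NEXT_IOSTART to
 *	sd_core_iostart(2, un, bp)		which transports the command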
1672 */ 1673 1674 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1675 1676 1677 static sd_chain_t sd_iostart_chain[] = { 1678 1679 /* Chain for buf IO for disk drive targets (PM enabled) */ 1680 sd_mapblockaddr_iostart, /* Index: 0 */ 1681 sd_pm_iostart, /* Index: 1 */ 1682 sd_core_iostart, /* Index: 2 */ 1683 1684 /* Chain for buf IO for disk drive targets (PM disabled) */ 1685 sd_mapblockaddr_iostart, /* Index: 3 */ 1686 sd_core_iostart, /* Index: 4 */ 1687 1688 /* Chain for buf IO for removable-media targets (PM enabled) */ 1689 sd_mapblockaddr_iostart, /* Index: 5 */ 1690 sd_mapblocksize_iostart, /* Index: 6 */ 1691 sd_pm_iostart, /* Index: 7 */ 1692 sd_core_iostart, /* Index: 8 */ 1693 1694 /* Chain for buf IO for removable-media targets (PM disabled) */ 1695 sd_mapblockaddr_iostart, /* Index: 9 */ 1696 sd_mapblocksize_iostart, /* Index: 10 */ 1697 sd_core_iostart, /* Index: 11 */ 1698 1699 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1700 sd_mapblockaddr_iostart, /* Index: 12 */ 1701 sd_checksum_iostart, /* Index: 13 */ 1702 sd_pm_iostart, /* Index: 14 */ 1703 sd_core_iostart, /* Index: 15 */ 1704 1705 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1706 sd_mapblockaddr_iostart, /* Index: 16 */ 1707 sd_checksum_iostart, /* Index: 17 */ 1708 sd_core_iostart, /* Index: 18 */ 1709 1710 /* Chain for USCSI commands (all targets) */ 1711 sd_pm_iostart, /* Index: 19 */ 1712 sd_core_iostart, /* Index: 20 */ 1713 1714 /* Chain for checksumming USCSI commands (all targets) */ 1715 sd_checksum_uscsi_iostart, /* Index: 21 */ 1716 sd_pm_iostart, /* Index: 22 */ 1717 sd_core_iostart, /* Index: 23 */ 1718 1719 /* Chain for "direct" USCSI commands (all targets) */ 1720 sd_core_iostart, /* Index: 24 */ 1721 1722 /* Chain for "direct priority" USCSI commands (all targets) */ 1723 sd_core_iostart, /* Index: 25 */ 1724 }; 1725 1726 /* 1727 * Macros to locate the first function of each iostart chain in the 1728 * sd_iostart_chain[] array. These are located by the index in the array. 1729 */ 1730 #define SD_CHAIN_DISK_IOSTART 0 1731 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1732 #define SD_CHAIN_RMMEDIA_IOSTART 5 1733 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1734 #define SD_CHAIN_CHKSUM_IOSTART 12 1735 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1736 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1737 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1738 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1739 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1740 1741 1742 /* 1743 * Table of function pointers for the iodone-side routines for the driver- 1744 * internal layering mechanism. The calling sequence for iodone routines 1745 * uses a decrementing table index, so the last routine called in a chain 1746 * must be at the lowest array index location for that chain. The last 1747 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1748 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1749 * of the functions in an iodone side chain must correspond to the ordering 1750 * of the iostart routines for that chain. Note that there is no iodone 1751 * side routine that corresponds to sd_core_iostart(), so there is no 1752 * entry in the table for this. 
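 *
 * For example (editorial sketch): when a buf IO started via
 * SD_CHAIN_DISK_IOSTART completes, iodone processing begins at
 * SD_CHAIN_DISK_IODONE (index 2, defined below) and the index decrements
 * from there:
 *
 *	sd_pm_iodone(2, un, bp)			then SD_NEXT_IODONE to
 *	sd_mapblockaddr_iodone(1, un, bp)	then SD_NEXT_IODONE to
 *	sd_buf_iodone(0, un, bp)		which completes the buf(9S)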
1753 */ 1754 1755 static sd_chain_t sd_iodone_chain[] = { 1756 1757 /* Chain for buf IO for disk drive targets (PM enabled) */ 1758 sd_buf_iodone, /* Index: 0 */ 1759 sd_mapblockaddr_iodone, /* Index: 1 */ 1760 sd_pm_iodone, /* Index: 2 */ 1761 1762 /* Chain for buf IO for disk drive targets (PM disabled) */ 1763 sd_buf_iodone, /* Index: 3 */ 1764 sd_mapblockaddr_iodone, /* Index: 4 */ 1765 1766 /* Chain for buf IO for removable-media targets (PM enabled) */ 1767 sd_buf_iodone, /* Index: 5 */ 1768 sd_mapblockaddr_iodone, /* Index: 6 */ 1769 sd_mapblocksize_iodone, /* Index: 7 */ 1770 sd_pm_iodone, /* Index: 8 */ 1771 1772 /* Chain for buf IO for removable-media targets (PM disabled) */ 1773 sd_buf_iodone, /* Index: 9 */ 1774 sd_mapblockaddr_iodone, /* Index: 10 */ 1775 sd_mapblocksize_iodone, /* Index: 11 */ 1776 1777 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1778 sd_buf_iodone, /* Index: 12 */ 1779 sd_mapblockaddr_iodone, /* Index: 13 */ 1780 sd_checksum_iodone, /* Index: 14 */ 1781 sd_pm_iodone, /* Index: 15 */ 1782 1783 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1784 sd_buf_iodone, /* Index: 16 */ 1785 sd_mapblockaddr_iodone, /* Index: 17 */ 1786 sd_checksum_iodone, /* Index: 18 */ 1787 1788 /* Chain for USCSI commands (non-checksum targets) */ 1789 sd_uscsi_iodone, /* Index: 19 */ 1790 sd_pm_iodone, /* Index: 20 */ 1791 1792 /* Chain for USCSI commands (checksum targets) */ 1793 sd_uscsi_iodone, /* Index: 21 */ 1794 sd_checksum_uscsi_iodone, /* Index: 22 */ 1795 sd_pm_iodone, /* Index: 23 */ 1796 1797 /* Chain for "direct" USCSI commands (all targets) */ 1798 sd_uscsi_iodone, /* Index: 24 */ 1799 1800 /* Chain for "direct priority" USCSI commands (all targets) */ 1801 sd_uscsi_iodone, /* Index: 25 */ 1802 }; 1803 1804 1805 /* 1806 * Macros to locate the "first" function in the sd_iodone_chain[] array for 1807 * each iodone-side chain. These are located by the array index, but as the 1808 * iodone side functions are called in a decrementing-index order, the 1809 * highest index number in each chain must be specified (as these correspond 1810 * to the first function in the iodone chain that will be called by the core 1811 * at IO completion time). 1812 */ 1813 1814 #define SD_CHAIN_DISK_IODONE 2 1815 #define SD_CHAIN_DISK_IODONE_NO_PM 4 1816 #define SD_CHAIN_RMMEDIA_IODONE 8 1817 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11 1818 #define SD_CHAIN_CHKSUM_IODONE 15 1819 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18 1820 #define SD_CHAIN_USCSI_CMD_IODONE 20 1821 #define SD_CHAIN_USCSI_CHKSUM_IODONE 22 1822 #define SD_CHAIN_DIRECT_CMD_IODONE 24 1823 #define SD_CHAIN_PRIORITY_CMD_IODONE 25 1824 1825 1826 1827 1828 /* 1829 * Array to map a layering chain index to the appropriate initpkt routine. 1830 * The redundant entries are present so that the index used for accessing 1831 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1832 * with this table as well.
1833 */ 1834 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **); 1835 1836 static sd_initpkt_t sd_initpkt_map[] = { 1837 1838 /* Chain for buf IO for disk drive targets (PM enabled) */ 1839 sd_initpkt_for_buf, /* Index: 0 */ 1840 sd_initpkt_for_buf, /* Index: 1 */ 1841 sd_initpkt_for_buf, /* Index: 2 */ 1842 1843 /* Chain for buf IO for disk drive targets (PM disabled) */ 1844 sd_initpkt_for_buf, /* Index: 3 */ 1845 sd_initpkt_for_buf, /* Index: 4 */ 1846 1847 /* Chain for buf IO for removable-media targets (PM enabled) */ 1848 sd_initpkt_for_buf, /* Index: 5 */ 1849 sd_initpkt_for_buf, /* Index: 6 */ 1850 sd_initpkt_for_buf, /* Index: 7 */ 1851 sd_initpkt_for_buf, /* Index: 8 */ 1852 1853 /* Chain for buf IO for removable-media targets (PM disabled) */ 1854 sd_initpkt_for_buf, /* Index: 9 */ 1855 sd_initpkt_for_buf, /* Index: 10 */ 1856 sd_initpkt_for_buf, /* Index: 11 */ 1857 1858 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1859 sd_initpkt_for_buf, /* Index: 12 */ 1860 sd_initpkt_for_buf, /* Index: 13 */ 1861 sd_initpkt_for_buf, /* Index: 14 */ 1862 sd_initpkt_for_buf, /* Index: 15 */ 1863 1864 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1865 sd_initpkt_for_buf, /* Index: 16 */ 1866 sd_initpkt_for_buf, /* Index: 17 */ 1867 sd_initpkt_for_buf, /* Index: 18 */ 1868 1869 /* Chain for USCSI commands (non-checksum targets) */ 1870 sd_initpkt_for_uscsi, /* Index: 19 */ 1871 sd_initpkt_for_uscsi, /* Index: 20 */ 1872 1873 /* Chain for USCSI commands (checksum targets) */ 1874 sd_initpkt_for_uscsi, /* Index: 21 */ 1875 sd_initpkt_for_uscsi, /* Index: 22 */ 1876 sd_initpkt_for_uscsi, /* Index: 23 */ 1877 1878 /* Chain for "direct" USCSI commands (all targets) */ 1879 sd_initpkt_for_uscsi, /* Index: 24 */ 1880 1881 /* Chain for "direct priority" USCSI commands (all targets) */ 1882 sd_initpkt_for_uscsi, /* Index: 25 */ 1883 1884 }; 1885 1886 1887 /* 1888 * Array to map a layering chain index to the appropriate destroypkt routine. 1889 * The redundant entries are present so that the index used for accessing 1890 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1891 * with this table as well.
1892 */ 1893 typedef void (*sd_destroypkt_t)(struct buf *); 1894 1895 static sd_destroypkt_t sd_destroypkt_map[] = { 1896 1897 /* Chain for buf IO for disk drive targets (PM enabled) */ 1898 sd_destroypkt_for_buf, /* Index: 0 */ 1899 sd_destroypkt_for_buf, /* Index: 1 */ 1900 sd_destroypkt_for_buf, /* Index: 2 */ 1901 1902 /* Chain for buf IO for disk drive targets (PM disabled) */ 1903 sd_destroypkt_for_buf, /* Index: 3 */ 1904 sd_destroypkt_for_buf, /* Index: 4 */ 1905 1906 /* Chain for buf IO for removable-media targets (PM enabled) */ 1907 sd_destroypkt_for_buf, /* Index: 5 */ 1908 sd_destroypkt_for_buf, /* Index: 6 */ 1909 sd_destroypkt_for_buf, /* Index: 7 */ 1910 sd_destroypkt_for_buf, /* Index: 8 */ 1911 1912 /* Chain for buf IO for removable-media targets (PM disabled) */ 1913 sd_destroypkt_for_buf, /* Index: 9 */ 1914 sd_destroypkt_for_buf, /* Index: 10 */ 1915 sd_destroypkt_for_buf, /* Index: 11 */ 1916 1917 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1918 sd_destroypkt_for_buf, /* Index: 12 */ 1919 sd_destroypkt_for_buf, /* Index: 13 */ 1920 sd_destroypkt_for_buf, /* Index: 14 */ 1921 sd_destroypkt_for_buf, /* Index: 15 */ 1922 1923 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1924 sd_destroypkt_for_buf, /* Index: 16 */ 1925 sd_destroypkt_for_buf, /* Index: 17 */ 1926 sd_destroypkt_for_buf, /* Index: 18 */ 1927 1928 /* Chain for USCSI commands (non-checksum targets) */ 1929 sd_destroypkt_for_uscsi, /* Index: 19 */ 1930 sd_destroypkt_for_uscsi, /* Index: 20 */ 1931 1932 /* Chain for USCSI commands (checksum targets) */ 1933 sd_destroypkt_for_uscsi, /* Index: 21 */ 1934 sd_destroypkt_for_uscsi, /* Index: 22 */ 1935 sd_destroypkt_for_uscsi, /* Index: 23 */ 1936 1937 /* Chain for "direct" USCSI commands (all targets) */ 1938 sd_destroypkt_for_uscsi, /* Index: 24 */ 1939 1940 /* Chain for "direct priority" USCSI commands (all targets) */ 1941 sd_destroypkt_for_uscsi, /* Index: 25 */ 1942 1943 }; 1944 1945 1946 1947 /* 1948 * Array to map a layering chain index to the appropriate chain "type". 1949 * The chain type indicates a specific property/usage of the chain. 1950 * The redundant entries are present so that the index used for accessing 1951 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly 1952 * with this table as well.
1953 */ 1954 1955 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */ 1956 #define SD_CHAIN_BUFIO 1 /* regular buf IO */ 1957 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */ 1958 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */ 1959 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */ 1960 /* (for error recovery) */ 1961 1962 static int sd_chain_type_map[] = { 1963 1964 /* Chain for buf IO for disk drive targets (PM enabled) */ 1965 SD_CHAIN_BUFIO, /* Index: 0 */ 1966 SD_CHAIN_BUFIO, /* Index: 1 */ 1967 SD_CHAIN_BUFIO, /* Index: 2 */ 1968 1969 /* Chain for buf IO for disk drive targets (PM disabled) */ 1970 SD_CHAIN_BUFIO, /* Index: 3 */ 1971 SD_CHAIN_BUFIO, /* Index: 4 */ 1972 1973 /* Chain for buf IO for removable-media targets (PM enabled) */ 1974 SD_CHAIN_BUFIO, /* Index: 5 */ 1975 SD_CHAIN_BUFIO, /* Index: 6 */ 1976 SD_CHAIN_BUFIO, /* Index: 7 */ 1977 SD_CHAIN_BUFIO, /* Index: 8 */ 1978 1979 /* Chain for buf IO for removable-media targets (PM disabled) */ 1980 SD_CHAIN_BUFIO, /* Index: 9 */ 1981 SD_CHAIN_BUFIO, /* Index: 10 */ 1982 SD_CHAIN_BUFIO, /* Index: 11 */ 1983 1984 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1985 SD_CHAIN_BUFIO, /* Index: 12 */ 1986 SD_CHAIN_BUFIO, /* Index: 13 */ 1987 SD_CHAIN_BUFIO, /* Index: 14 */ 1988 SD_CHAIN_BUFIO, /* Index: 15 */ 1989 1990 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1991 SD_CHAIN_BUFIO, /* Index: 16 */ 1992 SD_CHAIN_BUFIO, /* Index: 17 */ 1993 SD_CHAIN_BUFIO, /* Index: 18 */ 1994 1995 /* Chain for USCSI commands (non-checksum targets) */ 1996 SD_CHAIN_USCSI, /* Index: 19 */ 1997 SD_CHAIN_USCSI, /* Index: 20 */ 1998 1999 /* Chain for USCSI commands (checksum targets) */ 2000 SD_CHAIN_USCSI, /* Index: 21 */ 2001 SD_CHAIN_USCSI, /* Index: 22 */ 2002 SD_CHAIN_USCSI, /* Index: 23 */ 2003 2004 /* Chain for "direct" USCSI commands (all targets) */ 2005 SD_CHAIN_DIRECT, /* Index: 24 */ 2006 2007 /* Chain for "direct priority" USCSI commands (all targets) */ 2008 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */ 2009 }; 2010 2011 2012 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */ 2013 #define SD_IS_BUFIO(xp) \ 2014 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO) 2015 2016 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */ 2017 #define SD_IS_DIRECT_PRIORITY(xp) \ 2018 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY) 2019 2020 2021 2022 /* 2023 * Struct, array, and macros to map a specific chain to the appropriate 2024 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays. 2025 * 2026 * The sd_chain_index_map[] array is used at attach time to set the various 2027 * un_xxx_chain type members of the sd_lun softstate to the specific layering 2028 * chain to be used with the instance. This allows different instances to use 2029 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart 2030 * and xb_chain_iodone index values in the sd_xbuf are initialized to these 2031 * values at sd_xbuf init time, this allows (1) layering chains to be changed 2032 * dynamically and without the use of locking; and (2) a layer to update the 2033 * xb_chain_io[start|done] member in a given xbuf with its current index value, 2034 * to allow for deferred processing of an IO within the same chain from a 2035 * different execution context.
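 *
 * For example (an editorial sketch of the mechanism described above): at
 * attach time a plain disk instance might be set up as
 *
 *	un->un_buf_chain_type   = SD_CHAIN_INFO_DISK;
 *	un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
 *
 * and sd_xbuf initialization would then seed each xbuf from the map:
 *
 *	xp->xb_chain_iostart =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iostart_index;
 *	xp->xb_chain_iodone =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iodone_index;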
2036 */ 2037 2038 struct sd_chain_index { 2039 int sci_iostart_index; 2040 int sci_iodone_index; 2041 }; 2042 2043 static struct sd_chain_index sd_chain_index_map[] = { 2044 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2045 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2046 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2047 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2048 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2049 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2050 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2051 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2052 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2053 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2054 }; 2055 2056 2057 /* 2058 * The following are indexes into the sd_chain_index_map[] array. 2059 */ 2060 2061 /* un->un_buf_chain_type must be set to one of these */ 2062 #define SD_CHAIN_INFO_DISK 0 2063 #define SD_CHAIN_INFO_DISK_NO_PM 1 2064 #define SD_CHAIN_INFO_RMMEDIA 2 2065 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2066 #define SD_CHAIN_INFO_CHKSUM 4 2067 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2068 2069 /* un->un_uscsi_chain_type must be set to one of these */ 2070 #define SD_CHAIN_INFO_USCSI_CMD 6 2071 /* USCSI with PM disabled is the same as DIRECT */ 2072 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2073 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2074 2075 /* un->un_direct_chain_type must be set to one of these */ 2076 #define SD_CHAIN_INFO_DIRECT_CMD 8 2077 2078 /* un->un_priority_chain_type must be set to one of these */ 2079 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2080 2081 /* size for devid inquiries */ 2082 #define MAX_INQUIRY_SIZE 0xF0 2083 2084 /* 2085 * Macros used by functions to pass a given buf(9S) struct along to the 2086 * next function in the layering chain for further processing. 2087 * 2088 * In the following macros, passing more than three arguments to the called 2089 * routines causes the optimizer for the SPARC compiler to stop doing tail 2090 * call elimination which results in significant performance degradation. 2091 */ 2092 #define SD_BEGIN_IOSTART(index, un, bp) \ 2093 ((*(sd_iostart_chain[index]))(index, un, bp)) 2094 2095 #define SD_BEGIN_IODONE(index, un, bp) \ 2096 ((*(sd_iodone_chain[index]))(index, un, bp)) 2097 2098 #define SD_NEXT_IOSTART(index, un, bp) \ 2099 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2100 2101 #define SD_NEXT_IODONE(index, un, bp) \ 2102 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2103 2104 2105 /* 2106 * Function: _init 2107 * 2108 * Description: This is the driver _init(9E) entry point. 2109 * 2110 * Return Code: Returns the value from mod_install(9F) or 2111 * ddi_soft_state_init(9F) as appropriate. 2112 * 2113 * Context: Called when driver module loaded. 
2114 */ 2115 2116 int 2117 _init(void) 2118 { 2119 int err; 2120 2121 /* establish driver name from module name */ 2122 sd_label = mod_modname(&modlinkage); 2123 2124 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun), 2125 SD_MAXUNIT); 2126 2127 if (err != 0) { 2128 return (err); 2129 } 2130 2131 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL); 2132 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL); 2133 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL); 2134 2135 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL); 2136 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL); 2137 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL); 2138 2139 /* 2140 * it's ok to init here even for fibre device 2141 */ 2142 sd_scsi_probe_cache_init(); 2143 2144 /* 2145 * Creating taskq before mod_install ensures that all callers (threads) 2146 * that enter the module after a successful mod_install encounter 2147 * a valid taskq. 2148 */ 2149 sd_taskq_create(); 2150 2151 err = mod_install(&modlinkage); 2152 if (err != 0) { 2153 /* delete taskq if install fails */ 2154 sd_taskq_delete(); 2155 2156 mutex_destroy(&sd_detach_mutex); 2157 mutex_destroy(&sd_log_mutex); 2158 mutex_destroy(&sd_label_mutex); 2159 2160 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2161 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2162 cv_destroy(&sd_tr.srq_inprocess_cv); 2163 2164 sd_scsi_probe_cache_fini(); 2165 2166 ddi_soft_state_fini(&sd_state); 2167 return (err); 2168 } 2169 2170 return (err); 2171 } 2172 2173 2174 /* 2175 * Function: _fini 2176 * 2177 * Description: This is the driver _fini(9E) entry point. 2178 * 2179 * Return Code: Returns the value from mod_remove(9F). 2180 * 2181 * Context: Called when driver module is unloaded. 2182 */ 2183 2184 int 2185 _fini(void) 2186 { 2187 int err; 2188 2189 if ((err = mod_remove(&modlinkage)) != 0) { 2190 return (err); 2191 } 2192 2193 sd_taskq_delete(); 2194 2195 mutex_destroy(&sd_detach_mutex); 2196 mutex_destroy(&sd_log_mutex); 2197 mutex_destroy(&sd_label_mutex); 2198 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex); 2199 2200 sd_scsi_probe_cache_fini(); 2201 2202 cv_destroy(&sd_tr.srq_resv_reclaim_cv); 2203 cv_destroy(&sd_tr.srq_inprocess_cv); 2204 2205 ddi_soft_state_fini(&sd_state); 2206 2207 return (err); 2208 } 2209 2210 2211 /* 2212 * Function: _info 2213 * 2214 * Description: This is the driver _info(9E) entry point. 2215 * 2216 * Arguments: modinfop - pointer to the driver modinfo structure 2217 * 2218 * Return Code: Returns the value from mod_info(9F). 2219 * 2220 * Context: Kernel thread context 2221 */ 2222 2223 int 2224 _info(struct modinfo *modinfop) 2225 { 2226 return (mod_info(&modlinkage, modinfop)); 2227 } 2228 2229 2230 /* 2231 * The following routines implement the driver message logging facility. 2232 * They provide component- and level-based debug output filtering. 2233 * Output may also be restricted to messages for a single instance by 2234 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set 2235 * to NULL, then messages for all instances are printed. 2236 * 2237 * These routines have been cloned from each other due to the language 2238 * constraints of macros and variable argument list processing. 2239 */ 2240 2241 2242 /* 2243 * Function: sd_log_err 2244 * 2245 * Description: This routine is called by the SD_ERROR macro for debug 2246 * logging of error conditions.
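 *
 * A typical call site looks like the following (editorial example;
 * the component and message text are illustrative only):
 *
 *	SD_ERROR(SD_LOG_ATTACH_DETACH, un,
 *	    "sd_unit_attach: resource allocation failed\n");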
2247 * 2248 * Arguments: comp - driver component being logged 2249 * dev - pointer to driver info structure 2250 * fmt - error string and format to be logged 2251 */ 2252 2253 static void 2254 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...) 2255 { 2256 va_list ap; 2257 dev_info_t *dev; 2258 2259 ASSERT(un != NULL); 2260 dev = SD_DEVINFO(un); 2261 ASSERT(dev != NULL); 2262 2263 /* 2264 * Filter messages based on the global component and level masks. 2265 * Also print if un matches the value of sd_debug_un, or if 2266 * sd_debug_un is set to NULL. 2267 */ 2268 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) && 2269 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2270 mutex_enter(&sd_log_mutex); 2271 va_start(ap, fmt); 2272 (void) vsprintf(sd_log_buf, fmt, ap); 2273 va_end(ap); 2274 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2275 mutex_exit(&sd_log_mutex); 2276 } 2277 #ifdef SD_FAULT_INJECTION 2278 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2279 if (un->sd_injection_mask & comp) { 2280 mutex_enter(&sd_log_mutex); 2281 va_start(ap, fmt); 2282 (void) vsprintf(sd_log_buf, fmt, ap); 2283 va_end(ap); 2284 sd_injection_log(sd_log_buf, un); 2285 mutex_exit(&sd_log_mutex); 2286 } 2287 #endif 2288 } 2289 2290 2291 /* 2292 * Function: sd_log_info 2293 * 2294 * Description: This routine is called by the SD_INFO macro for debug 2295 * logging of general purpose informational conditions. 2296 * 2297 * Arguments: comp - driver component being logged 2298 * dev - pointer to driver info structure 2299 * fmt - info string and format to be logged 2300 */ 2301 2302 static void 2303 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...) 2304 { 2305 va_list ap; 2306 dev_info_t *dev; 2307 2308 ASSERT(un != NULL); 2309 dev = SD_DEVINFO(un); 2310 ASSERT(dev != NULL); 2311 2312 /* 2313 * Filter messages based on the global component and level masks. 2314 * Also print if un matches the value of sd_debug_un, or if 2315 * sd_debug_un is set to NULL. 2316 */ 2317 if ((sd_component_mask & component) && 2318 (sd_level_mask & SD_LOGMASK_INFO) && 2319 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2320 mutex_enter(&sd_log_mutex); 2321 va_start(ap, fmt); 2322 (void) vsprintf(sd_log_buf, fmt, ap); 2323 va_end(ap); 2324 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2325 mutex_exit(&sd_log_mutex); 2326 } 2327 #ifdef SD_FAULT_INJECTION 2328 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2329 if (un->sd_injection_mask & component) { 2330 mutex_enter(&sd_log_mutex); 2331 va_start(ap, fmt); 2332 (void) vsprintf(sd_log_buf, fmt, ap); 2333 va_end(ap); 2334 sd_injection_log(sd_log_buf, un); 2335 mutex_exit(&sd_log_mutex); 2336 } 2337 #endif 2338 } 2339 2340 2341 /* 2342 * Function: sd_log_trace 2343 * 2344 * Description: This routine is called by the SD_TRACE macro for debug 2345 * logging of trace conditions (i.e. function entry/exit). 2346 * 2347 * Arguments: comp - driver component being logged 2348 * dev - pointer to driver info structure 2349 * fmt - trace string and format to be logged 2350 */ 2351 2352 static void 2353 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...) 2354 { 2355 va_list ap; 2356 dev_info_t *dev; 2357 2358 ASSERT(un != NULL); 2359 dev = SD_DEVINFO(un); 2360 ASSERT(dev != NULL); 2361 2362 /* 2363 * Filter messages based on the global component and level masks. 2364 * Also print if un matches the value of sd_debug_un, or if 2365 * sd_debug_un is set to NULL. 
2366 */ 2367 if ((sd_component_mask & component) && 2368 (sd_level_mask & SD_LOGMASK_TRACE) && 2369 ((sd_debug_un == NULL) || (sd_debug_un == un))) { 2370 mutex_enter(&sd_log_mutex); 2371 va_start(ap, fmt); 2372 (void) vsprintf(sd_log_buf, fmt, ap); 2373 va_end(ap); 2374 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf); 2375 mutex_exit(&sd_log_mutex); 2376 } 2377 #ifdef SD_FAULT_INJECTION 2378 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask)); 2379 if (un->sd_injection_mask & component) { 2380 mutex_enter(&sd_log_mutex); 2381 va_start(ap, fmt); 2382 (void) vsprintf(sd_log_buf, fmt, ap); 2383 va_end(ap); 2384 sd_injection_log(sd_log_buf, un); 2385 mutex_exit(&sd_log_mutex); 2386 } 2387 #endif 2388 } 2389 2390 2391 /* 2392 * Function: sdprobe 2393 * 2394 * Description: This is the driver probe(9e) entry point function. 2395 * 2396 * Arguments: devi - opaque device info handle 2397 * 2398 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful. 2399 * DDI_PROBE_FAILURE: If the probe failed. 2400 * DDI_PROBE_PARTIAL: If the instance is not present now, 2401 * but may be present in the future. 2402 */ 2403 2404 static int 2405 sdprobe(dev_info_t *devi) 2406 { 2407 struct scsi_device *devp; 2408 int rval; 2409 int instance; 2410 2411 /* 2412 * if it wasn't for pln, sdprobe could actually be nulldev 2413 * in the "__fibre" case. 2414 */ 2415 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) { 2416 return (DDI_PROBE_DONTCARE); 2417 } 2418 2419 devp = ddi_get_driver_private(devi); 2420 2421 if (devp == NULL) { 2422 /* Ooops... nexus driver is mis-configured... */ 2423 return (DDI_PROBE_FAILURE); 2424 } 2425 2426 instance = ddi_get_instance(devi); 2427 2428 if (ddi_get_soft_state(sd_state, instance) != NULL) { 2429 return (DDI_PROBE_PARTIAL); 2430 } 2431 2432 /* 2433 * Call the SCSA utility probe routine to see if we actually 2434 * have a target at this SCSI nexus. 2435 */ 2436 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) { 2437 case SCSIPROBE_EXISTS: 2438 switch (devp->sd_inq->inq_dtype) { 2439 case DTYPE_DIRECT: 2440 rval = DDI_PROBE_SUCCESS; 2441 break; 2442 case DTYPE_RODIRECT: 2443 /* CDs etc. Can be removable media */ 2444 rval = DDI_PROBE_SUCCESS; 2445 break; 2446 case DTYPE_OPTICAL: 2447 /* 2448 * Rewritable optical driver HP115AA 2449 * Can also be removable media 2450 */ 2451 2452 /* 2453 * Do not attempt to bind to DTYPE_OPTICAL if 2454 * pre solaris 9 sparc sd behavior is required 2455 * 2456 * If first time through and sd_dtype_optical_bind 2457 * has not been set in /etc/system check properties 2458 */ 2459 2460 if (sd_dtype_optical_bind < 0) { 2461 sd_dtype_optical_bind = ddi_prop_get_int 2462 (DDI_DEV_T_ANY, devi, 0, 2463 "optical-device-bind", 1); 2464 } 2465 2466 if (sd_dtype_optical_bind == 0) { 2467 rval = DDI_PROBE_FAILURE; 2468 } else { 2469 rval = DDI_PROBE_SUCCESS; 2470 } 2471 break; 2472 2473 case DTYPE_NOTPRESENT: 2474 default: 2475 rval = DDI_PROBE_FAILURE; 2476 break; 2477 } 2478 break; 2479 default: 2480 rval = DDI_PROBE_PARTIAL; 2481 break; 2482 } 2483 2484 /* 2485 * This routine checks for resource allocation prior to freeing, 2486 * so it will take care of the "smart probing" case where a 2487 * scsi_probe() may or may not have been issued and will *not* 2488 * free previously-freed resources. 2489 */ 2490 scsi_unprobe(devp); 2491 return (rval); 2492 } 2493 2494 2495 /* 2496 * Function: sdinfo 2497 * 2498 * Description: This is the driver getinfo(9e) entry point function. 
2499 * Given the device number, return the devinfo pointer from 2500 * the scsi_device structure or the instance number 2501 * associated with the dev_t. 2502 * 2503 * Arguments: dip - pointer to device info structure 2504 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2505 * DDI_INFO_DEVT2INSTANCE) 2506 * arg - driver dev_t 2507 * resultp - user buffer for request response 2508 * 2509 * Return Code: DDI_SUCCESS 2510 * DDI_FAILURE 2511 */ 2512 /* ARGSUSED */ 2513 static int 2514 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2515 { 2516 struct sd_lun *un; 2517 dev_t dev; 2518 int instance; 2519 int error; 2520 2521 switch (infocmd) { 2522 case DDI_INFO_DEVT2DEVINFO: 2523 dev = (dev_t)arg; 2524 instance = SDUNIT(dev); 2525 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2526 return (DDI_FAILURE); 2527 } 2528 *result = (void *) SD_DEVINFO(un); 2529 error = DDI_SUCCESS; 2530 break; 2531 case DDI_INFO_DEVT2INSTANCE: 2532 dev = (dev_t)arg; 2533 instance = SDUNIT(dev); 2534 *result = (void *)(uintptr_t)instance; 2535 error = DDI_SUCCESS; 2536 break; 2537 default: 2538 error = DDI_FAILURE; 2539 } 2540 return (error); 2541 } 2542 2543 /* 2544 * Function: sd_prop_op 2545 * 2546 * Description: This is the driver prop_op(9e) entry point function. 2547 * Return the number of blocks for the partition in question 2548 * or forward the request to the property facilities. 2549 * 2550 * Arguments: dev - device number 2551 * dip - pointer to device info structure 2552 * prop_op - property operator 2553 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2554 * name - pointer to property name 2555 * valuep - pointer or address of the user buffer 2556 * lengthp - property length 2557 * 2558 * Return Code: DDI_PROP_SUCCESS 2559 * DDI_PROP_NOT_FOUND 2560 * DDI_PROP_UNDEFINED 2561 * DDI_PROP_NO_MEMORY 2562 * DDI_PROP_BUF_TOO_SMALL 2563 */ 2564 2565 static int 2566 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2567 char *name, caddr_t valuep, int *lengthp) 2568 { 2569 int instance = ddi_get_instance(dip); 2570 struct sd_lun *un; 2571 uint64_t nblocks64; 2572 2573 /* 2574 * Our dynamic properties are all device specific and size oriented. 2575 * Requests issued under conditions where size is valid are passed 2576 * to ddi_prop_op_nblocks with the size information, otherwise the 2577 * request is passed to ddi_prop_op. Size depends on valid geometry. 2578 */ 2579 un = ddi_get_soft_state(sd_state, instance); 2580 if ((dev == DDI_DEV_T_ANY) || (un == NULL) || 2581 (un->un_f_geometry_is_valid == FALSE)) { 2582 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2583 name, valuep, lengthp)); 2584 } else { 2585 /* get nblocks value */ 2586 ASSERT(!mutex_owned(SD_MUTEX(un))); 2587 mutex_enter(SD_MUTEX(un)); 2588 nblocks64 = (ulong_t)un->un_map[SDPART(dev)].dkl_nblk; 2589 mutex_exit(SD_MUTEX(un)); 2590 2591 return (ddi_prop_op_nblocks(dev, dip, prop_op, mod_flags, 2592 name, valuep, lengthp, nblocks64)); 2593 } 2594 } 2595 2596 /* 2597 * The following functions are for smart probing: 2598 * sd_scsi_probe_cache_init() 2599 * sd_scsi_probe_cache_fini() 2600 * sd_scsi_clear_probe_cache() 2601 * sd_scsi_probe_with_cache() 2602 */ 2603 2604 /* 2605 * Function: sd_scsi_probe_cache_init 2606 * 2607 * Description: Initializes the probe response cache mutex and head pointer. 
2608 * 2609 * Context: Kernel thread context 2610 */ 2611 2612 static void 2613 sd_scsi_probe_cache_init(void) 2614 { 2615 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL); 2616 sd_scsi_probe_cache_head = NULL; 2617 } 2618 2619 2620 /* 2621 * Function: sd_scsi_probe_cache_fini 2622 * 2623 * Description: Frees all resources associated with the probe response cache. 2624 * 2625 * Context: Kernel thread context 2626 */ 2627 2628 static void 2629 sd_scsi_probe_cache_fini(void) 2630 { 2631 struct sd_scsi_probe_cache *cp; 2632 struct sd_scsi_probe_cache *ncp; 2633 2634 /* Clean up our smart probing linked list */ 2635 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) { 2636 ncp = cp->next; 2637 kmem_free(cp, sizeof (struct sd_scsi_probe_cache)); 2638 } 2639 sd_scsi_probe_cache_head = NULL; 2640 mutex_destroy(&sd_scsi_probe_cache_mutex); 2641 } 2642 2643 2644 /* 2645 * Function: sd_scsi_clear_probe_cache 2646 * 2647 * Description: This routine clears the probe response cache. This is 2648 * done when open() returns ENXIO so that when deferred 2649 * attach is attempted (possibly after a device has been 2650 * turned on) we will retry the probe. Since we don't know 2651 * which target we failed to open, we just clear the 2652 * entire cache. 2653 * 2654 * Context: Kernel thread context 2655 */ 2656 2657 static void 2658 sd_scsi_clear_probe_cache(void) 2659 { 2660 struct sd_scsi_probe_cache *cp; 2661 int i; 2662 2663 mutex_enter(&sd_scsi_probe_cache_mutex); 2664 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2665 /* 2666 * Reset all entries to SCSIPROBE_EXISTS. This will 2667 * force probing to be performed the next time 2668 * sd_scsi_probe_with_cache is called. 2669 */ 2670 for (i = 0; i < NTARGETS_WIDE; i++) { 2671 cp->cache[i] = SCSIPROBE_EXISTS; 2672 } 2673 } 2674 mutex_exit(&sd_scsi_probe_cache_mutex); 2675 } 2676 2677 2678 /* 2679 * Function: sd_scsi_probe_with_cache 2680 * 2681 * Description: This routine implements support for a scsi device probe 2682 * with cache. The driver maintains a cache of the target 2683 * responses to scsi probes. If we get no response from a 2684 * target during a probe inquiry, we remember that, and we 2685 * avoid additional calls to scsi_probe on non-zero LUNs 2686 * on the same target until the cache is cleared. By doing 2687 * so we avoid the 1/4 sec selection timeout for nonzero 2688 * LUNs. lun0 of a target is always probed. 2689 * 2690 * Arguments: devp - Pointer to a scsi_device(9S) structure 2691 * waitfunc - indicates what the allocator routines should 2692 * do when resources are not available. This value 2693 * is passed on to scsi_probe() when that routine 2694 * is called. 2695 * 2696 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache; 2697 * otherwise the value returned by scsi_probe(9F). 
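 *
 * For example (editorial): if LUN 0 of target 3 does not respond,
 * cp->cache[3] is left holding SCSIPROBE_NORESP and subsequent probes
 * of LUNs 1..N on target 3 return SCSIPROBE_NORESP immediately rather
 * than each waiting out a selection timeout; the entry is reset to
 * SCSIPROBE_EXISTS by sd_scsi_clear_probe_cache() or by a later probe
 * of LUN 0 on that target.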
2698 * 2699 * Context: Kernel thread context 2700 */ 2701 2702 static int 2703 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)()) 2704 { 2705 struct sd_scsi_probe_cache *cp; 2706 dev_info_t *pdip = ddi_get_parent(devp->sd_dev); 2707 int lun, tgt; 2708 2709 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2710 SCSI_ADDR_PROP_LUN, 0); 2711 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS, 2712 SCSI_ADDR_PROP_TARGET, -1); 2713 2714 /* Make sure caching enabled and target in range */ 2715 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) { 2716 /* do it the old way (no cache) */ 2717 return (scsi_probe(devp, waitfn)); 2718 } 2719 2720 mutex_enter(&sd_scsi_probe_cache_mutex); 2721 2722 /* Find the cache for this scsi bus instance */ 2723 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) { 2724 if (cp->pdip == pdip) { 2725 break; 2726 } 2727 } 2728 2729 /* If we can't find a cache for this pdip, create one */ 2730 if (cp == NULL) { 2731 int i; 2732 2733 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache), 2734 KM_SLEEP); 2735 cp->pdip = pdip; 2736 cp->next = sd_scsi_probe_cache_head; 2737 sd_scsi_probe_cache_head = cp; 2738 for (i = 0; i < NTARGETS_WIDE; i++) { 2739 cp->cache[i] = SCSIPROBE_EXISTS; 2740 } 2741 } 2742 2743 mutex_exit(&sd_scsi_probe_cache_mutex); 2744 2745 /* Recompute the cache for this target if LUN zero */ 2746 if (lun == 0) { 2747 cp->cache[tgt] = SCSIPROBE_EXISTS; 2748 } 2749 2750 /* Don't probe if cache remembers a NORESP from a previous LUN. */ 2751 if (cp->cache[tgt] != SCSIPROBE_EXISTS) { 2752 return (SCSIPROBE_NORESP); 2753 } 2754 2755 /* Do the actual probe; save & return the result */ 2756 return (cp->cache[tgt] = scsi_probe(devp, waitfn)); 2757 } 2758 2759 2760 /* 2761 * Function: sd_spin_up_unit 2762 * 2763 * Description: Issues the following commands to spin-up the device: 2764 * START STOP UNIT, and INQUIRY. 2765 * 2766 * Arguments: un - driver soft state (unit) structure 2767 * 2768 * Return Code: 0 - success 2769 * EIO - failure 2770 * EACCES - reservation conflict 2771 * 2772 * Context: Kernel thread context 2773 */ 2774 2775 static int 2776 sd_spin_up_unit(struct sd_lun *un) 2777 { 2778 size_t resid = 0; 2779 int has_conflict = FALSE; 2780 uchar_t *bufaddr; 2781 2782 ASSERT(un != NULL); 2783 2784 /* 2785 * Send a throwaway START UNIT command. 2786 * 2787 * If we fail on this, we don't care presently what precisely 2788 * is wrong. EMC's arrays will also fail this with a check 2789 * condition (0x2/0x4/0x3) if the device is "inactive," but 2790 * we don't want to fail the attach because it may become 2791 * "active" later. 2792 */ 2793 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT) 2794 == EACCES) 2795 has_conflict = TRUE; 2796 2797 /* 2798 * Send another INQUIRY command to the target. This is necessary for 2799 * non-removable media direct access devices because their INQUIRY data 2800 * may not be fully qualified until they are spun up (perhaps via the 2801 * START command above). Note: This seems to be needed for some 2802 * legacy devices only.) The INQUIRY command should succeed even if a 2803 * Reservation Conflict is present. 2804 */ 2805 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP); 2806 if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) { 2807 kmem_free(bufaddr, SUN_INQSIZE); 2808 return (EIO); 2809 } 2810 2811 /* 2812 * If we got enough INQUIRY data, copy it over the old INQUIRY data. 
2813 * Note that this routine does not return a failure here even if the 2814 * INQUIRY command did not return any data. This is a legacy behavior. 2815 */ 2816 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 2817 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 2818 } 2819 2820 kmem_free(bufaddr, SUN_INQSIZE); 2821 2822 /* If we hit a reservation conflict above, tell the caller. */ 2823 if (has_conflict == TRUE) { 2824 return (EACCES); 2825 } 2826 2827 return (0); 2828 } 2829 2830 #ifdef _LP64 2831 /* 2832 * Function: sd_enable_descr_sense 2833 * 2834 * Description: This routine attempts to select descriptor sense format 2835 * using the Control mode page. Devices that support 64 bit 2836 * LBAs (for >2TB luns) should also implement descriptor 2837 * sense data so we will call this function whenever we see 2838 * a lun larger than 2TB. If for some reason the device 2839 * supports 64 bit LBAs but doesn't support descriptor sense 2840 * presumably the mode select will fail. Everything will 2841 * continue to work normally except that we will not get 2842 * complete sense data for commands that fail with an LBA 2843 * larger than 32 bits. 2844 * 2845 * Arguments: un - driver soft state (unit) structure 2846 * 2847 * Context: Kernel thread context only 2848 */ 2849 2850 static void 2851 sd_enable_descr_sense(struct sd_lun *un) 2852 { 2853 uchar_t *header; 2854 struct mode_control_scsi3 *ctrl_bufp; 2855 size_t buflen; 2856 size_t bd_len; 2857 2858 /* 2859 * Read MODE SENSE page 0xA, Control Mode Page 2860 */ 2861 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 2862 sizeof (struct mode_control_scsi3); 2863 header = kmem_zalloc(buflen, KM_SLEEP); 2864 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 2865 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) { 2866 SD_ERROR(SD_LOG_COMMON, un, 2867 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 2868 goto eds_exit; 2869 } 2870 2871 /* 2872 * Determine size of Block Descriptors in order to locate 2873 * the mode page data. ATAPI devices return 0, SCSI devices 2874 * should return MODE_BLK_DESC_LENGTH. 2875 */ 2876 bd_len = ((struct mode_header *)header)->bdesc_length; 2877 2878 ctrl_bufp = (struct mode_control_scsi3 *) 2879 (header + MODE_HEADER_LENGTH + bd_len); 2880 2881 /* 2882 * Clear PS bit for MODE SELECT 2883 */ 2884 ctrl_bufp->mode_page.ps = 0; 2885 2886 /* 2887 * Set D_SENSE to enable descriptor sense format. 2888 */ 2889 ctrl_bufp->d_sense = 1; 2890 2891 /* 2892 * Use MODE SELECT to commit the change to the D_SENSE bit 2893 */ 2894 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 2895 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) { 2896 SD_INFO(SD_LOG_COMMON, un, 2897 "sd_enable_descr_sense: mode select ctrl page failed\n"); 2898 goto eds_exit; 2899 } 2900 2901 eds_exit: 2902 kmem_free(header, buflen); 2903 } 2904 #endif /* _LP64 */ 2905 2906 2907 /* 2908 * Function: sd_set_mmc_caps 2909 * 2910 * Description: This routine determines if the device is MMC compliant and if 2911 * the device supports CDDA via a mode sense of the CDVD 2912 * capabilities mode page. Also checks if the device is a 2913 * dvdram writable device. 
2914 * 2915 * Arguments: un - driver soft state (unit) structure 2916 * 2917 * Context: Kernel thread context only 2918 */ 2919 2920 static void 2921 sd_set_mmc_caps(struct sd_lun *un) 2922 { 2923 struct mode_header_grp2 *sense_mhp; 2924 uchar_t *sense_page; 2925 caddr_t buf; 2926 int bd_len; 2927 int status; 2928 struct uscsi_cmd com; 2929 int rtn; 2930 uchar_t *out_data_rw, *out_data_hd; 2931 uchar_t *rqbuf_rw, *rqbuf_hd; 2932 2933 ASSERT(un != NULL); 2934 2935 /* 2936 * The flags which will be set in this function are - mmc compliant, 2937 * dvdram writable device, cdda support. Initialize them to FALSE 2938 * and if a capability is detected - it will be set to TRUE. 2939 */ 2940 un->un_f_mmc_cap = FALSE; 2941 un->un_f_dvdram_writable_device = FALSE; 2942 un->un_f_cfg_cdda = FALSE; 2943 2944 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 2945 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 2946 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 2947 2948 if (status != 0) { 2949 /* command failed; just return */ 2950 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 2951 return; 2952 } 2953 /* 2954 * If the mode sense request for the CDROM CAPABILITIES 2955 * page (0x2A) succeeds the device is assumed to be MMC. 2956 */ 2957 un->un_f_mmc_cap = TRUE; 2958 2959 /* Get to the page data */ 2960 sense_mhp = (struct mode_header_grp2 *)buf; 2961 bd_len = (sense_mhp->bdesc_length_hi << 8) | 2962 sense_mhp->bdesc_length_lo; 2963 if (bd_len > MODE_BLK_DESC_LENGTH) { 2964 /* 2965 * We did not get back the expected block descriptor 2966 * length so we cannot determine if the device supports 2967 * CDDA. However, we still indicate the device is MMC 2968 * according to the successful response to the page 2969 * 0x2A mode sense request. 2970 */ 2971 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 2972 "sd_set_mmc_caps: Mode Sense returned " 2973 "invalid block descriptor length\n"); 2974 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 2975 return; 2976 } 2977 2978 /* See if read CDDA is supported */ 2979 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + 2980 bd_len); 2981 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE; 2982 2983 /* See if writing DVD RAM is supported. */ 2984 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE; 2985 if (un->un_f_dvdram_writable_device == TRUE) { 2986 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 2987 return; 2988 } 2989 2990 /* 2991 * If the device presents DVD or CD capabilities in the mode 2992 * page, we can return here since a RRD will not have 2993 * these capabilities. 2994 */ 2995 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 2996 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 2997 return; 2998 } 2999 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3000 3001 /* 3002 * If un->un_f_dvdram_writable_device is still FALSE, 3003 * check for a Removable Rigid Disk (RRD). A RRD 3004 * device is identified by the features RANDOM_WRITABLE and 3005 * HARDWARE_DEFECT_MANAGEMENT. 
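 * (The code below issues one GET CONFIGURATION request per feature and,
 * on success, tests byte 9 of each returned buffer against the
 * corresponding flag. Reading these as the MMC "Random Writable" and
 * "Defect Management" features is an editorial gloss on the macro names,
 * not wording from the original source.)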
3006 */ 3007 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3008 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3009 3010 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3011 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3012 RANDOM_WRITABLE); 3013 if (rtn != 0) { 3014 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3015 kmem_free(rqbuf_rw, SENSE_LENGTH); 3016 return; 3017 } 3018 3019 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3020 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3021 3022 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3023 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3024 HARDWARE_DEFECT_MANAGEMENT); 3025 if (rtn == 0) { 3026 /* 3027 * We have good information, check for random writable 3028 * and hardware defect features. 3029 */ 3030 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3031 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) { 3032 un->un_f_dvdram_writable_device = TRUE; 3033 } 3034 } 3035 3036 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3037 kmem_free(rqbuf_rw, SENSE_LENGTH); 3038 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3039 kmem_free(rqbuf_hd, SENSE_LENGTH); 3040 } 3041 3042 /* 3043 * Function: sd_check_for_writable_cd 3044 * 3045 * Description: This routine determines whether the media in the device is 3046 * writable. It uses the GET CONFIGURATION command (0x46) 3047 * to make this determination. 3048 * 3049 * Arguments: un - driver soft state (unit) structure 3050 * 3051 * Context: Never called at interrupt context. 3052 */ 3053 3054 static void 3055 sd_check_for_writable_cd(struct sd_lun *un) 3056 { 3057 struct uscsi_cmd com; 3058 uchar_t *out_data; 3059 uchar_t *rqbuf; 3060 int rtn; 3061 uchar_t *out_data_rw, *out_data_hd; 3062 uchar_t *rqbuf_rw, *rqbuf_hd; 3063 struct mode_header_grp2 *sense_mhp; 3064 uchar_t *sense_page; 3065 caddr_t buf; 3066 int bd_len; 3067 int status; 3068 3069 ASSERT(un != NULL); 3070 ASSERT(mutex_owned(SD_MUTEX(un))); 3071 3072 /* 3073 * Initialize un_f_mmc_writable_media to FALSE; set it to TRUE 3074 * only if the configuration info tells us the media is writable. 3075 */ 3076 un->un_f_mmc_writable_media = FALSE; 3077 mutex_exit(SD_MUTEX(un)); 3078 3079 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 3080 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3081 3082 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH, 3083 out_data, SD_PROFILE_HEADER_LEN); 3084 3085 mutex_enter(SD_MUTEX(un)); 3086 if (rtn == 0) { 3087 /* 3088 * We have good information, check for writable DVD. 3089 */ 3090 if ((out_data[6] == 0) && (out_data[7] == 0x12)) { 3091 un->un_f_mmc_writable_media = TRUE; 3092 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3093 kmem_free(rqbuf, SENSE_LENGTH); 3094 return; 3095 } 3096 } 3097 3098 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 3099 kmem_free(rqbuf, SENSE_LENGTH); 3100 3101 /* 3102 * Determine if this is an RRD type device.
3103 */ 3104 mutex_exit(SD_MUTEX(un)); 3105 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 3106 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf, 3107 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT); 3108 mutex_enter(SD_MUTEX(un)); 3109 if (status != 0) { 3110 /* command failed; just return */ 3111 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3112 return; 3113 } 3114 3115 /* Get to the page data */ 3116 sense_mhp = (struct mode_header_grp2 *)buf; 3117 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 3118 if (bd_len > MODE_BLK_DESC_LENGTH) { 3119 /* 3120 * We did not get back the expected block descriptor length so 3121 * we cannot check the mode page. 3122 */ 3123 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3124 "sd_check_for_writable_cd: Mode Sense returned " 3125 "invalid block descriptor length\n"); 3126 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3127 return; 3128 } 3129 3130 /* 3131 * If the device presents DVD or CD capabilities in the mode 3132 * page, we can return here since an RRD device will not have 3133 * these capabilities. 3134 */ 3135 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len); 3136 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) { 3137 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3138 return; 3139 } 3140 kmem_free(buf, BUFLEN_MODE_CDROM_CAP); 3141 3142 /* 3143 * If un->un_f_mmc_writable_media is still FALSE, 3144 * check for RRD type media. An RRD device is identified 3145 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT. 3146 */ 3147 mutex_exit(SD_MUTEX(un)); 3148 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3149 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3150 3151 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw, 3152 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN, 3153 RANDOM_WRITABLE); 3154 if (rtn != 0) { 3155 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3156 kmem_free(rqbuf_rw, SENSE_LENGTH); 3157 mutex_enter(SD_MUTEX(un)); 3158 return; 3159 } 3160 3161 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP); 3162 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 3163 3164 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd, 3165 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN, 3166 HARDWARE_DEFECT_MANAGEMENT); 3167 mutex_enter(SD_MUTEX(un)); 3168 if (rtn == 0) { 3169 /* 3170 * We have good information, check for random writable 3171 * and hardware defect features as current. 3172 */ 3173 if ((out_data_rw[9] & RANDOM_WRITABLE) && 3174 (out_data_rw[10] & 0x1) && 3175 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) && 3176 (out_data_hd[10] & 0x1)) { 3177 un->un_f_mmc_writable_media = TRUE; 3178 } 3179 } 3180 3181 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN); 3182 kmem_free(rqbuf_rw, SENSE_LENGTH); 3183 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN); 3184 kmem_free(rqbuf_hd, SENSE_LENGTH); 3185 } 3186 3187 /* 3188 * Function: sd_read_unit_properties 3189 * 3190 * Description: The following implements a property lookup mechanism. 3191 * Properties for particular disks (keyed on vendor, model 3192 * and rev numbers) are sought in the sd.conf file via 3193 * sd_process_sdconf_file(), and if not found there, are 3194 * looked for in a list hardcoded in this driver via 3195 * sd_process_sdconf_table(). Once located, the properties 3196 * are used to update the driver unit structure.
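 *
 * As an illustration (editorial; the vid/pid and property names are
 * invented, and the value layout shown is the version-1 form documented
 * at sd_process_sdconf_file() below), an sd.conf override might look
 * like:
 *
 *	sd-config-list = "ACME    SuperDisk", "acme-disk-data";
 *	acme-disk-data = 1, 0x1, 3;
 *
 * where 1 is the <version>, 0x1 is the <flags> word (bit0 set, so prop0
 * is consumed), and 3 is the prop0 value.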
3197 * 3198 * Arguments: un - driver soft state (unit) structure 3199 */ 3200 3201 static void 3202 sd_read_unit_properties(struct sd_lun *un) 3203 { 3204 /* 3205 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3206 * the "sd-config-list" property (from the sd.conf file) or if 3207 * there was not a match for the inquiry vid/pid. If this event 3208 * occurs the static driver configuration table is searched for 3209 * a match. 3210 */ 3211 ASSERT(un != NULL); 3212 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3213 sd_process_sdconf_table(un); 3214 } 3215 3216 /* check for LSI device */ 3217 sd_is_lsi(un); 3218 3219 /* 3220 * Set this in sd.conf to 0 in order to disable kstats. The default 3221 * is 1, so they are enabled by default. 3222 */ 3223 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 3224 SD_DEVINFO(un), DDI_PROP_DONTPASS, "enable-partition-kstats", 1)); 3225 } 3226 3227 3228 /* 3229 * Function: sd_process_sdconf_file 3230 * 3231 * Description: Use ddi_getlongprop to obtain the properties from the 3232 * driver's config file (ie, sd.conf) and update the driver 3233 * soft state structure accordingly. 3234 * 3235 * Arguments: un - driver soft state (unit) structure 3236 * 3237 * Return Code: SD_SUCCESS - The properties were successfully set according 3238 * to the driver configuration file. 3239 * SD_FAILURE - The driver config list was not obtained or 3240 * there was no vid/pid match. This indicates that 3241 * the static config table should be used. 3242 * 3243 * The config file has a property, "sd-config-list", which consists of 3244 * one or more duplets as follows: 3245 * 3246 * sd-config-list= 3247 * <duplet>, 3248 * [<duplet>,] 3249 * [<duplet>]; 3250 * 3251 * The structure of each duplet is as follows: 3252 * 3253 * <duplet>:= <vid+pid>,<data-property-name_list> 3254 * 3255 * The first entry of the duplet is the device ID string (the concatenated 3256 * vid & pid; not to be confused with a device_id). This is defined in 3257 * the same way as in the sd_disk_table. 3258 * 3259 * The second part of the duplet is a string that identifies a 3260 * data-property-name-list. The data-property-name-list is defined as 3261 * follows: 3262 * 3263 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3264 * 3265 * The syntax of <data-property-name> depends on the <version> field. 3266 * 3267 * If version = SD_CONF_VERSION_1 we have the following syntax: 3268 * 3269 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3270 * 3271 * where the prop0 value will be used to set prop0 if bit0 set in the 3272 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1 3273 * 3274 */ 3275 3276 static int 3277 sd_process_sdconf_file(struct sd_lun *un) 3278 { 3279 char *config_list = NULL; 3280 int config_list_len; 3281 int len; 3282 int dupletlen = 0; 3283 char *vidptr; 3284 int vidlen; 3285 char *dnlist_ptr; 3286 char *dataname_ptr; 3287 int dnlist_len; 3288 int dataname_len; 3289 int *data_list; 3290 int data_list_len; 3291 int rval = SD_FAILURE; 3292 int i; 3293 3294 ASSERT(un != NULL); 3295 3296 /* Obtain the configuration list associated with the .conf file */ 3297 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3298 sd_config_list, (caddr_t)&config_list, &config_list_len) 3299 != DDI_PROP_SUCCESS) { 3300 return (SD_FAILURE); 3301 } 3302 3303 /* 3304 * Compare vids in each duplet to the inquiry vid - if a match is 3305 * made, get the data value and update the soft state structure 3306 * accordingly. 
3307 * 3308 * Note: This algorithm is complex and difficult to maintain. It should 3309 * be replaced with a more robust implementation. 3310 */ 3311 for (len = config_list_len, vidptr = config_list; len > 0; 3312 vidptr += dupletlen, len -= dupletlen) { 3313 /* 3314 * Note: The assumption here is that each vid entry is on 3315 * a unique line from its associated duplet. 3316 */ 3317 vidlen = dupletlen = (int)strlen(vidptr); 3318 if ((vidlen == 0) || 3319 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3320 dupletlen++; 3321 continue; 3322 } 3323 3324 /* 3325 * dnlist contains 1 or more blank separated 3326 * data-property-name entries 3327 */ 3328 dnlist_ptr = vidptr + vidlen + 1; 3329 dnlist_len = (int)strlen(dnlist_ptr); 3330 dupletlen += dnlist_len + 2; 3331 3332 /* 3333 * Set a pointer for the first data-property-name 3334 * entry in the list 3335 */ 3336 dataname_ptr = dnlist_ptr; 3337 dataname_len = 0; 3338 3339 /* 3340 * Loop through all data-property-name entries in the 3341 * data-property-name-list setting the properties for each. 3342 */ 3343 while (dataname_len < dnlist_len) { 3344 int version; 3345 3346 /* 3347 * Determine the length of the current 3348 * data-property-name entry by indexing until a 3349 * blank or NULL is encountered. When the space is 3350 * encountered reset it to a NULL for compliance 3351 * with ddi_getlongprop(). 3352 */ 3353 for (i = 0; ((dataname_ptr[i] != ' ') && 3354 (dataname_ptr[i] != '\0')); i++) { 3355 ; 3356 } 3357 3358 dataname_len += i; 3359 /* If not null terminated, Make it so */ 3360 if (dataname_ptr[i] == ' ') { 3361 dataname_ptr[i] = '\0'; 3362 } 3363 dataname_len++; 3364 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3365 "sd_process_sdconf_file: disk:%s, data:%s\n", 3366 vidptr, dataname_ptr); 3367 3368 /* Get the data list */ 3369 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0, 3370 dataname_ptr, (caddr_t)&data_list, &data_list_len) 3371 != DDI_PROP_SUCCESS) { 3372 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3373 "sd_process_sdconf_file: data property (%s)" 3374 " has no value\n", dataname_ptr); 3375 dataname_ptr = dnlist_ptr + dataname_len; 3376 continue; 3377 } 3378 3379 version = data_list[0]; 3380 3381 if (version == SD_CONF_VERSION_1) { 3382 sd_tunables values; 3383 3384 /* Set the properties */ 3385 if (sd_chk_vers1_data(un, data_list[1], 3386 &data_list[2], data_list_len, dataname_ptr) 3387 == SD_SUCCESS) { 3388 sd_get_tunables_from_conf(un, 3389 data_list[1], &data_list[2], 3390 &values); 3391 sd_set_vers1_properties(un, 3392 data_list[1], &values); 3393 rval = SD_SUCCESS; 3394 } else { 3395 rval = SD_FAILURE; 3396 } 3397 } else { 3398 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3399 "data property %s version 0x%x is invalid.", 3400 dataname_ptr, version); 3401 rval = SD_FAILURE; 3402 } 3403 kmem_free(data_list, data_list_len); 3404 dataname_ptr = dnlist_ptr + dataname_len; 3405 } 3406 } 3407 3408 /* free up the memory allocated by ddi_getlongprop */ 3409 if (config_list) { 3410 kmem_free(config_list, config_list_len); 3411 } 3412 3413 return (rval); 3414 } 3415 3416 /* 3417 * Function: sd_get_tunables_from_conf() 3418 * 3419 * 3420 * This function reads the data list from the sd.conf file and pulls 3421 * the values that can have numeric values as arguments and places 3422 * the values in the apropriate sd_tunables member. 
3423 * Since the order of the data list members varies across platforms, 3424 * this function reads them from the data list in a platform-specific 3425 * order and places each into the correct sd_tunables member, which 3426 * is consistent across all platforms. 3427 */ 3428 static void 3429 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3430 sd_tunables *values) 3431 { 3432 int i; 3433 int mask; 3434 3435 bzero(values, sizeof (sd_tunables)); 3436 3437 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3438 3439 mask = 1 << i; 3440 if (mask > flags) { 3441 break; 3442 } 3443 3444 switch (mask & flags) { 3445 case 0: /* This mask bit not set in flags */ 3446 continue; 3447 case SD_CONF_BSET_THROTTLE: 3448 values->sdt_throttle = data_list[i]; 3449 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3450 "sd_get_tunables_from_conf: throttle = %d\n", 3451 values->sdt_throttle); 3452 break; 3453 case SD_CONF_BSET_CTYPE: 3454 values->sdt_ctype = data_list[i]; 3455 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3456 "sd_get_tunables_from_conf: ctype = %d\n", 3457 values->sdt_ctype); 3458 break; 3459 case SD_CONF_BSET_NRR_COUNT: 3460 values->sdt_not_rdy_retries = data_list[i]; 3461 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3462 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 3463 values->sdt_not_rdy_retries); 3464 break; 3465 case SD_CONF_BSET_BSY_RETRY_COUNT: 3466 values->sdt_busy_retries = data_list[i]; 3467 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3468 "sd_get_tunables_from_conf: busy_retries = %d\n", 3469 values->sdt_busy_retries); 3470 break; 3471 case SD_CONF_BSET_RST_RETRIES: 3472 values->sdt_reset_retries = data_list[i]; 3473 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3474 "sd_get_tunables_from_conf: reset_retries = %d\n", 3475 values->sdt_reset_retries); 3476 break; 3477 case SD_CONF_BSET_RSV_REL_TIME: 3478 values->sdt_reserv_rel_time = data_list[i]; 3479 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3480 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 3481 values->sdt_reserv_rel_time); 3482 break; 3483 case SD_CONF_BSET_MIN_THROTTLE: 3484 values->sdt_min_throttle = data_list[i]; 3485 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3486 "sd_get_tunables_from_conf: min_throttle = %d\n", 3487 values->sdt_min_throttle); 3488 break; 3489 case SD_CONF_BSET_DISKSORT_DISABLED: 3490 values->sdt_disk_sort_dis = data_list[i]; 3491 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3492 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 3493 values->sdt_disk_sort_dis); 3494 break; 3495 case SD_CONF_BSET_LUN_RESET_ENABLED: 3496 values->sdt_lun_reset_enable = data_list[i]; 3497 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3498 "sd_get_tunables_from_conf: lun_reset_enable = %d" 3499 "\n", values->sdt_lun_reset_enable); 3500 break; 3501 } 3502 } 3503 } 3504 3505 /* 3506 * Function: sd_process_sdconf_table 3507 * 3508 * Description: Search the static configuration table for a match on the 3509 * inquiry vid/pid and update the driver soft state structure 3510 * according to the table property values for the device.
3511 * 3512 * The form of a configuration table entry is: 3513 * <vid+pid>,<flags>,<property-data> 3514 * "SEAGATE ST42400N",1,63,0,0 (Fibre) 3515 * "SEAGATE ST42400N",1,63,0,0,0,0 (Sparc) 3516 * "SEAGATE ST42400N",1,63,0,0,0,0,0,0,0,0,0,0 (Intel) 3517 * 3518 * Arguments: un - driver soft state (unit) structure 3519 */ 3520 3521 static void 3522 sd_process_sdconf_table(struct sd_lun *un) 3523 { 3524 char *id = NULL; 3525 int table_index; 3526 int idlen; 3527 3528 ASSERT(un != NULL); 3529 for (table_index = 0; table_index < sd_disk_table_size; 3530 table_index++) { 3531 id = sd_disk_table[table_index].device_id; 3532 idlen = strlen(id); 3533 if (idlen == 0) { 3534 continue; 3535 } 3536 3537 /* 3538 * The static configuration table currently does not 3539 * implement version 10 properties. Additionally, 3540 * multiple data-property-name entries are not 3541 * implemented in the static configuration table. 3542 */ 3543 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 3544 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3545 "sd_process_sdconf_table: disk %s\n", id); 3546 sd_set_vers1_properties(un, 3547 sd_disk_table[table_index].flags, 3548 sd_disk_table[table_index].properties); 3549 break; 3550 } 3551 } 3552 } 3553 3554 3555 /* 3556 * Function: sd_sdconf_id_match 3557 * 3558 * Description: This local function implements a case sensitive vid/pid 3559 * comparison as well as the boundary cases of wild card and 3560 * multiple blanks. 3561 * 3562 * Note: An implicit assumption made here is that the scsi 3563 * inquiry structure will always keep the vid, pid and 3564 * revision strings in consecutive sequence, so they can be 3565 * read as a single string. If this assumption is not the 3566 * case, a separate string, to be used for the check, needs 3567 * to be built with these strings concatenated. 3568 * 3569 * Arguments: un - driver soft state (unit) structure 3570 * id - table or config file vid/pid 3571 * idlen - length of the vid/pid (bytes) 3572 * 3573 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3574 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3575 */ 3576 3577 static int 3578 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen) 3579 { 3580 struct scsi_inquiry *sd_inq; 3581 int rval = SD_SUCCESS; 3582 3583 ASSERT(un != NULL); 3584 sd_inq = un->un_sd->sd_inq; 3585 ASSERT(id != NULL); 3586 3587 /* 3588 * We use the inq_vid as a pointer to a buffer containing the 3589 * vid and pid and use the entire vid/pid length of the table 3590 * entry for the comparison. This works because the inq_pid 3591 * data member follows inq_vid in the scsi_inquiry structure. 3592 */ 3593 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) { 3594 /* 3595 * The user id string is compared to the inquiry vid/pid 3596 * using a case insensitive comparison and ignoring 3597 * multiple spaces. 3598 */ 3599 rval = sd_blank_cmp(un, id, idlen); 3600 if (rval != SD_SUCCESS) { 3601 /* 3602 * User id strings that start and end with a "*" 3603 * are a special case. These do not have a 3604 * specific vendor, and the product string can 3605 * appear anywhere in the 16 byte PID portion of 3606 * the inquiry data. This is a simple strstr() 3607 * type search for the user id in the inquiry data. 
3608 */ 3609 if ((id[0] == '*') && (id[idlen - 1] == '*')) { 3610 char *pidptr = &id[1]; 3611 int i; 3612 int j; 3613 int pidstrlen = idlen - 2; 3614 j = sizeof (SD_INQUIRY(un)->inq_pid) - 3615 pidstrlen; 3616 3617 if (j < 0) { 3618 return (SD_FAILURE); 3619 } 3620 for (i = 0; i < j; i++) { 3621 if (bcmp(&SD_INQUIRY(un)->inq_pid[i], 3622 pidptr, pidstrlen) == 0) { 3623 rval = SD_SUCCESS; 3624 break; 3625 } 3626 } 3627 } 3628 } 3629 } 3630 return (rval); 3631 } 3632 3633 3634 /* 3635 * Function: sd_blank_cmp 3636 * 3637 * Description: If the id string starts and ends with a space, treat 3638 * multiple consecutive spaces as equivalent to a single 3639 * space. For example, this causes a sd_disk_table entry 3640 * of " NEC CDROM " to match a device's id string of 3641 * "NEC CDROM". 3642 * 3643 * Note: The success exit condition for this routine is if 3644 * the pointer to the table entry is '\0' and the cnt of 3645 * the inquiry length is zero. This will happen if the inquiry 3646 * string returned by the device is padded with spaces to be 3647 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3648 * SCSI spec states that the inquiry string is to be padded with 3649 * spaces. 3650 * 3651 * Arguments: un - driver soft state (unit) structure 3652 * id - table or config file vid/pid 3653 * idlen - length of the vid/pid (bytes) 3654 * 3655 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3656 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3657 */ 3658 3659 static int 3660 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3661 { 3662 char *p1; 3663 char *p2; 3664 int cnt; 3665 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3666 sizeof (SD_INQUIRY(un)->inq_pid); 3667 3668 ASSERT(un != NULL); 3669 p2 = un->un_sd->sd_inq->inq_vid; 3670 ASSERT(id != NULL); 3671 p1 = id; 3672 3673 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3674 /* 3675 * Note: string p1 is terminated by a NUL but string p2 3676 * isn't. The end of p2 is determined by cnt. 3677 */ 3678 for (;;) { 3679 /* skip over any extra blanks in both strings */ 3680 while ((*p1 != '\0') && (*p1 == ' ')) { 3681 p1++; 3682 } 3683 while ((cnt != 0) && (*p2 == ' ')) { 3684 p2++; 3685 cnt--; 3686 } 3687 3688 /* compare the two strings */ 3689 if ((cnt == 0) || 3690 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3691 break; 3692 } 3693 while ((cnt > 0) && 3694 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3695 p1++; 3696 p2++; 3697 cnt--; 3698 } 3699 } 3700 } 3701 3702 /* return SD_SUCCESS if both strings match */ 3703 return (((*p1 == '\0') && (cnt == 0)) ? 
SD_SUCCESS : SD_FAILURE); 3704 } 3705 3706 3707 /* 3708 * Function: sd_chk_vers1_data 3709 * 3710 * Description: Verify the version 1 device properties provided by the 3711 * user via the configuration file 3712 * 3713 * Arguments: un - driver soft state (unit) structure 3714 * flags - integer mask indicating properties to be set 3715 * prop_list - integer list of property values 3716 * list_len - length of user provided data 3717 * 3718 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 3719 * SD_FAILURE - Indicates the user provided data is invalid 3720 */ 3721 3722 static int 3723 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 3724 int list_len, char *dataname_ptr) 3725 { 3726 int i; 3727 int mask = 1; 3728 int index = 0; 3729 3730 ASSERT(un != NULL); 3731 3732 /* Check for a NULL property name and list */ 3733 if (dataname_ptr == NULL) { 3734 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3735 "sd_chk_vers1_data: NULL data property name."); 3736 return (SD_FAILURE); 3737 } 3738 if (prop_list == NULL) { 3739 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3740 "sd_chk_vers1_data: %s NULL data property list.", 3741 dataname_ptr); 3742 return (SD_FAILURE); 3743 } 3744 3745 /* Display a warning if undefined bits are set in the flags */ 3746 if (flags & ~SD_CONF_BIT_MASK) { 3747 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3748 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 3749 "Properties not set.", 3750 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 3751 return (SD_FAILURE); 3752 } 3753 3754 /* 3755 * Verify the length of the list by identifying the highest bit set 3756 * in the flags and validating that the property list has a length 3757 * up to the index of this bit. 3758 */ 3759 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3760 if (flags & mask) { 3761 index++; 3762 } 3763 mask = 1 << i; 3764 } 3765 if ((list_len / sizeof (int)) < (index + 2)) { 3766 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3767 "sd_chk_vers1_data: " 3768 "Data property list %s size is incorrect. " 3769 "Properties not set.", dataname_ptr); 3770 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 3771 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 3772 return (SD_FAILURE); 3773 } 3774 return (SD_SUCCESS); 3775 } 3776 3777 3778 /* 3779 * Function: sd_set_vers1_properties 3780 * 3781 * Description: Set version 1 device properties based on a property list 3782 * retrieved from the driver configuration file or static 3783 * configuration table. Version 1 properties have the format: 3784 * 3785 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3786 * 3787 * where the prop0 value will be used to set prop0 if bit0 3788 * is set in the flags 3789 * 3790 * Arguments: un - driver soft state (unit) structure 3791 * flags - integer mask indicating properties to be set 3792 * prop_list - integer list of property values 3793 */ 3794 3795 static void 3796 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 3797 { 3798 ASSERT(un != NULL); 3799 3800 /* 3801 * Set the flag to indicate cache is to be disabled. An attempt 3802 * to disable the cache via sd_disable_caching() will be made 3803 * later during attach once the basic initialization is complete. 
3804 */ 3805 if (flags & SD_CONF_BSET_NOCACHE) { 3806 un->un_f_opt_disable_cache = TRUE; 3807 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3808 "sd_set_vers1_properties: caching disabled flag set\n"); 3809 } 3810 3811 /* CD-specific configuration parameters */ 3812 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 3813 un->un_f_cfg_playmsf_bcd = TRUE; 3814 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3815 "sd_set_vers1_properties: playmsf_bcd set\n"); 3816 } 3817 if (flags & SD_CONF_BSET_READSUB_BCD) { 3818 un->un_f_cfg_readsub_bcd = TRUE; 3819 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3820 "sd_set_vers1_properties: readsub_bcd set\n"); 3821 } 3822 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 3823 un->un_f_cfg_read_toc_trk_bcd = TRUE; 3824 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3825 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 3826 } 3827 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 3828 un->un_f_cfg_read_toc_addr_bcd = TRUE; 3829 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3830 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 3831 } 3832 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 3833 un->un_f_cfg_no_read_header = TRUE; 3834 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3835 "sd_set_vers1_properties: no_read_header set\n"); 3836 } 3837 if (flags & SD_CONF_BSET_READ_CD_XD4) { 3838 un->un_f_cfg_read_cd_xd4 = TRUE; 3839 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3840 "sd_set_vers1_properties: read_cd_xd4 set\n"); 3841 } 3842 3843 /* Support for devices which do not have valid/unique serial numbers */ 3844 if (flags & SD_CONF_BSET_FAB_DEVID) { 3845 un->un_f_opt_fab_devid = TRUE; 3846 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3847 "sd_set_vers1_properties: fab_devid bit set\n"); 3848 } 3849 3850 /* Support for user throttle configuration */ 3851 if (flags & SD_CONF_BSET_THROTTLE) { 3852 ASSERT(prop_list != NULL); 3853 un->un_saved_throttle = un->un_throttle = 3854 prop_list->sdt_throttle; 3855 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3856 "sd_set_vers1_properties: throttle set to %d\n", 3857 prop_list->sdt_throttle); 3858 } 3859 3860 /* Set the per disk retry count according to the conf file or table. 
*/ 3861 if (flags & SD_CONF_BSET_NRR_COUNT) { 3862 ASSERT(prop_list != NULL); 3863 if (prop_list->sdt_not_rdy_retries) { 3864 un->un_notready_retry_count = 3865 prop_list->sdt_not_rdy_retries; 3866 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3867 "sd_set_vers1_properties: not ready retry count" 3868 " set to %d\n", un->un_notready_retry_count); 3869 } 3870 } 3871 3872 /* The controller type is reported for generic disk driver ioctls */ 3873 if (flags & SD_CONF_BSET_CTYPE) { 3874 ASSERT(prop_list != NULL); 3875 switch (prop_list->sdt_ctype) { 3876 case CTYPE_CDROM: 3877 un->un_ctype = prop_list->sdt_ctype; 3878 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3879 "sd_set_vers1_properties: ctype set to " 3880 "CTYPE_CDROM\n"); 3881 break; 3882 case CTYPE_CCS: 3883 un->un_ctype = prop_list->sdt_ctype; 3884 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3885 "sd_set_vers1_properties: ctype set to " 3886 "CTYPE_CCS\n"); 3887 break; 3888 case CTYPE_ROD: /* RW optical */ 3889 un->un_ctype = prop_list->sdt_ctype; 3890 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3891 "sd_set_vers1_properties: ctype set to " 3892 "CTYPE_ROD\n"); 3893 break; 3894 default: 3895 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3896 "sd_set_vers1_properties: Could not set " 3897 "invalid ctype value (%d)", 3898 prop_list->sdt_ctype); 3899 } 3900 } 3901 3902 /* Purple failover timeout */ 3903 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 3904 ASSERT(prop_list != NULL); 3905 un->un_busy_retry_count = 3906 prop_list->sdt_busy_retries; 3907 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3908 "sd_set_vers1_properties: " 3909 "busy retry count set to %d\n", 3910 un->un_busy_retry_count); 3911 } 3912 3913 /* Purple reset retry count */ 3914 if (flags & SD_CONF_BSET_RST_RETRIES) { 3915 ASSERT(prop_list != NULL); 3916 un->un_reset_retry_count = 3917 prop_list->sdt_reset_retries; 3918 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3919 "sd_set_vers1_properties: " 3920 "reset retry count set to %d\n", 3921 un->un_reset_retry_count); 3922 } 3923 3924 /* Purple reservation release timeout */ 3925 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 3926 ASSERT(prop_list != NULL); 3927 un->un_reserve_release_time = 3928 prop_list->sdt_reserv_rel_time; 3929 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3930 "sd_set_vers1_properties: " 3931 "reservation release timeout set to %d\n", 3932 un->un_reserve_release_time); 3933 } 3934 3935 /* 3936 * Driver flag telling the driver to verify that no commands are pending 3937 * for a device before issuing a Test Unit Ready. This is a workaround 3938 * for a firmware bug in some Seagate eliteI drives. 3939 */ 3940 if (flags & SD_CONF_BSET_TUR_CHECK) { 3941 un->un_f_cfg_tur_check = TRUE; 3942 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3943 "sd_set_vers1_properties: tur queue check set\n"); 3944 } 3945 3946 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 3947 un->un_min_throttle = prop_list->sdt_min_throttle; 3948 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3949 "sd_set_vers1_properties: min throttle set to %d\n", 3950 un->un_min_throttle); 3951 } 3952 3953 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 3954 un->un_f_disksort_disabled = 3955 (prop_list->sdt_disk_sort_dis != 0) ? 3956 TRUE : FALSE; 3957 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3958 "sd_set_vers1_properties: disksort disabled " 3959 "flag set to %d\n", 3960 prop_list->sdt_disk_sort_dis); 3961 } 3962 3963 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 3964 un->un_f_lun_reset_enabled = 3965 (prop_list->sdt_lun_reset_enable != 0) ? 
3966 TRUE : FALSE; 3967 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3968 "sd_set_vers1_properties: lun reset enabled " 3969 "flag set to %d\n", 3970 prop_list->sdt_lun_reset_enable); 3971 } 3972 3973 /* 3974 * Validate the throttle values. 3975 * If any of the numbers are invalid, set everything to defaults. 3976 */ 3977 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 3978 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 3979 (un->un_min_throttle > un->un_throttle)) { 3980 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 3981 un->un_min_throttle = sd_min_throttle; 3982 } 3983 } 3984 3985 /* 3986 * Function: sd_is_lsi() 3987 * 3988 * Description: Check for LSI devices by stepping through the static device 3989 * table to match the vid/pid. 3990 * 3991 * Args: un - ptr to sd_lun 3992 * 3993 * Notes: When creating a new LSI property, it must also be added to 3994 * this function. 3995 */ 3996 static void 3997 sd_is_lsi(struct sd_lun *un) 3998 { 3999 char *id = NULL; 4000 int table_index; 4001 int idlen; 4002 void *prop; 4003 4004 ASSERT(un != NULL); 4005 for (table_index = 0; table_index < sd_disk_table_size; 4006 table_index++) { 4007 id = sd_disk_table[table_index].device_id; 4008 idlen = strlen(id); 4009 if (idlen == 0) { 4010 continue; 4011 } 4012 4013 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) { 4014 prop = sd_disk_table[table_index].properties; 4015 if (prop == &lsi_properties || 4016 prop == &lsi_oem_properties || 4017 prop == &lsi_properties_scsi || 4018 prop == &symbios_properties) { 4019 un->un_f_cfg_is_lsi = TRUE; 4020 } 4021 break; 4022 } 4023 } 4024 } 4025 4026 4027 /* 4028 * The following routines support reading and interpretation of disk labels, 4029 * including Solaris BE (8-slice) VTOCs, Solaris LE (16-slice) VTOCs, and 4030 * fdisk tables. 4031 */ 4032 4033 /* 4034 * Function: sd_validate_geometry 4035 * 4036 * Description: Read the label from the disk (if present). Update the unit's 4037 * geometry and vtoc information from the data in the label. 4038 * Verify that the label is valid. 4039 * 4040 * Arguments: un - driver soft state (unit) structure 4041 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4042 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4043 * to use the USCSI "direct" chain and bypass the normal 4044 * command waitq. 4045 * 4046 * Return Code: 0 - Successful completion 4047 * EINVAL - Invalid value in un->un_tgt_blocksize or 4048 * un->un_blockcount; or label on disk is corrupted 4049 * or unreadable. 4050 * EACCES - Reservation conflict at the device. 4051 * ENOMEM - Resource allocation error 4052 * ENOTSUP - geometry not applicable 4053 * 4054 * Context: Kernel thread only (can sleep). 4055 */ 4056 4057 static int 4058 sd_validate_geometry(struct sd_lun *un, int path_flag) 4059 { 4060 static char labelstring[128]; 4061 static char buf[256]; 4062 char *label = NULL; 4063 int label_error = 0; 4064 int gvalid = un->un_f_geometry_is_valid; 4065 int lbasize; 4066 uint_t capacity; 4067 int count; 4068 4069 ASSERT(un != NULL); 4070 ASSERT(mutex_owned(SD_MUTEX(un))); 4071 4072 /* 4073 * If the required values are not valid, then try getting them 4074 * once via read capacity. If that fails, then fail this call. 4075 * This is necessary with the new mpxio failover behavior in 4076 * the T300 where we can get an attach for the inactive path 4077 * before the active path.
The inactive path fails commands with 4078 * sense data of 02,04,88; this happens to the read capacity issued 4079 * before mpxio has sufficient knowledge to know whether it should 4080 * force a failover or not (which it won't do at attach anyhow). 4081 * If the read capacity at attach time fails, un_tgt_blocksize and 4082 * un_blockcount won't be valid. 4083 */ 4084 if ((un->un_f_tgt_blocksize_is_valid != TRUE) || 4085 (un->un_f_blockcount_is_valid != TRUE)) { 4086 uint64_t cap; 4087 uint32_t lbasz; 4088 int rval; 4089 4090 mutex_exit(SD_MUTEX(un)); 4091 rval = sd_send_scsi_READ_CAPACITY(un, &cap, 4092 &lbasz, SD_PATH_DIRECT); 4093 mutex_enter(SD_MUTEX(un)); 4094 if (rval == 0) { 4095 /* 4096 * The following relies on 4097 * sd_send_scsi_READ_CAPACITY never 4098 * returning 0 for capacity and/or lbasize. 4099 */ 4100 sd_update_block_info(un, lbasz, cap); 4101 } 4102 4103 if ((un->un_f_tgt_blocksize_is_valid != TRUE) || 4104 (un->un_f_blockcount_is_valid != TRUE)) { 4105 return (EINVAL); 4106 } 4107 } 4108 4109 /* 4110 * Copy the lbasize and capacity so that if they're reset while we're 4111 * not holding the SD_MUTEX, we will continue to use valid values 4112 * after the SD_MUTEX is reacquired. (4119659) 4113 */ 4114 lbasize = un->un_tgt_blocksize; 4115 capacity = un->un_blockcount; 4116 4117 #if defined(_SUNOS_VTOC_16) 4118 /* 4119 * Set up the "whole disk" fdisk partition; this should always 4120 * exist, regardless of whether the disk contains an fdisk table 4121 * or vtoc. 4122 */ 4123 un->un_map[P0_RAW_DISK].dkl_cylno = 0; 4124 un->un_map[P0_RAW_DISK].dkl_nblk = capacity; 4125 #endif 4126 4127 /* 4128 * Refresh the logical and physical geometry caches 4129 * (data from MODE SENSE format/rigid disk geometry pages, 4130 * and scsi_ifgetcap("geometry")). 4131 */ 4132 sd_resync_geom_caches(un, capacity, lbasize, path_flag); 4133 4134 label_error = sd_use_efi(un, path_flag); 4135 if (label_error == 0) { 4136 /* found a valid EFI label */ 4137 SD_TRACE(SD_LOG_IO_PARTITION, un, 4138 "sd_validate_geometry: found EFI label\n"); 4139 un->un_solaris_offset = 0; 4140 un->un_solaris_size = capacity; 4141 return (ENOTSUP); 4142 } 4143 if (un->un_blockcount > DK_MAX_BLOCKS) { 4144 if (label_error == ESRCH) { 4145 /* 4146 * they've configured a LUN over 1TB, but used 4147 * format.dat to restrict format's view of the 4148 * capacity to be under 1TB 4149 */ 4150 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4151 "is >1TB and has a VTOC label: use format(1M) to either decrease the"); 4152 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 4153 "size to be < 1TB or relabel the disk with an EFI label"); 4154 } else { 4155 /* unlabeled disk over 1TB */ 4156 return (ENOTSUP); 4157 } 4158 } 4159 label_error = 0; 4160 4161 /* 4162 * At this point it is either labeled with a VTOC or it is 4163 * under 1TB. 4164 */ 4165 4166 /* 4167 * Only DIRECT ACCESS devices will have Sun labels. 4168 * CDs supposedly have a Sun label, too. 4169 */ 4170 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT || ISREMOVABLE(un)) { 4171 struct dk_label *dkl; 4172 offset_t dkl1; 4173 offset_t label_addr, real_addr; 4174 int rval; 4175 size_t buffer_size; 4176 4177 /* 4178 * Note: This will set up un->un_solaris_size and 4179 * un->un_solaris_offset.
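 * (sd_read_fdisk() returns SD_CMD_SUCCESS, SD_CMD_FAILURE, or
 * SD_CMD_RESERVATION_CONFLICT; the two failure cases are mapped to
 * ENOMEM and EACCES, respectively, in the switch below.)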
4180 */ 4181 switch (sd_read_fdisk(un, capacity, lbasize, path_flag)) { 4182 case SD_CMD_RESERVATION_CONFLICT: 4183 ASSERT(mutex_owned(SD_MUTEX(un))); 4184 return (EACCES); 4185 case SD_CMD_FAILURE: 4186 ASSERT(mutex_owned(SD_MUTEX(un))); 4187 return (ENOMEM); 4188 } 4189 4190 if (un->un_solaris_size <= DK_LABEL_LOC) { 4191 /* 4192 * Found fdisk table but no Solaris partition entry, 4193 * so don't call sd_uselabel() and don't create 4194 * a default label. 4195 */ 4196 label_error = 0; 4197 un->un_f_geometry_is_valid = TRUE; 4198 goto no_solaris_partition; 4199 } 4200 label_addr = (daddr_t)(un->un_solaris_offset + DK_LABEL_LOC); 4201 4202 /* 4203 * sys_blocksize != tgt_blocksize, need to re-adjust 4204 * blkno and save the index to beginning of dk_label 4205 */ 4206 real_addr = SD_SYS2TGTBLOCK(un, label_addr); 4207 buffer_size = SD_REQBYTES2TGTBYTES(un, 4208 sizeof (struct dk_label)); 4209 4210 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_validate_geometry: " 4211 "label_addr: 0x%x allocation size: 0x%x\n", 4212 label_addr, buffer_size); 4213 dkl = kmem_zalloc(buffer_size, KM_NOSLEEP); 4214 if (dkl == NULL) { 4215 return (ENOMEM); 4216 } 4217 4218 mutex_exit(SD_MUTEX(un)); 4219 rval = sd_send_scsi_READ(un, dkl, buffer_size, real_addr, 4220 path_flag); 4221 mutex_enter(SD_MUTEX(un)); 4222 4223 switch (rval) { 4224 case 0: 4225 /* 4226 * sd_uselabel will establish that the geometry 4227 * is valid. 4228 * For sys_blocksize != tgt_blocksize, need 4229 * to index into the beginning of dk_label 4230 */ 4231 dkl1 = (daddr_t)dkl 4232 + SD_TGTBYTEOFFSET(un, label_addr, real_addr); 4233 if (sd_uselabel(un, (struct dk_label *)(uintptr_t)dkl1, 4234 path_flag) != SD_LABEL_IS_VALID) { 4235 label_error = EINVAL; 4236 } 4237 break; 4238 case EACCES: 4239 label_error = EACCES; 4240 break; 4241 default: 4242 label_error = EINVAL; 4243 break; 4244 } 4245 4246 kmem_free(dkl, buffer_size); 4247 4248 #if defined(_SUNOS_VTOC_8) 4249 label = (char *)un->un_asciilabel; 4250 #elif defined(_SUNOS_VTOC_16) 4251 label = (char *)un->un_vtoc.v_asciilabel; 4252 #else 4253 #error "No VTOC format defined." 4254 #endif 4255 } 4256 4257 /* 4258 * If a valid label was not found, AND if no reservation conflict 4259 * was detected, then go ahead and create a default label (4069506). 4260 * 4261 * Note: currently, for VTOC_8 devices, the default label is created 4262 * for removables only. For VTOC_16 devices, the default label will 4263 * be created for both removables and non-removables alike. 4264 * (see sd_build_default_label) 4265 */ 4266 #if defined(_SUNOS_VTOC_8) 4267 if (ISREMOVABLE(un) && (label_error != EACCES)) { 4268 #elif defined(_SUNOS_VTOC_16) 4269 if (label_error != EACCES) { 4270 #endif 4271 if (un->un_f_geometry_is_valid == FALSE) { 4272 sd_build_default_label(un); 4273 } 4274 label_error = 0; 4275 } 4276 4277 no_solaris_partition: 4278 if ((!ISREMOVABLE(un) || 4279 (ISREMOVABLE(un) && un->un_mediastate == DKIO_EJECTED)) && 4280 (un->un_state == SD_STATE_NORMAL && gvalid == FALSE)) { 4281 /* 4282 * Print out a message indicating who and what we are. 4283 * We do this only when we happen to really validate the 4284 * geometry. We may call sd_validate_geometry() at other 4285 * times, e.g., ioctl()'s like Get VTOC in which case we 4286 * don't want to print the label. 
4287 * If the geometry is valid, print the label string, 4288 * else print vendor and product info, if available 4289 */ 4290 if ((un->un_f_geometry_is_valid == TRUE) && (label != NULL)) { 4291 SD_INFO(SD_LOG_ATTACH_DETACH, un, "?<%s>\n", label); 4292 } else { 4293 mutex_enter(&sd_label_mutex); 4294 sd_inq_fill(SD_INQUIRY(un)->inq_vid, VIDMAX, 4295 labelstring); 4296 sd_inq_fill(SD_INQUIRY(un)->inq_pid, PIDMAX, 4297 &labelstring[64]); 4298 (void) sprintf(buf, "?Vendor '%s', product '%s'", 4299 labelstring, &labelstring[64]); 4300 if (un->un_f_blockcount_is_valid == TRUE) { 4301 (void) sprintf(&buf[strlen(buf)], 4302 ", %llu %u byte blocks\n", 4303 (longlong_t)un->un_blockcount, 4304 un->un_tgt_blocksize); 4305 } else { 4306 (void) sprintf(&buf[strlen(buf)], 4307 ", (unknown capacity)\n"); 4308 } 4309 SD_INFO(SD_LOG_ATTACH_DETACH, un, buf); 4310 mutex_exit(&sd_label_mutex); 4311 } 4312 } 4313 4314 #if defined(_SUNOS_VTOC_16) 4315 /* 4316 * If we have valid geometry, set up the remaining fdisk partitions. 4317 * Note that dkl_cylno is not used for the fdisk map entries, so 4318 * we set it to an entirely bogus value. 4319 */ 4320 for (count = 0; count < FD_NUMPART; count++) { 4321 un->un_map[FDISK_P1 + count].dkl_cylno = -1; 4322 un->un_map[FDISK_P1 + count].dkl_nblk = 4323 un->un_fmap[count].fmap_nblk; 4324 4325 un->un_offset[FDISK_P1 + count] = 4326 un->un_fmap[count].fmap_start; 4327 } 4328 #endif 4329 4330 for (count = 0; count < NDKMAP; count++) { 4331 #if defined(_SUNOS_VTOC_8) 4332 struct dk_map *lp = &un->un_map[count]; 4333 un->un_offset[count] = 4334 un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno; 4335 #elif defined(_SUNOS_VTOC_16) 4336 struct dkl_partition *vp = &un->un_vtoc.v_part[count]; 4337 4338 un->un_offset[count] = vp->p_start + un->un_solaris_offset; 4339 #else 4340 #error "No VTOC format defined." 4341 #endif 4342 } 4343 4344 return (label_error); 4345 } 4346 4347 4348 #if defined(_SUNOS_VTOC_16) 4349 /* 4350 * Macro: MAX_BLKS 4351 * 4352 * This macro is used for table entries where we need to have the largest 4353 * possible sector value for that head & SPT (sectors per track) 4354 * combination. Other entries for some smaller disk sizes are set by 4355 * convention to match those used by X86 BIOS usage. 4356 */ 4357 #define MAX_BLKS(heads, spt) UINT16_MAX * heads * spt, heads, spt 4358 4359 /* 4360 * Function: sd_convert_geometry 4361 * 4362 * Description: Convert physical geometry into a dk_geom structure. In 4363 * other words, make sure we don't wrap 16-bit values. 4364 * e.g. converting from geom_cache to dk_geom 4365 * 4366 * Context: Kernel thread only 4367 */ 4368 static void 4369 sd_convert_geometry(uint64_t capacity, struct dk_geom *un_g) 4370 { 4371 int i; 4372 static const struct chs_values { 4373 uint_t max_cap; /* Max Capacity for this HS. */ 4374 uint_t nhead; /* Heads to use. */ 4375 uint_t nsect; /* SPT to use. */ 4376 } CHS_values[] = { 4377 {0x00200000, 64, 32}, /* 1GB or smaller disk. */ 4378 {0x01000000, 128, 32}, /* 8GB or smaller disk. */ 4379 {MAX_BLKS(255, 63)}, /* 502.02GB or smaller disk. */ 4380 {MAX_BLKS(255, 126)}, /* .98TB or smaller disk. 
*/ 4381 {DK_MAX_BLOCKS, 255, 189} /* Max size is just under 1TB */ 4382 }; 4383 4384 /* Unlabeled SCSI floppy device */ 4385 if (capacity <= 0x1000) { 4386 un_g->dkg_nhead = 2; 4387 un_g->dkg_ncyl = 80; 4388 un_g->dkg_nsect = capacity / (un_g->dkg_nhead * un_g->dkg_ncyl); 4389 return; 4390 } 4391 4392 /* 4393 * For all devices we calculate cylinders using the 4394 * heads and sectors we assign based on capacity of the 4395 * device. The table is designed to be compatible with the 4396 * way other operating systems lay out fdisk tables for X86 4397 * and to ensure that the cylinders never exceed 65535 to 4398 * prevent problems with X86 ioctls that report geometry. 4399 * Other OSes that are not limited to 16 bits for cylinders 4400 * stop at 63 SPT; since we are so limited, we make do by 4401 * using SPT values that are multiples of 63. 4402 * 4403 * Note that capacities greater than or equal to 1TB will simply 4404 * get the largest geometry from the table. This should be okay 4405 * since disks this large shouldn't be using CHS values anyway. 4406 */ 4407 for (i = 0; CHS_values[i].max_cap < capacity && 4408 CHS_values[i].max_cap != DK_MAX_BLOCKS; i++) 4409 ; 4410 4411 un_g->dkg_nhead = CHS_values[i].nhead; 4412 un_g->dkg_nsect = CHS_values[i].nsect; 4413 } 4414 #endif 4415 4416 4417 /* 4418 * Function: sd_resync_geom_caches 4419 * 4420 * Description: (Re)initialize both geometry caches: the virtual geometry 4421 * information is extracted from the HBA (the "geometry" 4422 * capability), and the physical geometry cache data is 4423 * generated by issuing MODE SENSE commands. 4424 * 4425 * Arguments: un - driver soft state (unit) structure 4426 * capacity - disk capacity in #blocks 4427 * lbasize - disk block size in bytes 4428 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4429 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4430 * to use the USCSI "direct" chain and bypass the normal 4431 * command waitq. 4432 * 4433 * Context: Kernel thread only (can sleep). 4434 */ 4435 4436 static void 4437 sd_resync_geom_caches(struct sd_lun *un, int capacity, int lbasize, 4438 int path_flag) 4439 { 4440 struct geom_cache pgeom; 4441 struct geom_cache *pgeom_p = &pgeom; 4442 int spc; 4443 unsigned short nhead; 4444 unsigned short nsect; 4445 4446 ASSERT(un != NULL); 4447 ASSERT(mutex_owned(SD_MUTEX(un))); 4448 4449 /* 4450 * Ask the controller for its logical geometry. 4451 * Note: if the HBA does not support scsi_ifgetcap("geometry"), 4452 * then the lgeom cache will be invalid. 4453 */ 4454 sd_get_virtual_geometry(un, capacity, lbasize); 4455 4456 /* 4457 * Initialize the pgeom cache from lgeom, so that if MODE SENSE 4458 * doesn't work, DKIOCG_PHYSGEOM can return reasonable values. 4459 */ 4460 if (un->un_lgeom.g_nsect == 0 || un->un_lgeom.g_nhead == 0) { 4461 /* 4462 * Note: Perhaps this needs to be more adaptive? The rationale 4463 * is that, if there's no HBA geometry from the HBA driver, any 4464 * guess is good, since this is the physical geometry.
If MODE 4465 * SENSE fails this gives a max cylinder size for non-LBA access 4466 */ 4467 nhead = 255; 4468 nsect = 63; 4469 } else { 4470 nhead = un->un_lgeom.g_nhead; 4471 nsect = un->un_lgeom.g_nsect; 4472 } 4473 4474 if (ISCD(un)) { 4475 pgeom_p->g_nhead = 1; 4476 pgeom_p->g_nsect = nsect * nhead; 4477 } else { 4478 pgeom_p->g_nhead = nhead; 4479 pgeom_p->g_nsect = nsect; 4480 } 4481 4482 spc = pgeom_p->g_nhead * pgeom_p->g_nsect; 4483 pgeom_p->g_capacity = capacity; 4484 pgeom_p->g_ncyl = pgeom_p->g_capacity / spc; 4485 pgeom_p->g_acyl = 0; 4486 4487 /* 4488 * Retrieve fresh geometry data from the hardware, stash it 4489 * here temporarily before we rebuild the incore label. 4490 * 4491 * We want to use the MODE SENSE commands to derive the 4492 * physical geometry of the device, but if either command 4493 * fails, the logical geometry is used as the fallback for 4494 * disk label geometry. 4495 */ 4496 mutex_exit(SD_MUTEX(un)); 4497 sd_get_physical_geometry(un, pgeom_p, capacity, lbasize, path_flag); 4498 mutex_enter(SD_MUTEX(un)); 4499 4500 /* 4501 * Now update the real copy while holding the mutex. This 4502 * way the global copy is never in an inconsistent state. 4503 */ 4504 bcopy(pgeom_p, &un->un_pgeom, sizeof (un->un_pgeom)); 4505 4506 SD_INFO(SD_LOG_COMMON, un, "sd_resync_geom_caches: " 4507 "(cached from lgeom)\n"); 4508 SD_INFO(SD_LOG_COMMON, un, 4509 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4510 un->un_pgeom.g_ncyl, un->un_pgeom.g_acyl, 4511 un->un_pgeom.g_nhead, un->un_pgeom.g_nsect); 4512 SD_INFO(SD_LOG_COMMON, un, " lbasize: %d; capacity: %ld; " 4513 "intrlv: %d; rpm: %d\n", un->un_pgeom.g_secsize, 4514 un->un_pgeom.g_capacity, un->un_pgeom.g_intrlv, 4515 un->un_pgeom.g_rpm); 4516 } 4517 4518 4519 /* 4520 * Function: sd_read_fdisk 4521 * 4522 * Description: utility routine to read the fdisk table. 4523 * 4524 * Arguments: un - driver soft state (unit) structure 4525 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4526 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4527 * to use the USCSI "direct" chain and bypass the normal 4528 * command waitq. 4529 * 4530 * Return Code: SD_CMD_SUCCESS 4531 * SD_CMD_FAILURE 4532 * 4533 * Context: Kernel thread only (can sleep). 4534 */ 4535 /* ARGSUSED */ 4536 static int 4537 sd_read_fdisk(struct sd_lun *un, uint_t capacity, int lbasize, int path_flag) 4538 { 4539 #if defined(_NO_FDISK_PRESENT) 4540 4541 un->un_solaris_offset = 0; 4542 un->un_solaris_size = capacity; 4543 bzero(un->un_fmap, sizeof (struct fmap) * FD_NUMPART); 4544 return (SD_CMD_SUCCESS); 4545 4546 #elif defined(_FIRMWARE_NEEDS_FDISK) 4547 4548 struct ipart *fdp; 4549 struct mboot *mbp; 4550 struct ipart fdisk[FD_NUMPART]; 4551 int i; 4552 char sigbuf[2]; 4553 caddr_t bufp; 4554 int uidx; 4555 int rval; 4556 int lba = 0; 4557 uint_t solaris_offset; /* offset to solaris part. 
*/ 4558 daddr_t solaris_size; /* size of solaris partition */ 4559 uint32_t blocksize; 4560 4561 ASSERT(un != NULL); 4562 ASSERT(mutex_owned(SD_MUTEX(un))); 4563 ASSERT(un->un_f_tgt_blocksize_is_valid == TRUE); 4564 4565 blocksize = un->un_tgt_blocksize; 4566 4567 /* 4568 * Start off assuming no fdisk table 4569 */ 4570 solaris_offset = 0; 4571 solaris_size = capacity; 4572 4573 mutex_exit(SD_MUTEX(un)); 4574 bufp = kmem_zalloc(blocksize, KM_SLEEP); 4575 rval = sd_send_scsi_READ(un, bufp, blocksize, 0, path_flag); 4576 mutex_enter(SD_MUTEX(un)); 4577 4578 if (rval != 0) { 4579 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 4580 "sd_read_fdisk: fdisk read err\n"); 4581 kmem_free(bufp, blocksize); 4582 return (SD_CMD_FAILURE); 4583 } 4584 4585 mbp = (struct mboot *)bufp; 4586 4587 /* 4588 * The fdisk table does not begin on a 4-byte boundary within the 4589 * master boot record, so we copy it to an aligned structure to avoid 4590 * alignment exceptions on some processors. 4591 */ 4592 bcopy(&mbp->parts[0], fdisk, sizeof (fdisk)); 4593 4594 /* 4595 * Check for lba support before verifying sig; sig might not be 4596 * there, say on a blank disk, but the max_chs mark may still 4597 * be present. 4598 * 4599 * Note: LBA support and BEFs are an x86-only concept but this 4600 * code should work OK on SPARC as well. 4601 */ 4602 4603 /* 4604 * First, check for lba-access-ok on root node (or prom root node) 4605 * if present there, don't need to search fdisk table. 4606 */ 4607 if (ddi_getprop(DDI_DEV_T_ANY, ddi_root_node(), 0, 4608 "lba-access-ok", 0) != 0) { 4609 /* All drives do LBA; don't search fdisk table */ 4610 lba = 1; 4611 } else { 4612 /* Okay, look for mark in fdisk table */ 4613 for (fdp = fdisk, i = 0; i < FD_NUMPART; i++, fdp++) { 4614 /* accumulate "lba" value from all partitions */ 4615 lba = (lba || sd_has_max_chs_vals(fdp)); 4616 } 4617 } 4618 4619 /* 4620 * Next, look for 'no-bef-lba-access' prop on parent. 4621 * Its presence means the realmode driver doesn't support 4622 * LBA, so the target driver shouldn't advertise it as ok. 4623 * This should be a temporary condition; one day all 4624 * BEFs should support the LBA access functions. 
4625 */ 4626 if ((lba != 0) && (ddi_getprop(DDI_DEV_T_ANY, 4627 ddi_get_parent(SD_DEVINFO(un)), DDI_PROP_DONTPASS, 4628 "no-bef-lba-access", 0) != 0)) { 4629 /* BEF doesn't support LBA; don't advertise it as ok */ 4630 lba = 0; 4631 } 4632 4633 if (lba != 0) { 4634 dev_t dev = sd_make_device(SD_DEVINFO(un)); 4635 4636 if (ddi_getprop(dev, SD_DEVINFO(un), DDI_PROP_DONTPASS, 4637 "lba-access-ok", 0) == 0) { 4638 /* not found; create it */ 4639 if (ddi_prop_create(dev, SD_DEVINFO(un), 0, 4640 "lba-access-ok", (caddr_t)NULL, 0) != 4641 DDI_PROP_SUCCESS) { 4642 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 4643 "sd_read_fdisk: Can't create lba property " 4644 "for instance %d\n", 4645 ddi_get_instance(SD_DEVINFO(un))); 4646 } 4647 } 4648 } 4649 4650 bcopy(&mbp->signature, sigbuf, sizeof (sigbuf)); 4651 4652 /* 4653 * Endian-independent signature check 4654 */ 4655 if (((sigbuf[1] & 0xFF) != ((MBB_MAGIC >> 8) & 0xFF)) || 4656 (sigbuf[0] != (MBB_MAGIC & 0xFF))) { 4657 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 4658 "sd_read_fdisk: no fdisk\n"); 4659 bzero(un->un_fmap, sizeof (struct fmap) * FD_NUMPART); 4660 rval = SD_CMD_SUCCESS; 4661 goto done; 4662 } 4663 4664 #ifdef SDDEBUG 4665 if (sd_level_mask & SD_LOGMASK_INFO) { 4666 fdp = fdisk; 4667 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_read_fdisk:\n"); 4668 SD_INFO(SD_LOG_ATTACH_DETACH, un, " relsect " 4669 "numsect sysid bootid\n"); 4670 for (i = 0; i < FD_NUMPART; i++, fdp++) { 4671 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4672 " %d: %8d %8d 0x%08x 0x%08x\n", 4673 i, fdp->relsect, fdp->numsect, 4674 fdp->systid, fdp->bootid); 4675 } 4676 } 4677 #endif 4678 4679 /* 4680 * Try to find the unix partition 4681 */ 4682 uidx = -1; 4683 solaris_offset = 0; 4684 solaris_size = 0; 4685 4686 for (fdp = fdisk, i = 0; i < FD_NUMPART; i++, fdp++) { 4687 int relsect; 4688 int numsect; 4689 4690 if (fdp->numsect == 0) { 4691 un->un_fmap[i].fmap_start = 0; 4692 un->un_fmap[i].fmap_nblk = 0; 4693 continue; 4694 } 4695 4696 /* 4697 * Data in the fdisk table is little-endian. 4698 */ 4699 relsect = LE_32(fdp->relsect); 4700 numsect = LE_32(fdp->numsect); 4701 4702 un->un_fmap[i].fmap_start = relsect; 4703 un->un_fmap[i].fmap_nblk = numsect; 4704 4705 if (fdp->systid != SUNIXOS && 4706 fdp->systid != SUNIXOS2 && 4707 fdp->systid != EFI_PMBR) { 4708 continue; 4709 } 4710 4711 /* 4712 * use the last active solaris partition id found 4713 * (there should only be 1 active partition id) 4714 * 4715 * if there are no active solaris partition id 4716 * then use the first inactive solaris partition id 4717 */ 4718 if ((uidx == -1) || (fdp->bootid == ACTIVE)) { 4719 uidx = i; 4720 solaris_offset = relsect; 4721 solaris_size = numsect; 4722 } 4723 } 4724 4725 SD_INFO(SD_LOG_ATTACH_DETACH, un, "fdisk 0x%x 0x%lx", 4726 un->un_solaris_offset, un->un_solaris_size); 4727 4728 rval = SD_CMD_SUCCESS; 4729 4730 done: 4731 4732 /* 4733 * Clear the VTOC info, only if the Solaris partition entry 4734 * has moved, changed size, been deleted, or if the size of 4735 * the partition is too small to even fit the label sector. 
4736 */ 4737 if ((un->un_solaris_offset != solaris_offset) || 4738 (un->un_solaris_size != solaris_size) || 4739 solaris_size <= DK_LABEL_LOC) { 4740 SD_INFO(SD_LOG_ATTACH_DETACH, un, "fdisk moved 0x%x 0x%lx", 4741 solaris_offset, solaris_size); 4742 bzero(&un->un_g, sizeof (struct dk_geom)); 4743 bzero(&un->un_vtoc, sizeof (struct dk_vtoc)); 4744 bzero(&un->un_map, NDKMAP * (sizeof (struct dk_map))); 4745 un->un_f_geometry_is_valid = FALSE; 4746 } 4747 un->un_solaris_offset = solaris_offset; 4748 un->un_solaris_size = solaris_size; 4749 kmem_free(bufp, blocksize); 4750 return (rval); 4751 4752 #else /* #elif defined(_FIRMWARE_NEEDS_FDISK) */ 4753 #error "fdisk table presence undetermined for this platform." 4754 #endif /* #if defined(_NO_FDISK_PRESENT) */ 4755 } 4756 4757 4758 /* 4759 * Function: sd_get_physical_geometry 4760 * 4761 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4762 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4763 * target, and use this information to initialize the physical 4764 * geometry cache specified by pgeom_p. 4765 * 4766 * MODE SENSE is an optional command, so failure in this case 4767 * does not necessarily denote an error. We want to use the 4768 * MODE SENSE commands to derive the physical geometry of the 4769 * device, but if either command fails, the logical geometry is 4770 * used as the fallback for disk label geometry. 4771 * 4772 * This requires that un->un_blockcount and un->un_tgt_blocksize 4773 * have already been initialized for the current target and 4774 * that the current values be passed as args so that we don't 4775 * end up ever trying to use -1 as a valid value. This could 4776 * happen if either value is reset while we're not holding 4777 * the mutex. 4778 * 4779 * Arguments: un - driver soft state (unit) structure 4780 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4781 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4782 * to use the USCSI "direct" chain and bypass the normal 4783 * command waitq. 4784 * 4785 * Context: Kernel thread only (can sleep). 4786 */ 4787 4788 static void 4789 sd_get_physical_geometry(struct sd_lun *un, struct geom_cache *pgeom_p, 4790 int capacity, int lbasize, int path_flag) 4791 { 4792 struct mode_format *page3p; 4793 struct mode_geometry *page4p; 4794 struct mode_header *headerp; 4795 int sector_size; 4796 int nsect; 4797 int nhead; 4798 int ncyl; 4799 int intrlv; 4800 int spc; 4801 int modesense_capacity; 4802 int rpm; 4803 int bd_len; 4804 int mode_header_length; 4805 uchar_t *p3bufp; 4806 uchar_t *p4bufp; 4807 int cdbsize; 4808 4809 ASSERT(un != NULL); 4810 ASSERT(!(mutex_owned(SD_MUTEX(un)))); 4811 4812 if (un->un_f_blockcount_is_valid != TRUE) { 4813 return; 4814 } 4815 4816 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 4817 return; 4818 } 4819 4820 if (lbasize == 0) { 4821 if (ISCD(un)) { 4822 lbasize = 2048; 4823 } else { 4824 lbasize = un->un_sys_blocksize; 4825 } 4826 } 4827 pgeom_p->g_secsize = (unsigned short)lbasize; 4828 4829 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? 
CDB_GROUP2 : CDB_GROUP0; 4830 4831 /* 4832 * Retrieve MODE SENSE page 3 - Format Device Page 4833 */ 4834 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4835 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4836 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4837 != 0) { 4838 SD_ERROR(SD_LOG_COMMON, un, 4839 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4840 goto page3_exit; 4841 } 4842 4843 /* 4844 * Determine size of Block Descriptors in order to locate the mode 4845 * page data. ATAPI devices return 0, SCSI devices should return 4846 * MODE_BLK_DESC_LENGTH. 4847 */ 4848 headerp = (struct mode_header *)p3bufp; 4849 if (un->un_f_cfg_is_atapi == TRUE) { 4850 struct mode_header_grp2 *mhp = 4851 (struct mode_header_grp2 *)headerp; 4852 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4853 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4854 } else { 4855 mode_header_length = MODE_HEADER_LENGTH; 4856 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4857 } 4858 4859 if (bd_len > MODE_BLK_DESC_LENGTH) { 4860 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4861 "received unexpected bd_len of %d, page3\n", bd_len); 4862 goto page3_exit; 4863 } 4864 4865 page3p = (struct mode_format *) 4866 ((caddr_t)headerp + mode_header_length + bd_len); 4867 4868 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4869 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4870 "mode sense pg3 code mismatch %d\n", 4871 page3p->mode_page.code); 4872 goto page3_exit; 4873 } 4874 4875 /* 4876 * Use this physical geometry data only if BOTH MODE SENSE commands 4877 * complete successfully; otherwise, revert to the logical geometry. 4878 * So, we need to save everything in temporary variables. 4879 */ 4880 sector_size = BE_16(page3p->data_bytes_sect); 4881 4882 /* 4883 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4884 */ 4885 if (sector_size == 0) { 4886 sector_size = (ISCD(un)) ? 2048 : un->un_sys_blocksize; 4887 } else { 4888 sector_size &= ~(un->un_sys_blocksize - 1); 4889 } 4890 4891 nsect = BE_16(page3p->sect_track); 4892 intrlv = BE_16(page3p->interleave); 4893 4894 SD_INFO(SD_LOG_COMMON, un, 4895 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4896 SD_INFO(SD_LOG_COMMON, un, 4897 " mode page: %d; nsect: %d; sector size: %d;\n", 4898 page3p->mode_page.code, nsect, sector_size); 4899 SD_INFO(SD_LOG_COMMON, un, 4900 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4901 BE_16(page3p->track_skew), 4902 BE_16(page3p->cylinder_skew)); 4903 4904 4905 /* 4906 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4907 */ 4908 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4909 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4910 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4911 != 0) { 4912 SD_ERROR(SD_LOG_COMMON, un, 4913 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4914 goto page4_exit; 4915 } 4916 4917 /* 4918 * Determine size of Block Descriptors in order to locate the mode 4919 * page data. ATAPI devices return 0, SCSI devices should return 4920 * MODE_BLK_DESC_LENGTH. 
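The returned data is laid
 * out as [mode header][block descriptor(s)][mode page], so the page
 * pointer below is headerp + mode_header_length + bd_len.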
4921 */ 4922 headerp = (struct mode_header *)p4bufp; 4923 if (un->un_f_cfg_is_atapi == TRUE) { 4924 struct mode_header_grp2 *mhp = 4925 (struct mode_header_grp2 *)headerp; 4926 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4927 } else { 4928 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4929 } 4930 4931 if (bd_len > MODE_BLK_DESC_LENGTH) { 4932 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4933 "received unexpected bd_len of %d, page4\n", bd_len); 4934 goto page4_exit; 4935 } 4936 4937 page4p = (struct mode_geometry *) 4938 ((caddr_t)headerp + mode_header_length + bd_len); 4939 4940 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4941 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4942 "mode sense pg4 code mismatch %d\n", 4943 page4p->mode_page.code); 4944 goto page4_exit; 4945 } 4946 4947 /* 4948 * Stash the data now, after we know that both commands completed. 4949 */ 4950 4951 mutex_enter(SD_MUTEX(un)); 4952 4953 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4954 spc = nhead * nsect; 4955 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4956 rpm = BE_16(page4p->rpm); 4957 4958 modesense_capacity = spc * ncyl; 4959 4960 SD_INFO(SD_LOG_COMMON, un, 4961 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4962 SD_INFO(SD_LOG_COMMON, un, 4963 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4964 SD_INFO(SD_LOG_COMMON, un, 4965 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4966 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4967 (void *)pgeom_p, capacity); 4968 4969 /* 4970 * Compensate if the drive's geometry is not rectangular, i.e., 4971 * the product of C * H * S returned by MODE SENSE >= that returned 4972 * by read capacity. This is an idiosyncrasy of the original x86 4973 * disk subsystem. 
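For example (hypothetical
 * numbers): with spc = 255 * 63 = 16065 and a MODE SENSE surplus of
 * 20000 blocks over the read capacity value, g_acyl below becomes
 * (20000 + 16065 - 1) / 16065 = 2 alternate cylinders.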
4974 */ 4975 if (modesense_capacity >= capacity) { 4976 SD_INFO(SD_LOG_COMMON, un, 4977 "sd_get_physical_geometry: adjusting acyl; " 4978 "old: %d; new: %d\n", pgeom_p->g_acyl, 4979 (modesense_capacity - capacity + spc - 1) / spc); 4980 if (sector_size != 0) { 4981 /* 1243403: NEC D38x7 drives don't support sec size */ 4982 pgeom_p->g_secsize = (unsigned short)sector_size; 4983 } 4984 pgeom_p->g_nsect = (unsigned short)nsect; 4985 pgeom_p->g_nhead = (unsigned short)nhead; 4986 pgeom_p->g_capacity = capacity; 4987 pgeom_p->g_acyl = 4988 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4989 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4990 } 4991 4992 pgeom_p->g_rpm = (unsigned short)rpm; 4993 pgeom_p->g_intrlv = (unsigned short)intrlv; 4994 4995 SD_INFO(SD_LOG_COMMON, un, 4996 "sd_get_physical_geometry: mode sense geometry:\n"); 4997 SD_INFO(SD_LOG_COMMON, un, 4998 " nsect: %d; sector size: %d; interlv: %d\n", 4999 nsect, sector_size, intrlv); 5000 SD_INFO(SD_LOG_COMMON, un, 5001 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 5002 nhead, ncyl, rpm, modesense_capacity); 5003 SD_INFO(SD_LOG_COMMON, un, 5004 "sd_get_physical_geometry: (cached)\n"); 5005 SD_INFO(SD_LOG_COMMON, un, 5006 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 5007 un->un_pgeom.g_ncyl, un->un_pgeom.g_acyl, 5008 un->un_pgeom.g_nhead, un->un_pgeom.g_nsect); 5009 SD_INFO(SD_LOG_COMMON, un, 5010 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 5011 un->un_pgeom.g_secsize, un->un_pgeom.g_capacity, 5012 un->un_pgeom.g_intrlv, un->un_pgeom.g_rpm); 5013 5014 mutex_exit(SD_MUTEX(un)); 5015 5016 page4_exit: 5017 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 5018 page3_exit: 5019 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 5020 } 5021 5022 5023 /* 5024 * Function: sd_get_virtual_geometry 5025 * 5026 * Description: Ask the controller to tell us about the target device. 5027 * 5028 * Arguments: un - pointer to softstate 5029 * capacity - disk capacity in #blocks 5030 * lbasize - disk block size in bytes 5031 * 5032 * Context: Kernel thread only 5033 */ 5034 5035 static void 5036 sd_get_virtual_geometry(struct sd_lun *un, int capacity, int lbasize) 5037 { 5038 struct geom_cache *lgeom_p = &un->un_lgeom; 5039 uint_t geombuf; 5040 int spc; 5041 5042 ASSERT(un != NULL); 5043 ASSERT(mutex_owned(SD_MUTEX(un))); 5044 5045 mutex_exit(SD_MUTEX(un)); 5046 5047 /* Set sector size, and total number of sectors */ 5048 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 5049 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 5050 5051 /* Let the HBA tell us its geometry */ 5052 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 5053 5054 mutex_enter(SD_MUTEX(un)); 5055 5056 /* A value of -1 indicates an undefined "geometry" property */ 5057 if (geombuf == (-1)) { 5058 return; 5059 } 5060 5061 /* Initialize the logical geometry cache. */ 5062 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 5063 lgeom_p->g_nsect = geombuf & 0xffff; 5064 lgeom_p->g_secsize = un->un_sys_blocksize; 5065 5066 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 5067 5068 /* 5069 * Note: The driver originally converted the capacity value from 5070 * target blocks to system blocks. However, the capacity value passed 5071 * to this routine is already in terms of system blocks (this scaling 5072 * is done when the READ CAPACITY command is issued and processed). 
5073 * This 'error' may have gone undetected because the usage of g_ncyl 5074 * (which is based upon g_capacity) is very limited within the driver. 5075 */ 5076 lgeom_p->g_capacity = capacity; 5077 5078 /* 5079 * Set ncyl to zero if the HBA returned a zero nhead or nsect value. The 5080 * HBA may return zero values if the device has been removed. 5081 */ 5082 if (spc == 0) { 5083 lgeom_p->g_ncyl = 0; 5084 } else { 5085 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 5086 } 5087 lgeom_p->g_acyl = 0; 5088 5089 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 5090 SD_INFO(SD_LOG_COMMON, un, 5091 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 5092 un->un_lgeom.g_ncyl, un->un_lgeom.g_acyl, 5093 un->un_lgeom.g_nhead, un->un_lgeom.g_nsect); 5094 SD_INFO(SD_LOG_COMMON, un, " lbasize: %d; capacity: %ld; " 5095 "intrlv: %d; rpm: %d\n", un->un_lgeom.g_secsize, 5096 un->un_lgeom.g_capacity, un->un_lgeom.g_intrlv, un->un_lgeom.g_rpm); 5097 } 5098 5099 5100 /* 5101 * Function: sd_update_block_info 5102 * 5103 * Description: Record the new target sector size and capacity in the 5104 * soft state, marking each value valid when it is non-zero. 5105 * 5106 * Arguments: un: unit struct. 5107 * lbasize: new target sector size 5108 * capacity: new target capacity, i.e., block count 5109 * 5110 * Context: Kernel thread context 5111 */ 5112 5113 static void 5114 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 5115 { 5116 if (lbasize != 0) { 5117 un->un_tgt_blocksize = lbasize; 5118 un->un_f_tgt_blocksize_is_valid = TRUE; 5119 } 5120 5121 if (capacity != 0) { 5122 un->un_blockcount = capacity; 5123 un->un_f_blockcount_is_valid = TRUE; 5124 } 5125 } 5126 5127 5128 static void 5129 sd_swap_efi_gpt(efi_gpt_t *e) 5130 { 5131 _NOTE(ASSUMING_PROTECTED(*e)) 5132 e->efi_gpt_Signature = LE_64(e->efi_gpt_Signature); 5133 e->efi_gpt_Revision = LE_32(e->efi_gpt_Revision); 5134 e->efi_gpt_HeaderSize = LE_32(e->efi_gpt_HeaderSize); 5135 e->efi_gpt_HeaderCRC32 = LE_32(e->efi_gpt_HeaderCRC32); 5136 e->efi_gpt_MyLBA = LE_64(e->efi_gpt_MyLBA); 5137 e->efi_gpt_AlternateLBA = LE_64(e->efi_gpt_AlternateLBA); 5138 e->efi_gpt_FirstUsableLBA = LE_64(e->efi_gpt_FirstUsableLBA); 5139 e->efi_gpt_LastUsableLBA = LE_64(e->efi_gpt_LastUsableLBA); 5140 UUID_LE_CONVERT(e->efi_gpt_DiskGUID, e->efi_gpt_DiskGUID); 5141 e->efi_gpt_PartitionEntryLBA = LE_64(e->efi_gpt_PartitionEntryLBA); 5142 e->efi_gpt_NumberOfPartitionEntries = 5143 LE_32(e->efi_gpt_NumberOfPartitionEntries); 5144 e->efi_gpt_SizeOfPartitionEntry = 5145 LE_32(e->efi_gpt_SizeOfPartitionEntry); 5146 e->efi_gpt_PartitionEntryArrayCRC32 = 5147 LE_32(e->efi_gpt_PartitionEntryArrayCRC32); 5148 } 5149 5150 static void 5151 sd_swap_efi_gpe(int nparts, efi_gpe_t *p) 5152 { 5153 int i; 5154 5155 _NOTE(ASSUMING_PROTECTED(*p)) 5156 for (i = 0; i < nparts; i++) { 5157 UUID_LE_CONVERT(p[i].efi_gpe_PartitionTypeGUID, 5158 p[i].efi_gpe_PartitionTypeGUID); 5159 p[i].efi_gpe_StartingLBA = LE_64(p[i].efi_gpe_StartingLBA); 5160 p[i].efi_gpe_EndingLBA = LE_64(p[i].efi_gpe_EndingLBA); 5161 /* PartitionAttrs */ 5162 } 5163 } 5164 5165 static int 5166 sd_validate_efi(efi_gpt_t *labp) 5167 { 5168 if (labp->efi_gpt_Signature != EFI_SIGNATURE) 5169 return (EINVAL); 5170 /* at least 96 bytes in this version of the spec.
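 * The check below rejects, for example, a zeroed header
 * (efi_gpt_HeaderSize == 0): the fixed portion of efi_gpt_t (the
 * structure minus its trailing Reserved2 padding) must fit within the
 * reported header size.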
*/ 5171 if (sizeof (efi_gpt_t) - sizeof (labp->efi_gpt_Reserved2) > 5172 labp->efi_gpt_HeaderSize) 5173 return (EINVAL); 5174 /* this should be 128 bytes */ 5175 if (labp->efi_gpt_SizeOfPartitionEntry != sizeof (efi_gpe_t)) 5176 return (EINVAL); 5177 return (0); 5178 } 5179 5180 static int 5181 sd_use_efi(struct sd_lun *un, int path_flag) 5182 { 5183 int i; 5184 int rval = 0; 5185 efi_gpe_t *partitions; 5186 uchar_t *buf; 5187 uint_t lbasize; 5188 uint64_t cap; 5189 uint_t nparts; 5190 diskaddr_t gpe_lba; 5191 5192 ASSERT(mutex_owned(SD_MUTEX(un))); 5193 lbasize = un->un_tgt_blocksize; 5194 5195 mutex_exit(SD_MUTEX(un)); 5196 5197 buf = kmem_zalloc(EFI_MIN_ARRAY_SIZE, KM_SLEEP); 5198 5199 if (un->un_tgt_blocksize != un->un_sys_blocksize) { 5200 rval = EINVAL; 5201 goto done_err; 5202 } 5203 5204 rval = sd_send_scsi_READ(un, buf, lbasize, 0, path_flag); 5205 if (rval) { 5206 goto done_err; 5207 } 5208 if (((struct dk_label *)buf)->dkl_magic == DKL_MAGIC) { 5209 /* not ours */ 5210 rval = ESRCH; 5211 goto done_err; 5212 } 5213 5214 rval = sd_send_scsi_READ(un, buf, lbasize, 1, path_flag); 5215 if (rval) { 5216 goto done_err; 5217 } 5218 sd_swap_efi_gpt((efi_gpt_t *)buf); 5219 5220 if ((rval = sd_validate_efi((efi_gpt_t *)buf)) != 0) { 5221 /* 5222 * Couldn't read the primary, try the backup. Our 5223 * capacity at this point could be based on CHS, so 5224 * check what the device reports. 5225 */ 5226 rval = sd_send_scsi_READ_CAPACITY(un, &cap, &lbasize, 5227 path_flag); 5228 if (rval) { 5229 goto done_err; 5230 } 5231 if ((rval = sd_send_scsi_READ(un, buf, lbasize, 5232 cap - 1, path_flag)) != 0) { 5233 goto done_err; 5234 } 5235 sd_swap_efi_gpt((efi_gpt_t *)buf); 5236 if ((rval = sd_validate_efi((efi_gpt_t *)buf)) != 0) 5237 goto done_err; 5238 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5239 "primary label corrupt; using backup\n"); 5240 } 5241 5242 nparts = ((efi_gpt_t *)buf)->efi_gpt_NumberOfPartitionEntries; 5243 gpe_lba = ((efi_gpt_t *)buf)->efi_gpt_PartitionEntryLBA; 5244 5245 rval = sd_send_scsi_READ(un, buf, EFI_MIN_ARRAY_SIZE, gpe_lba, 5246 path_flag); 5247 if (rval) { 5248 goto done_err; 5249 } 5250 partitions = (efi_gpe_t *)buf; 5251 5252 if (nparts > MAXPART) { 5253 nparts = MAXPART; 5254 } 5255 sd_swap_efi_gpe(nparts, partitions); 5256 5257 mutex_enter(SD_MUTEX(un)); 5258 5259 /* Fill in partition table. */ 5260 for (i = 0; i < nparts; i++) { 5261 if (partitions->efi_gpe_StartingLBA != 0 || 5262 partitions->efi_gpe_EndingLBA != 0) { 5263 un->un_map[i].dkl_cylno = 5264 partitions->efi_gpe_StartingLBA; 5265 un->un_map[i].dkl_nblk = 5266 partitions->efi_gpe_EndingLBA - 5267 partitions->efi_gpe_StartingLBA + 1; 5268 un->un_offset[i] = 5269 partitions->efi_gpe_StartingLBA; 5270 } 5271 if (i == WD_NODE) { 5272 /* 5273 * minor number 7 corresponds to the whole disk 5274 */ 5275 un->un_map[i].dkl_cylno = 0; 5276 un->un_map[i].dkl_nblk = un->un_blockcount; 5277 un->un_offset[i] = 0; 5278 } 5279 partitions++; 5280 } 5281 un->un_solaris_offset = 0; 5282 un->un_solaris_size = cap; 5283 un->un_f_geometry_is_valid = TRUE; 5284 kmem_free(buf, EFI_MIN_ARRAY_SIZE); 5285 return (0); 5286 5287 done_err: 5288 kmem_free(buf, EFI_MIN_ARRAY_SIZE); 5289 mutex_enter(SD_MUTEX(un)); 5290 /* 5291 * if we didn't find something that could look like a VTOC 5292 * and the disk is over 1TB, we know there isn't a valid label. 5293 * Otherwise let sd_uselabel decide what to do. 
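 * (ESRCH is the "not ours" case above: block 0 carried a VTOC magic
 * number, so judgment on the label is deferred to the VTOC code.)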
We only 5294 * want to invalidate this if we're certain the label isn't 5295 * valid because sd_prop_op will now fail, which in turn 5296 * causes things like opens and stats on the partition to fail. 5297 */ 5298 if ((un->un_blockcount > DK_MAX_BLOCKS) && (rval != ESRCH)) { 5299 un->un_f_geometry_is_valid = FALSE; 5300 } 5301 return (rval); 5302 } 5303 5304 5305 /* 5306 * Function: sd_uselabel 5307 * 5308 * Description: Validate the disk label and update the relevant data (geometry, 5309 * partition, vtoc, and capacity data) in the sd_lun struct. 5310 * Marks the geometry of the unit as being valid. 5311 * 5312 * Arguments: un: unit struct. 5313 * labp: disk label (struct dk_label) 5314 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 5315 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 5316 * to use the USCSI "direct" chain and bypass the normal 5317 * command waitq. 5318 * 5319 * Return Code: SD_LABEL_IS_VALID: Label read from disk is OK; geometry, 5320 * partition, vtoc, and capacity data are good. 5321 * 5322 * SD_LABEL_IS_INVALID: Magic number or checksum error in the 5323 * label; or computed capacity does not jibe with capacity 5324 * reported from the READ CAPACITY command. 5325 * 5326 * Context: Kernel thread only (can sleep). 5327 */ 5328 5329 static int 5330 sd_uselabel(struct sd_lun *un, struct dk_label *labp, int path_flag) 5331 { 5332 short *sp; 5333 short sum; 5334 short count; 5335 int label_error = SD_LABEL_IS_VALID; 5336 int i; 5337 int capacity; 5338 int part_end; 5339 int track_capacity; 5340 int err; 5341 #if defined(_SUNOS_VTOC_16) 5342 struct dkl_partition *vpartp; 5343 #endif 5344 ASSERT(un != NULL); 5345 ASSERT(mutex_owned(SD_MUTEX(un))); 5346 5347 /* Validate the magic number of the label. */ 5348 if (labp->dkl_magic != DKL_MAGIC) { 5349 #if defined(__sparc) 5350 if ((un->un_state == SD_STATE_NORMAL) && 5351 !ISREMOVABLE(un)) { 5352 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5353 "Corrupt label; wrong magic number\n"); 5354 } 5355 #endif 5356 return (SD_LABEL_IS_INVALID); 5357 } 5358 5359 /* Validate the checksum of the label. */ 5360 sp = (short *)labp; 5361 sum = 0; 5362 count = sizeof (struct dk_label) / sizeof (short); 5363 while (count--) { 5364 sum ^= *sp++; 5365 } 5366 5367 if (sum != 0) { 5368 #if defined(_SUNOS_VTOC_16) 5369 if (un->un_state == SD_STATE_NORMAL && !ISCD(un)) { 5370 #elif defined(_SUNOS_VTOC_8) 5371 if (un->un_state == SD_STATE_NORMAL && !ISREMOVABLE(un)) { 5372 #endif 5373 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5374 "Corrupt label - label checksum failed\n"); 5375 } 5376 return (SD_LABEL_IS_INVALID); 5377 } 5378 5379 5380 /* 5381 * Fill in geometry structure with data from label. 5382 */ 5383 bzero(&un->un_g, sizeof (struct dk_geom)); 5384 un->un_g.dkg_ncyl = labp->dkl_ncyl; 5385 un->un_g.dkg_acyl = labp->dkl_acyl; 5386 un->un_g.dkg_bcyl = 0; 5387 un->un_g.dkg_nhead = labp->dkl_nhead; 5388 un->un_g.dkg_nsect = labp->dkl_nsect; 5389 un->un_g.dkg_intrlv = labp->dkl_intrlv; 5390 5391 #if defined(_SUNOS_VTOC_8) 5392 un->un_g.dkg_gap1 = labp->dkl_gap1; 5393 un->un_g.dkg_gap2 = labp->dkl_gap2; 5394 un->un_g.dkg_bhead = labp->dkl_bhead; 5395 #endif 5396 #if defined(_SUNOS_VTOC_16) 5397 un->un_dkg_skew = labp->dkl_skew; 5398 #endif 5399 5400 #if defined(__i386) || defined(__amd64) 5401 un->un_g.dkg_apc = labp->dkl_apc; 5402 #endif 5403 5404 /* 5405 * Currently we rely on the values in the label being accurate. If 5406 * dkl_rpm or dkl_pcyl are zero in the label, use a default value.
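 * For example, a label with dkl_rpm == 0 gets the 3600 rpm default
 * below, and a label with dkl_pcyl == 0 gets dkg_ncyl + dkg_acyl as
 * its physical cylinder count.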
5407 * 5408 * Note: In the future a MODE SENSE may be used to retrieve this data, 5409 * although this command is optional in SCSI-2. 5410 */ 5411 un->un_g.dkg_rpm = (labp->dkl_rpm != 0) ? labp->dkl_rpm : 3600; 5412 un->un_g.dkg_pcyl = (labp->dkl_pcyl != 0) ? labp->dkl_pcyl : 5413 (un->un_g.dkg_ncyl + un->un_g.dkg_acyl); 5414 5415 /* 5416 * The Read and Write reinstruct values may not be valid 5417 * for older disks. 5418 */ 5419 un->un_g.dkg_read_reinstruct = labp->dkl_read_reinstruct; 5420 un->un_g.dkg_write_reinstruct = labp->dkl_write_reinstruct; 5421 5422 /* Fill in partition table. */ 5423 #if defined(_SUNOS_VTOC_8) 5424 for (i = 0; i < NDKMAP; i++) { 5425 un->un_map[i].dkl_cylno = labp->dkl_map[i].dkl_cylno; 5426 un->un_map[i].dkl_nblk = labp->dkl_map[i].dkl_nblk; 5427 } 5428 #endif 5429 #if defined(_SUNOS_VTOC_16) 5430 vpartp = labp->dkl_vtoc.v_part; 5431 track_capacity = labp->dkl_nhead * labp->dkl_nsect; 5432 5433 for (i = 0; i < NDKMAP; i++, vpartp++) { 5434 un->un_map[i].dkl_cylno = vpartp->p_start / track_capacity; 5435 un->un_map[i].dkl_nblk = vpartp->p_size; 5436 } 5437 #endif 5438 5439 /* Fill in VTOC Structure. */ 5440 bcopy(&labp->dkl_vtoc, &un->un_vtoc, sizeof (struct dk_vtoc)); 5441 #if defined(_SUNOS_VTOC_8) 5442 /* 5443 * The 8-slice vtoc does not include the ascii label; save it into 5444 * the device's soft state structure here. 5445 */ 5446 bcopy(labp->dkl_asciilabel, un->un_asciilabel, LEN_DKL_ASCII); 5447 #endif 5448 5449 /* Mark the geometry as valid. */ 5450 un->un_f_geometry_is_valid = TRUE; 5451 5452 /* Now look for a valid capacity. */ 5453 track_capacity = (un->un_g.dkg_nhead * un->un_g.dkg_nsect); 5454 capacity = (un->un_g.dkg_ncyl * track_capacity); 5455 5456 if (un->un_g.dkg_acyl) { 5457 #if defined(__i386) || defined(__amd64) 5458 /* we may have > 1 alts cylinder */ 5459 capacity += (track_capacity * un->un_g.dkg_acyl); 5460 #else 5461 capacity += track_capacity; 5462 #endif 5463 } 5464 5465 /* 5466 * At this point, un->un_blockcount should contain valid data from 5467 * the READ CAPACITY command. 5468 */ 5469 if (un->un_f_blockcount_is_valid != TRUE) { 5470 /* 5471 * We have a situation where the target didn't give us a good 5472 * READ CAPACITY value, yet there appears to be a valid label. 5473 * In this case, we'll fake the capacity. 5474 */ 5475 un->un_blockcount = capacity; 5476 un->un_f_blockcount_is_valid = TRUE; 5477 goto done; 5478 } 5479 5480 5481 if ((capacity <= un->un_blockcount) || 5482 (un->un_state != SD_STATE_NORMAL)) { 5483 #if defined(_SUNOS_VTOC_8) 5484 /* 5485 * We can't let this happen on drives that are subdivided 5486 * into logical disks (i.e., that have an fdisk table). 5487 * The un_blockcount field should always hold the full media 5488 * size in sectors, period. This code would overwrite 5489 * un_blockcount with the size of the Solaris fdisk partition. 5490 */ 5491 SD_ERROR(SD_LOG_COMMON, un, 5492 "sd_uselabel: Label %d blocks; Drive %d blocks\n", 5493 capacity, un->un_blockcount); 5494 un->un_blockcount = capacity; 5495 un->un_f_blockcount_is_valid = TRUE; 5496 #endif /* defined(_SUNOS_VTOC_8) */ 5497 goto done; 5498 } 5499 5500 if (ISCD(un)) { 5501 /* For CDROMs, we trust that the data in the label is OK. 
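 * Each slice is still sanity-checked below: a slice whose computed
 * ending block runs past un_blockcount marks the whole geometry
 * invalid.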
*/ 5502 #if defined(_SUNOS_VTOC_8) 5503 for (i = 0; i < NDKMAP; i++) { 5504 part_end = labp->dkl_nhead * labp->dkl_nsect * 5505 labp->dkl_map[i].dkl_cylno + 5506 labp->dkl_map[i].dkl_nblk - 1; 5507 5508 if ((labp->dkl_map[i].dkl_nblk) && 5509 (part_end > un->un_blockcount)) { 5510 un->un_f_geometry_is_valid = FALSE; 5511 break; 5512 } 5513 } 5514 #endif 5515 #if defined(_SUNOS_VTOC_16) 5516 vpartp = &(labp->dkl_vtoc.v_part[0]); 5517 for (i = 0; i < NDKMAP; i++, vpartp++) { 5518 part_end = vpartp->p_start + vpartp->p_size; 5519 if ((vpartp->p_size > 0) && 5520 (part_end > un->un_blockcount)) { 5521 un->un_f_geometry_is_valid = FALSE; 5522 break; 5523 } 5524 } 5525 #endif 5526 } else { 5527 uint64_t t_capacity; 5528 uint32_t t_lbasize; 5529 5530 mutex_exit(SD_MUTEX(un)); 5531 err = sd_send_scsi_READ_CAPACITY(un, &t_capacity, &t_lbasize, 5532 path_flag); 5533 ASSERT(t_capacity <= DK_MAX_BLOCKS); 5534 mutex_enter(SD_MUTEX(un)); 5535 5536 if (err == 0) { 5537 sd_update_block_info(un, t_lbasize, t_capacity); 5538 } 5539 5540 if (capacity > un->un_blockcount) { 5541 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5542 "Corrupt label - bad geometry\n"); 5543 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 5544 "Label says %u blocks; Drive says %llu blocks\n", 5545 capacity, (unsigned long long)un->un_blockcount); 5546 un->un_f_geometry_is_valid = FALSE; 5547 label_error = SD_LABEL_IS_INVALID; 5548 } 5549 } 5550 5551 done: 5552 5553 SD_INFO(SD_LOG_COMMON, un, "sd_uselabel: (label geometry)\n"); 5554 SD_INFO(SD_LOG_COMMON, un, 5555 " ncyl: %d; acyl: %d; nhead: %d; nsect: %d\n", 5556 un->un_g.dkg_ncyl, un->un_g.dkg_acyl, 5557 un->un_g.dkg_nhead, un->un_g.dkg_nsect); 5558 SD_INFO(SD_LOG_COMMON, un, 5559 " lbasize: %d; capacity: %d; intrlv: %d; rpm: %d\n", 5560 un->un_tgt_blocksize, un->un_blockcount, 5561 un->un_g.dkg_intrlv, un->un_g.dkg_rpm); 5562 SD_INFO(SD_LOG_COMMON, un, " wrt_reinstr: %d; rd_reinstr: %d\n", 5563 un->un_g.dkg_write_reinstruct, un->un_g.dkg_read_reinstruct); 5564 5565 ASSERT(mutex_owned(SD_MUTEX(un))); 5566 5567 return (label_error); 5568 } 5569 5570 5571 /* 5572 * Function: sd_build_default_label 5573 * 5574 * Description: Generate a default label for those devices that do not have 5575 * one, e.g., new media, removable cartridges, etc. 5576 * 5577 * Context: Kernel thread only 5578 */ 5579 5580 static void 5581 sd_build_default_label(struct sd_lun *un) 5582 { 5583 #if defined(_SUNOS_VTOC_16) 5584 uint_t phys_spc; 5585 uint_t disksize; 5586 struct dk_geom un_g; 5587 #endif 5588 5589 ASSERT(un != NULL); 5590 ASSERT(mutex_owned(SD_MUTEX(un))); 5591 5592 #if defined(_SUNOS_VTOC_8) 5593 /* 5594 * Note: This is a legacy check for non-removable devices on VTOC_8 5595 * only. This may be a valid check for VTOC_16 as well. 5596 */ 5597 if (!ISREMOVABLE(un)) { 5598 return; 5599 } 5600 #endif 5601 5602 bzero(&un->un_g, sizeof (struct dk_geom)); 5603 bzero(&un->un_vtoc, sizeof (struct dk_vtoc)); 5604 bzero(&un->un_map, NDKMAP * (sizeof (struct dk_map))); 5605 5606 #if defined(_SUNOS_VTOC_8) 5607 5608 /* 5609 * It's REMOVABLE media, and therefore has no label (on sparc, anyway). 5610 * But it is still necessary to set up various geometry information, 5611 * and we are doing this here. 5612 */ 5613 5614 /* 5615 * For the rpm, we use the minimum for the disk. For heads, cylinders, 5616 * and sectors per track: if the capacity is <= 1GB, use head = 64 and 5617 * sect = 32; else use head = 255 and sect = 63. Note: the capacity 5618 * should be equal to the C*H*S value.
This will cause some truncation of size due 5619 * to round off errors. For CD-ROMs, this truncation can have adverse 5620 * side effects, so we return ncyl and nhead as 1. The nsect value 5621 * would overflow for most CD-ROMs, as nsect is of type ushort. (4190569) 5622 */ 5623 if (ISCD(un)) { 5624 /* 5625 * Preserve the old behavior for non-writable 5626 * media. Since dkg_nsect is a ushort, it 5627 * loses bits when a CD-ROM has more than 5628 * 65536 sectors, so recalculating the 5629 * capacity from it would make the capacity 5630 * much smaller. The dkg_* information is not 5631 * used for read-only CD-ROMs, so that is OK; 5632 * but for writable CDs we need this 5633 * information to be valid (for newfs, say). 5634 * So for writable CDs we make nsect and 5635 * nhead > 1, which lets nsect stay within 5636 * the ushort limit without losing any bits. 5637 */ 5638 if (un->un_f_mmc_writable_media == TRUE) { 5639 un->un_g.dkg_nhead = 64; 5640 un->un_g.dkg_nsect = 32; 5641 un->un_g.dkg_ncyl = un->un_blockcount / (64 * 32); 5642 un->un_blockcount = un->un_g.dkg_ncyl * 5643 un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5644 } else { 5645 un->un_g.dkg_ncyl = 1; 5646 un->un_g.dkg_nhead = 1; 5647 un->un_g.dkg_nsect = un->un_blockcount; 5648 } 5649 } else { 5650 if (un->un_blockcount <= 0x1000) { 5651 /* unlabeled SCSI floppy device */ 5652 un->un_g.dkg_nhead = 2; 5653 un->un_g.dkg_ncyl = 80; 5654 un->un_g.dkg_nsect = un->un_blockcount / (2 * 80); 5655 } else if (un->un_blockcount <= 0x200000) { 5656 un->un_g.dkg_nhead = 64; 5657 un->un_g.dkg_nsect = 32; 5658 un->un_g.dkg_ncyl = un->un_blockcount / (64 * 32); 5659 } else { 5660 un->un_g.dkg_nhead = 255; 5661 un->un_g.dkg_nsect = 63; 5662 un->un_g.dkg_ncyl = un->un_blockcount / (255 * 63); 5663 } 5664 un->un_blockcount = 5665 un->un_g.dkg_ncyl * un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5666 } 5667 5668 un->un_g.dkg_acyl = 0; 5669 un->un_g.dkg_bcyl = 0; 5670 un->un_g.dkg_rpm = 200; 5671 un->un_asciilabel[0] = '\0'; 5672 un->un_g.dkg_pcyl = un->un_g.dkg_ncyl; 5673 5674 un->un_map[0].dkl_cylno = 0; 5675 un->un_map[0].dkl_nblk = un->un_blockcount; 5676 un->un_map[2].dkl_cylno = 0; 5677 un->un_map[2].dkl_nblk = un->un_blockcount; 5678 5679 #elif defined(_SUNOS_VTOC_16) 5680 5681 if (un->un_solaris_size == 0) { 5682 /* 5683 * Got an fdisk table but no Solaris entry, therefore 5684 * don't create a default label. 5685 */ 5686 un->un_f_geometry_is_valid = TRUE; 5687 return; 5688 } 5689 5690 /* 5691 * For CDs we continue to use the physical geometry to calculate 5692 * number of cylinders. All other devices must convert the 5693 * physical geometry (geom_cache) to values that will fit 5694 * in a dk_geom structure. 5695 */ 5696 if (ISCD(un)) { 5697 phys_spc = un->un_pgeom.g_nhead * un->un_pgeom.g_nsect; 5698 } else { 5699 /* Convert physical geometry to disk geometry */ 5700 bzero(&un_g, sizeof (struct dk_geom)); 5701 sd_convert_geometry(un->un_blockcount, &un_g); 5702 bcopy(&un_g, &un->un_g, sizeof (un->un_g)); 5703 phys_spc = un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5704 } 5705 5706 un->un_g.dkg_pcyl = un->un_solaris_size / phys_spc; 5707 un->un_g.dkg_acyl = DK_ACYL; 5708 un->un_g.dkg_ncyl = un->un_g.dkg_pcyl - DK_ACYL; 5709 disksize = un->un_g.dkg_ncyl * phys_spc; 5710 5711 if (ISCD(un)) { 5712 /* 5713 * CDs don't use the "heads * sectors * cyls"-type of 5714 * geometry, but instead use the entire capacity of the media. 5715 */ 5716 disksize = un->un_solaris_size; 5717 un->un_g.dkg_nhead = 1; 5718 un->un_g.dkg_nsect = 1; 5719 un->un_g.dkg_rpm = 5720 (un->un_pgeom.g_rpm == 0) ?
200 : un->un_pgeom.g_rpm; 5721 5722 un->un_vtoc.v_part[0].p_start = 0; 5723 un->un_vtoc.v_part[0].p_size = disksize; 5724 un->un_vtoc.v_part[0].p_tag = V_BACKUP; 5725 un->un_vtoc.v_part[0].p_flag = V_UNMNT; 5726 5727 un->un_map[0].dkl_cylno = 0; 5728 un->un_map[0].dkl_nblk = disksize; 5729 un->un_offset[0] = 0; 5730 5731 } else { 5732 /* 5733 * Hard disks and removable media cartridges 5734 */ 5735 un->un_g.dkg_rpm = 5736 (un->un_pgeom.g_rpm == 0) ? 3600: un->un_pgeom.g_rpm; 5737 un->un_vtoc.v_sectorsz = un->un_sys_blocksize; 5738 5739 /* Add boot slice */ 5740 un->un_vtoc.v_part[8].p_start = 0; 5741 un->un_vtoc.v_part[8].p_size = phys_spc; 5742 un->un_vtoc.v_part[8].p_tag = V_BOOT; 5743 un->un_vtoc.v_part[8].p_flag = V_UNMNT; 5744 5745 un->un_map[8].dkl_cylno = 0; 5746 un->un_map[8].dkl_nblk = phys_spc; 5747 un->un_offset[8] = 0; 5748 } 5749 5750 un->un_g.dkg_apc = 0; 5751 un->un_vtoc.v_nparts = V_NUMPAR; 5752 un->un_vtoc.v_version = V_VERSION; 5753 5754 /* Add backup slice */ 5755 un->un_vtoc.v_part[2].p_start = 0; 5756 un->un_vtoc.v_part[2].p_size = disksize; 5757 un->un_vtoc.v_part[2].p_tag = V_BACKUP; 5758 un->un_vtoc.v_part[2].p_flag = V_UNMNT; 5759 5760 un->un_map[2].dkl_cylno = 0; 5761 un->un_map[2].dkl_nblk = disksize; 5762 un->un_offset[2] = 0; 5763 5764 (void) sprintf(un->un_vtoc.v_asciilabel, "DEFAULT cyl %d alt %d" 5765 " hd %d sec %d", un->un_g.dkg_ncyl, un->un_g.dkg_acyl, 5766 un->un_g.dkg_nhead, un->un_g.dkg_nsect); 5767 5768 #else 5769 #error "No VTOC format defined." 5770 #endif 5771 5772 un->un_g.dkg_read_reinstruct = 0; 5773 un->un_g.dkg_write_reinstruct = 0; 5774 5775 un->un_g.dkg_intrlv = 1; 5776 5777 un->un_vtoc.v_sanity = VTOC_SANE; 5778 5779 un->un_f_geometry_is_valid = TRUE; 5780 5781 SD_INFO(SD_LOG_COMMON, un, 5782 "sd_build_default_label: Default label created: " 5783 "cyl: %d\tacyl: %d\tnhead: %d\tnsect: %d\tcap: %d\n", 5784 un->un_g.dkg_ncyl, un->un_g.dkg_acyl, un->un_g.dkg_nhead, 5785 un->un_g.dkg_nsect, un->un_blockcount); 5786 } 5787 5788 5789 #if defined(_FIRMWARE_NEEDS_FDISK) 5790 /* 5791 * Max CHS values, as they are encoded into bytes, for 1022/254/63 5792 */ 5793 #define LBA_MAX_SECT (63 | ((1022 & 0x300) >> 2)) 5794 #define LBA_MAX_CYL (1022 & 0xFF) 5795 #define LBA_MAX_HEAD (254) 5796 5797 5798 /* 5799 * Function: sd_has_max_chs_vals 5800 * 5801 * Description: Return TRUE if Cylinder-Head-Sector values are all at maximum. 5802 * 5803 * Arguments: fdp - ptr to CHS info 5804 * 5805 * Return Code: True or false 5806 * 5807 * Context: Any. 5808 */ 5809 5810 static int 5811 sd_has_max_chs_vals(struct ipart *fdp) 5812 { 5813 return ((fdp->begcyl == LBA_MAX_CYL) && 5814 (fdp->beghead == LBA_MAX_HEAD) && 5815 (fdp->begsect == LBA_MAX_SECT) && 5816 (fdp->endcyl == LBA_MAX_CYL) && 5817 (fdp->endhead == LBA_MAX_HEAD) && 5818 (fdp->endsect == LBA_MAX_SECT)); 5819 } 5820 #endif 5821 5822 5823 /* 5824 * Function: sd_inq_fill 5825 * 5826 * Description: Print a piece of inquiry data, cleaned up for non-printable 5827 * characters and stopping at the first space character after 5828 * the beginning of the passed string; 5829 * 5830 * Arguments: p - source string 5831 * l - maximum length to copy 5832 * s - destination string 5833 * 5834 * Context: Any. 
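 * Example (illustrative input): with p = "ST3500 N" and l = 8, s
 * receives "ST3500"; the copy stops at the first space past the first
 * character, and any byte outside the printable ASCII range is copied
 * as '*'.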
5835 */ 5836 5837 static void 5838 sd_inq_fill(char *p, int l, char *s) 5839 { 5840 unsigned i = 0; 5841 char c; 5842 5843 while (i++ < l) { 5844 if ((c = *p++) < ' ' || c >= 0x7F) { 5845 c = '*'; 5846 } else if (i != 1 && c == ' ') { 5847 break; 5848 } 5849 *s++ = c; 5850 } 5851 *s++ = 0; 5852 } 5853 5854 5855 /* 5856 * Function: sd_register_devid 5857 * 5858 * Description: This routine will obtain the device id information from the 5859 * target, obtain the serial number, and register the device 5860 * id with the ddi framework. 5861 * 5862 * Arguments: devi - the system's dev_info_t for the device. 5863 * un - driver soft state (unit) structure 5864 * reservation_flag - indicates if a reservation conflict 5865 * occurred during attach 5866 * 5867 * Context: Kernel Thread 5868 */ 5869 static void 5870 sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag) 5871 { 5872 int rval = 0; 5873 uchar_t *inq80 = NULL; 5874 size_t inq80_len = MAX_INQUIRY_SIZE; 5875 size_t inq80_resid = 0; 5876 uchar_t *inq83 = NULL; 5877 size_t inq83_len = MAX_INQUIRY_SIZE; 5878 size_t inq83_resid = 0; 5879 5880 ASSERT(un != NULL); 5881 ASSERT(mutex_owned(SD_MUTEX(un))); 5882 ASSERT((SD_DEVINFO(un)) == devi); 5883 5884 /* 5885 * This is the case of antiquated Sun disk drives that have the 5886 * FAB_DEVID property set in the disk_table. These drives 5887 * manage their devids by storing them in the last 2 available 5888 * sectors on the drive and have them fabricated by the ddi layer 5889 * by calling ddi_devid_init and passing the DEVID_FAB flag. 5890 */ 5891 if (un->un_f_opt_fab_devid == TRUE) { 5892 /* 5893 * Depending on EINVAL isn't reliable, since a reserved disk 5894 * may result in invalid geometry, so check to make sure a 5895 * reservation conflict did not occur during attach. 5896 */ 5897 if ((sd_get_devid(un) == EINVAL) && 5898 (reservation_flag != SD_TARGET_IS_RESERVED)) { 5899 /* 5900 * The devid is invalid AND there is no reservation 5901 * conflict. Fabricate a new devid. 5902 */ 5903 (void) sd_create_devid(un); 5904 } 5905 5906 /* Register the devid if it exists */ 5907 if (un->un_devid != NULL) { 5908 (void) ddi_devid_register(SD_DEVINFO(un), 5909 un->un_devid); 5910 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5911 "sd_register_devid: Devid Fabricated\n"); 5912 } 5913 return; 5914 } 5915 5916 /* 5917 * We check the availability of the World Wide Name (0x83) and Unit 5918 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using 5919 * un_vpd_page_mask from them, we decide which way to get the WWN. If 5920 * 0x83 is available, that is the best choice. Our next choice is 5921 * 0x80. If neither is available, we munge the devid from the device 5922 * vid/pid/serial # for Sun qualified disks, or use the ddi framework 5923 * to fabricate a devid for non-Sun qualified disks.
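 * Illustrative decision order (a restatement of the above, not
 * additional code):
 *   page 0x83 supported -> encode the devid from the WWN page
 *   else page 0x80      -> encode the devid from the serial number page
 *   else Sun qualified  -> munge the devid from vid/pid/serial #
 *   else                -> fabricate a devid via ddi_devid_init(DEVID_FAB)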
5924 */ 5925 if (sd_check_vpd_page_support(un) == 0) { 5926 /* collect page 80 data if available */ 5927 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) { 5928 5929 mutex_exit(SD_MUTEX(un)); 5930 inq80 = kmem_zalloc(inq80_len, KM_SLEEP); 5931 rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len, 5932 0x01, 0x80, &inq80_resid); 5933 5934 if (rval != 0) { 5935 kmem_free(inq80, inq80_len); 5936 inq80 = NULL; 5937 inq80_len = 0; 5938 } 5939 mutex_enter(SD_MUTEX(un)); 5940 } 5941 5942 /* collect page 83 data if available */ 5943 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) { 5944 5945 mutex_exit(SD_MUTEX(un)); 5946 inq83 = kmem_zalloc(inq83_len, KM_SLEEP); 5947 rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len, 5948 0x01, 0x83, &inq83_resid); 5949 5950 if (rval != 0) { 5951 kmem_free(inq83, inq83_len); 5952 inq83 = NULL; 5953 inq83_len = 0; 5954 } 5955 mutex_enter(SD_MUTEX(un)); 5956 } 5957 } 5958 5959 /* encode best devid possible based on data available */ 5960 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, 5961 (char *)ddi_driver_name(SD_DEVINFO(un)), 5962 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)), 5963 inq80, inq80_len - inq80_resid, inq83, inq83_len - 5964 inq83_resid, &un->un_devid) == DDI_SUCCESS) { 5965 5966 /* devid successfully encoded, register devid */ 5967 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid); 5968 5969 } else { 5970 /* 5971 * Unable to encode a devid based on data available. 5972 * This is not a Sun qualified disk. Older Sun disk 5973 * drives that have the SD_FAB_DEVID property 5974 * set in the disk_table and non-Sun qualified 5975 * disks are treated in the same manner. These 5976 * drives manage their devids by storing them in 5977 * the last 2 available sectors on the drive and 5978 * have them fabricated by the ddi layer by 5979 * calling ddi_devid_init and passing the 5980 * DEVID_FAB flag. 5981 * Create a fabricated devid only if one does 5982 * not already exist. 5983 */ 5984 if (sd_get_devid(un) == EINVAL) { 5985 (void) sd_create_devid(un); 5986 un->un_f_opt_fab_devid = TRUE; 5987 } 5988 5989 /* Register the devid if it exists */ 5990 if (un->un_devid != NULL) { 5991 (void) ddi_devid_register(SD_DEVINFO(un), 5992 un->un_devid); 5993 SD_INFO(SD_LOG_ATTACH_DETACH, un, 5994 "sd_register_devid: devid fabricated using " 5995 "ddi framework\n"); 5996 } 5997 } 5998 5999 /* clean up resources */ 6000 if (inq80 != NULL) { 6001 kmem_free(inq80, inq80_len); 6002 } 6003 if (inq83 != NULL) { 6004 kmem_free(inq83, inq83_len); 6005 } 6006 } 6007 6008 static daddr_t 6009 sd_get_devid_block(struct sd_lun *un) 6010 { 6011 daddr_t spc, blk, head, cyl; 6012 6013 if (un->un_blockcount <= DK_MAX_BLOCKS) { 6014 /* this geometry doesn't allow us to write a devid */ 6015 if (un->un_g.dkg_acyl < 2) { 6016 return (-1); 6017 } 6018 6019 /* 6020 * Subtracting 2 guarantees that the next-to-last 6021 * cylinder is used. 6022 */ 6023 cyl = un->un_g.dkg_ncyl + un->un_g.dkg_acyl - 2; 6024 spc = un->un_g.dkg_nhead * un->un_g.dkg_nsect; 6025 head = un->un_g.dkg_nhead - 1; 6026 blk = (cyl * (spc - un->un_g.dkg_apc)) + 6027 (head * un->un_g.dkg_nsect) + 1; 6028 } else { 6029 if (un->un_reserved != -1) { 6030 blk = un->un_map[un->un_reserved].dkl_cylno + 1; 6031 } else { 6032 return (-1); 6033 } 6034 } 6035 return (blk); 6036 } 6037 6038 /* 6039 * Function: sd_get_devid 6040 * 6041 * Description: This routine will return 0 if a valid device id has been 6042 * obtained from the target and stored in the soft state.
If a 6043 * valid device id has not been previously read and stored, a 6044 * read attempt will be made. 6045 * 6046 * Arguments: un - driver soft state (unit) structure 6047 * 6048 * Return Code: 0 if we successfully get the device id 6049 * 6050 * Context: Kernel Thread 6051 */ 6052 6053 static int 6054 sd_get_devid(struct sd_lun *un) 6055 { 6056 struct dk_devid *dkdevid; 6057 ddi_devid_t tmpid; 6058 uint_t *ip; 6059 size_t sz; 6060 daddr_t blk; 6061 int status; 6062 int chksum; 6063 int i; 6064 size_t buffer_size; 6065 6066 ASSERT(un != NULL); 6067 ASSERT(mutex_owned(SD_MUTEX(un))); 6068 6069 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 6070 un); 6071 6072 if (un->un_devid != NULL) { 6073 return (0); 6074 } 6075 6076 blk = sd_get_devid_block(un); 6077 if (blk < 0) 6078 return (EINVAL); 6079 6080 /* 6081 * Read and verify device id, stored in the reserved cylinders at the 6082 * end of the disk. Backup label is on the odd sectors of the last 6083 * track of the last cylinder. Device id will be on track of the next 6084 * to last cylinder. 6085 */ 6086 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 6087 mutex_exit(SD_MUTEX(un)); 6088 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 6089 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 6090 SD_PATH_DIRECT); 6091 if (status != 0) { 6092 goto error; 6093 } 6094 6095 /* Validate the revision */ 6096 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 6097 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 6098 status = EINVAL; 6099 goto error; 6100 } 6101 6102 /* Calculate the checksum */ 6103 chksum = 0; 6104 ip = (uint_t *)dkdevid; 6105 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 6106 i++) { 6107 chksum ^= ip[i]; 6108 } 6109 6110 /* Compare the checksums */ 6111 if (DKD_GETCHKSUM(dkdevid) != chksum) { 6112 status = EINVAL; 6113 goto error; 6114 } 6115 6116 /* Validate the device id */ 6117 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 6118 status = EINVAL; 6119 goto error; 6120 } 6121 6122 /* 6123 * Store the device id in the driver soft state 6124 */ 6125 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 6126 tmpid = kmem_alloc(sz, KM_SLEEP); 6127 6128 mutex_enter(SD_MUTEX(un)); 6129 6130 un->un_devid = tmpid; 6131 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 6132 6133 kmem_free(dkdevid, buffer_size); 6134 6135 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 6136 6137 return (status); 6138 error: 6139 mutex_enter(SD_MUTEX(un)); 6140 kmem_free(dkdevid, buffer_size); 6141 return (status); 6142 } 6143 6144 6145 /* 6146 * Function: sd_create_devid 6147 * 6148 * Description: This routine will fabricate the device id and write it 6149 * to the disk. 6150 * 6151 * Arguments: un - driver soft state (unit) structure 6152 * 6153 * Return Code: value of the fabricated device id 6154 * 6155 * Context: Kernel Thread 6156 */ 6157 6158 static ddi_devid_t 6159 sd_create_devid(struct sd_lun *un) 6160 { 6161 ASSERT(un != NULL); 6162 6163 /* Fabricate the devid */ 6164 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 6165 == DDI_FAILURE) { 6166 return (NULL); 6167 } 6168 6169 /* Write the devid to disk */ 6170 if (sd_write_deviceid(un) != 0) { 6171 ddi_devid_free(un->un_devid); 6172 un->un_devid = NULL; 6173 } 6174 6175 return (un->un_devid); 6176 } 6177 6178 6179 /* 6180 * Function: sd_write_deviceid 6181 * 6182 * Description: This routine will write the device id to the disk 6183 * reserved sector. 
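 * The dk_devid sector written here carries a two-byte revision, the
 * devid payload, and a trailing XOR checksum computed over the
 * sector's uint_t words; see the checksum loop below.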
6184 * 6185 * Arguments: un - driver soft state (unit) structure 6186 * 6187 * Return Code: -1 if a devid block cannot be determined; otherwise the 6188 * value returned by sd_send_scsi_WRITE 6189 * 6190 * Context: Kernel Thread 6191 */ 6192 6193 static int 6194 sd_write_deviceid(struct sd_lun *un) 6195 { 6196 struct dk_devid *dkdevid; 6197 daddr_t blk; 6198 uint_t *ip, chksum; 6199 int status; 6200 int i; 6201 6202 ASSERT(mutex_owned(SD_MUTEX(un))); 6203 6204 blk = sd_get_devid_block(un); 6205 if (blk < 0) 6206 return (-1); 6207 mutex_exit(SD_MUTEX(un)); 6208 6209 /* Allocate the buffer */ 6210 dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP); 6211 6212 /* Fill in the revision */ 6213 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 6214 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 6215 6216 /* Copy in the device id */ 6217 mutex_enter(SD_MUTEX(un)); 6218 bcopy(un->un_devid, &dkdevid->dkd_devid, 6219 ddi_devid_sizeof(un->un_devid)); 6220 mutex_exit(SD_MUTEX(un)); 6221 6222 /* Calculate the checksum */ 6223 chksum = 0; 6224 ip = (uint_t *)dkdevid; 6225 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 6226 i++) { 6227 chksum ^= ip[i]; 6228 } 6229 6230 /* Fill-in checksum */ 6231 DKD_FORMCHKSUM(chksum, dkdevid); 6232 6233 /* Write the reserved sector */ 6234 status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk, 6235 SD_PATH_DIRECT); 6236 6237 kmem_free(dkdevid, un->un_sys_blocksize); 6238 6239 mutex_enter(SD_MUTEX(un)); 6240 return (status); 6241 } 6242 6243 6244 /* 6245 * Function: sd_check_vpd_page_support 6246 * 6247 * Description: This routine sends an inquiry command with the EVPD bit set and 6248 * a page code of 0x00 to the device. It is used to determine which 6249 * vital product pages are available to find the devid. We are 6250 * looking for pages 0x83 or 0x80. If we return -1, the 6251 * device does not support that command. 6252 * 6253 * Arguments: un - driver soft state (unit) structure 6254 * 6255 * Return Code: 0 - success 6256 * -1 - failure (the device does not support VPD pages) 6257 * 6258 * Context: This routine can sleep. 6259 */ 6260 6261 static int 6262 sd_check_vpd_page_support(struct sd_lun *un) 6263 { 6264 uchar_t *page_list = NULL; 6265 uchar_t page_length = 0xff; /* Use max possible length */ 6266 uchar_t evpd = 0x01; /* Set the EVPD bit */ 6267 uchar_t page_code = 0x00; /* Supported VPD Pages */ 6268 int rval = 0; 6269 int counter; 6270 6271 ASSERT(un != NULL); 6272 ASSERT(mutex_owned(SD_MUTEX(un))); 6273 6274 mutex_exit(SD_MUTEX(un)); 6275 6276 /* 6277 * We'll set the page length to the maximum to save figuring it out 6278 * with an additional call. 6279 */ 6280 page_list = kmem_zalloc(page_length, KM_SLEEP); 6281 6282 rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd, 6283 page_code, NULL); 6284 6285 mutex_enter(SD_MUTEX(un)); 6286 6287 /* 6288 * Now we must validate that the device accepted the command, as some 6289 * drives do not support it. If the drive does support it, we will 6290 * return 0, and the supported pages will be in un_vpd_page_mask. If 6291 * not, we return -1. 6292 */ 6293 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) { 6294 /* Loop to find one of the 2 pages we need */ 6295 counter = 4; /* Supported pages start at byte 4, with 0x00 */ 6296 6297 /* 6298 * Pages are returned in ascending order, and 0x83 is what we 6299 * are hoping for.
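 * For example (illustrative), a supported-pages list of
 * { 0x00, 0x80, 0x83 } drives the loop below to accumulate
 * SD_VPD_SUPPORTED_PG, SD_VPD_UNIT_SERIAL_PG, and SD_VPD_DEVID_WWN_PG
 * in un_vpd_page_mask.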
6300 */ 6301 while ((page_list[counter] <= 0x83) && 6302 (counter <= (page_list[VPD_PAGE_LENGTH] + 6303 VPD_HEAD_OFFSET))) { 6304 /* 6305 * Add 3 because page_list[3] is the number of 6306 * pages minus 3 6307 */ 6308 6309 switch (page_list[counter]) { 6310 case 0x00: 6311 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 6312 break; 6313 case 0x80: 6314 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 6315 break; 6316 case 0x81: 6317 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 6318 break; 6319 case 0x82: 6320 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 6321 break; 6322 case 0x83: 6323 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 6324 break; 6325 } 6326 counter++; 6327 } 6328 6329 } else { 6330 rval = -1; 6331 6332 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6333 "sd_check_vpd_page_support: This drive does not implement " 6334 "VPD pages.\n"); 6335 } 6336 6337 kmem_free(page_list, page_length); 6338 6339 return (rval); 6340 } 6341 6342 6343 /* 6344 * Function: sd_setup_pm 6345 * 6346 * Description: Initialize Power Management on the device 6347 * 6348 * Context: Kernel Thread 6349 */ 6350 6351 static void 6352 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 6353 { 6354 uint_t log_page_size; 6355 uchar_t *log_page_data; 6356 int rval; 6357 6358 /* 6359 * Since we are called from attach, holding a mutex for 6360 * un is unnecessary. Because some of the routines called 6361 * from here require SD_MUTEX to not be held, assert this 6362 * right up front. 6363 */ 6364 ASSERT(!mutex_owned(SD_MUTEX(un))); 6365 /* 6366 * Since the sd device does not have the 'reg' property, 6367 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 6368 * The following code is to tell cpr that this device 6369 * DOES need to be suspended and resumed. 6370 */ 6371 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 6372 "pm-hardware-state", "needs-suspend-resume"); 6373 6374 /* 6375 * Check if HBA has set the "pm-capable" property. 6376 * If "pm-capable" exists and is non-zero then we can 6377 * power manage the device without checking the start/stop 6378 * cycle count log sense page. 6379 * 6380 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0) 6381 * then we should not power manage the device. 6382 * 6383 * If "pm-capable" doesn't exist then un->un_pm_capable_prop will 6384 * be set to SD_PM_CAPABLE_UNDEFINED (-1). In this case, sd will 6385 * check the start/stop cycle count log sense page and power manage 6386 * the device if the cycle count limit has not been exceeded. 6387 */ 6388 un->un_pm_capable_prop = 6389 ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6390 "pm-capable", SD_PM_CAPABLE_UNDEFINED); 6391 if (un->un_pm_capable_prop != SD_PM_CAPABLE_UNDEFINED) { 6392 /* 6393 * pm-capable property exists. 6394 * 6395 * Convert "TRUE" values for un_pm_capable_prop to 6396 * SD_PM_CAPABLE_TRUE (1) to make it easier to check later. 6397 * "TRUE" values are any values except SD_PM_CAPABLE_FALSE (0) 6398 * and SD_PM_CAPABLE_UNDEFINED (-1) 6399 */ 6400 if (un->un_pm_capable_prop != SD_PM_CAPABLE_FALSE) { 6401 un->un_pm_capable_prop = SD_PM_CAPABLE_TRUE; 6402 } 6403 6404 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6405 "sd_unit_attach: un:0x%p pm-capable " 6406 "property set to %d.\n", un, un->un_pm_capable_prop); 6407 } 6408 6409 /* 6410 * This complies with the new power management framework 6411 * for certain desktop machines. Create the pm_components 6412 * property as a string array property. 
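 * (The value written by sd_create_pm_components() below is the string
 * array { "NAME=spindle-motor", "0=off", "1=on" }.)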
6413 * 6414 * If this is a removable device or if the pm-capable property 6415 * is SD_PM_CAPABLE_TRUE (1) then we should create the 6416 * pm_components property without checking for the existance of 6417 * the start-stop cycle counter log page 6418 */ 6419 if (ISREMOVABLE(un) || 6420 un->un_pm_capable_prop == SD_PM_CAPABLE_TRUE) { 6421 /* 6422 * not all devices have a motor, try it first. 6423 * some devices may return ILLEGAL REQUEST, some 6424 * will hang 6425 */ 6426 un->un_f_start_stop_supported = TRUE; 6427 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 6428 SD_PATH_DIRECT) != 0) { 6429 un->un_f_start_stop_supported = FALSE; 6430 } 6431 6432 /* 6433 * create pm properties anyways otherwise the parent can't 6434 * go to sleep 6435 */ 6436 (void) sd_create_pm_components(devi, un); 6437 un->un_f_pm_is_enabled = TRUE; 6438 6439 /* 6440 * Need to create a zero length (Boolean) property 6441 * removable-media for the removable media devices. 6442 * Note that the return value of the property is not being 6443 * checked, since if unable to create the property 6444 * then do not want the attach to fail altogether. Consistent 6445 * with other property creation in attach. 6446 */ 6447 if (ISREMOVABLE(un)) { 6448 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, 6449 DDI_PROP_CANSLEEP, "removable-media", NULL, 0); 6450 } 6451 return; 6452 } 6453 6454 rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE); 6455 6456 #ifdef SDDEBUG 6457 if (sd_force_pm_supported) { 6458 /* Force a successful result */ 6459 rval = 1; 6460 } 6461 #endif 6462 6463 /* 6464 * If the start-stop cycle counter log page is not supported 6465 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0) 6466 * then we should not create the pm_components property. 6467 */ 6468 if (rval == -1 || un->un_pm_capable_prop == SD_PM_CAPABLE_FALSE) { 6469 /* 6470 * Error. 6471 * Reading log sense failed, most likely this is 6472 * an older drive that does not support log sense. 6473 * If this fails auto-pm is not supported. 6474 */ 6475 un->un_power_level = SD_SPINDLE_ON; 6476 un->un_f_pm_is_enabled = FALSE; 6477 6478 } else if (rval == 0) { 6479 /* 6480 * Page not found. 6481 * The start stop cycle counter is implemented as page 6482 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For 6483 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE). 6484 */ 6485 if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) { 6486 /* 6487 * Page found, use this one. 6488 */ 6489 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE; 6490 un->un_f_pm_is_enabled = TRUE; 6491 } else { 6492 /* 6493 * Error or page not found. 6494 * auto-pm is not supported for this device. 6495 */ 6496 un->un_power_level = SD_SPINDLE_ON; 6497 un->un_f_pm_is_enabled = FALSE; 6498 } 6499 } else { 6500 /* 6501 * Page found, use it. 6502 */ 6503 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE; 6504 un->un_f_pm_is_enabled = TRUE; 6505 } 6506 6507 6508 if (un->un_f_pm_is_enabled == TRUE) { 6509 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 6510 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 6511 6512 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 6513 log_page_size, un->un_start_stop_cycle_page, 6514 0x01, 0, SD_PATH_DIRECT); 6515 #ifdef SDDEBUG 6516 if (sd_force_pm_supported) { 6517 /* Force a successful result */ 6518 rval = 0; 6519 } 6520 #endif 6521 6522 /* 6523 * If the Log sense for Page( Start/stop cycle counter page) 6524 * succeeds, then power managment is supported and we can 6525 * enable auto-pm. 
6526 */ 6527 if (rval == 0) { 6528 (void) sd_create_pm_components(devi, un); 6529 } else { 6530 un->un_power_level = SD_SPINDLE_ON; 6531 un->un_f_pm_is_enabled = FALSE; 6532 } 6533 6534 kmem_free(log_page_data, log_page_size); 6535 } 6536 } 6537 6538 6539 /* 6540 * Function: sd_create_pm_components 6541 * 6542 * Description: Initialize the PM property. 6543 * 6544 * Context: Kernel thread context 6545 */ 6546 6547 static void 6548 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 6549 { 6550 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 6551 6552 ASSERT(!mutex_owned(SD_MUTEX(un))); 6553 6554 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 6555 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 6556 /* 6557 * When components are initially created they are idle; 6558 * power up any non-removables. 6559 * Note: the return value of pm_raise_power can't be used 6560 * for determining if PM should be enabled for this device. 6561 * Even if you check the return values and remove this 6562 * property created above, the PM framework will not honor the 6563 * change after the first call to pm_raise_power. Hence, 6564 * removal of that property does not help if pm_raise_power 6565 * fails. In the case of removable media, the start/stop 6566 * will fail if the media is not present. 6567 */ 6568 if ((!ISREMOVABLE(un)) && (pm_raise_power(SD_DEVINFO(un), 0, 6569 SD_SPINDLE_ON) == DDI_SUCCESS)) { 6570 mutex_enter(SD_MUTEX(un)); 6571 un->un_power_level = SD_SPINDLE_ON; 6572 mutex_enter(&un->un_pm_mutex); 6573 /* Set to on and not busy. */ 6574 un->un_pm_count = 0; 6575 } else { 6576 mutex_enter(SD_MUTEX(un)); 6577 un->un_power_level = SD_SPINDLE_OFF; 6578 mutex_enter(&un->un_pm_mutex); 6579 /* Set to off. */ 6580 un->un_pm_count = -1; 6581 } 6582 mutex_exit(&un->un_pm_mutex); 6583 mutex_exit(SD_MUTEX(un)); 6584 } else { 6585 un->un_power_level = SD_SPINDLE_ON; 6586 un->un_f_pm_is_enabled = FALSE; 6587 } 6588 } 6589 6590 6591 /* 6592 * Function: sd_ddi_suspend 6593 * 6594 * Description: Performs system power-down operations. This includes 6595 * setting the drive state to indicate it is suspended so 6596 * that no new commands will be accepted. Also, wait for 6597 * all commands that are in transport or queued to a timer 6598 * for retry to complete. All timeout threads are cancelled. 6599 * 6600 * Return Code: DDI_FAILURE or DDI_SUCCESS 6601 * 6602 * Context: Kernel thread context 6603 */ 6604 6605 static int 6606 sd_ddi_suspend(dev_info_t *devi) 6607 { 6608 struct sd_lun *un; 6609 clock_t wait_cmds_complete; 6610 6611 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6612 if (un == NULL) { 6613 return (DDI_FAILURE); 6614 } 6615 6616 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 6617 6618 mutex_enter(SD_MUTEX(un)); 6619 6620 /* Return success if the device is already suspended. */ 6621 if (un->un_state == SD_STATE_SUSPENDED) { 6622 mutex_exit(SD_MUTEX(un)); 6623 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6624 "device already suspended, exiting\n"); 6625 return (DDI_SUCCESS); 6626 } 6627 6628 /* Return failure if the device is being used by HA */ 6629 if (un->un_resvd_status & 6630 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 6631 mutex_exit(SD_MUTEX(un)); 6632 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6633 "device in use by HA, exiting\n"); 6634 return (DDI_FAILURE); 6635 } 6636 6637 /* 6638 * Return failure if the device is in a resource wait 6639 * or power changing state.
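 * (SD_STATE_RWAIT means the driver is still waiting for resources to
 * start a command, and SD_STATE_PM_CHANGING means a power transition
 * is already in flight; neither is a safe point to suspend.)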
6640 */ 6641 if ((un->un_state == SD_STATE_RWAIT) || 6642 (un->un_state == SD_STATE_PM_CHANGING)) { 6643 mutex_exit(SD_MUTEX(un)); 6644 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6645 "device in resource wait state, exiting\n"); 6646 return (DDI_FAILURE); 6647 } 6648 6649 6650 un->un_save_state = un->un_last_state; 6651 New_state(un, SD_STATE_SUSPENDED); 6652 6653 /* 6654 * Wait for all commands that are in transport or queued to a timer 6655 * for retry to complete. 6656 * 6657 * While waiting, no new commands will be accepted or sent because of 6658 * the new state we set above. 6659 * 6660 * Wait till current operation has completed. If we are in the resource 6661 * wait state (with an intr outstanding) then we need to wait till the 6662 * intr completes and starts the next cmd. We want to wait for 6663 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 6664 */ 6665 wait_cmds_complete = ddi_get_lbolt() + 6666 (sd_wait_cmds_complete * drv_usectohz(1000000)); 6667 6668 while (un->un_ncmds_in_transport != 0) { 6669 /* 6670 * Fail if commands do not finish in the specified time. 6671 */ 6672 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 6673 wait_cmds_complete) == -1) { 6674 /* 6675 * Undo the state changes made above. Everything 6676 * must go back to it's original value. 6677 */ 6678 Restore_state(un); 6679 un->un_last_state = un->un_save_state; 6680 /* Wake up any threads that might be waiting. */ 6681 cv_broadcast(&un->un_suspend_cv); 6682 mutex_exit(SD_MUTEX(un)); 6683 SD_ERROR(SD_LOG_IO_PM, un, 6684 "sd_ddi_suspend: failed due to outstanding cmds\n"); 6685 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 6686 return (DDI_FAILURE); 6687 } 6688 } 6689 6690 /* 6691 * Cancel SCSI watch thread and timeouts, if any are active 6692 */ 6693 6694 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 6695 opaque_t temp_token = un->un_swr_token; 6696 mutex_exit(SD_MUTEX(un)); 6697 scsi_watch_suspend(temp_token); 6698 mutex_enter(SD_MUTEX(un)); 6699 } 6700 6701 if (un->un_reset_throttle_timeid != NULL) { 6702 timeout_id_t temp_id = un->un_reset_throttle_timeid; 6703 un->un_reset_throttle_timeid = NULL; 6704 mutex_exit(SD_MUTEX(un)); 6705 (void) untimeout(temp_id); 6706 mutex_enter(SD_MUTEX(un)); 6707 } 6708 6709 if (un->un_dcvb_timeid != NULL) { 6710 timeout_id_t temp_id = un->un_dcvb_timeid; 6711 un->un_dcvb_timeid = NULL; 6712 mutex_exit(SD_MUTEX(un)); 6713 (void) untimeout(temp_id); 6714 mutex_enter(SD_MUTEX(un)); 6715 } 6716 6717 mutex_enter(&un->un_pm_mutex); 6718 if (un->un_pm_timeid != NULL) { 6719 timeout_id_t temp_id = un->un_pm_timeid; 6720 un->un_pm_timeid = NULL; 6721 mutex_exit(&un->un_pm_mutex); 6722 mutex_exit(SD_MUTEX(un)); 6723 (void) untimeout(temp_id); 6724 mutex_enter(SD_MUTEX(un)); 6725 } else { 6726 mutex_exit(&un->un_pm_mutex); 6727 } 6728 6729 if (un->un_retry_timeid != NULL) { 6730 timeout_id_t temp_id = un->un_retry_timeid; 6731 un->un_retry_timeid = NULL; 6732 mutex_exit(SD_MUTEX(un)); 6733 (void) untimeout(temp_id); 6734 mutex_enter(SD_MUTEX(un)); 6735 } 6736 6737 if (un->un_direct_priority_timeid != NULL) { 6738 timeout_id_t temp_id = un->un_direct_priority_timeid; 6739 un->un_direct_priority_timeid = NULL; 6740 mutex_exit(SD_MUTEX(un)); 6741 (void) untimeout(temp_id); 6742 mutex_enter(SD_MUTEX(un)); 6743 } 6744 6745 if (un->un_f_is_fibre == TRUE) { 6746 /* 6747 * Remove callbacks for insert and remove events 6748 */ 6749 if (un->un_insert_event != NULL) { 6750 mutex_exit(SD_MUTEX(un)); 6751 (void) ddi_remove_event_handler(un->un_insert_cb_id); 6752 
mutex_enter(SD_MUTEX(un)); 6753 un->un_insert_event = NULL; 6754 } 6755 6756 if (un->un_remove_event != NULL) { 6757 mutex_exit(SD_MUTEX(un)); 6758 (void) ddi_remove_event_handler(un->un_remove_cb_id); 6759 mutex_enter(SD_MUTEX(un)); 6760 un->un_remove_event = NULL; 6761 } 6762 } 6763 6764 mutex_exit(SD_MUTEX(un)); 6765 6766 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 6767 6768 return (DDI_SUCCESS); 6769 } 6770 6771 6772 /* 6773 * Function: sd_ddi_pm_suspend 6774 * 6775 * Description: Set the drive state to low power. 6776 * Someone else is required to actually change the drive 6777 * power level. 6778 * 6779 * Arguments: un - driver soft state (unit) structure 6780 * 6781 * Return Code: DDI_FAILURE or DDI_SUCCESS 6782 * 6783 * Context: Kernel thread context 6784 */ 6785 6786 static int 6787 sd_ddi_pm_suspend(struct sd_lun *un) 6788 { 6789 ASSERT(un != NULL); 6790 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n"); 6791 6792 ASSERT(!mutex_owned(SD_MUTEX(un))); 6793 mutex_enter(SD_MUTEX(un)); 6794 6795 /* 6796 * Exit if power management is not enabled for this device, or if 6797 * the device is being used by HA. 6798 */ 6799 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 6800 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 6801 mutex_exit(SD_MUTEX(un)); 6802 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n"); 6803 return (DDI_SUCCESS); 6804 } 6805 6806 SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n", 6807 un->un_ncmds_in_driver); 6808 6809 /* 6810 * See if the device is not busy, i.e.: 6811 * - we have no commands in the driver for this device 6812 * - not waiting for resources 6813 */ 6814 if ((un->un_ncmds_in_driver == 0) && 6815 (un->un_state != SD_STATE_RWAIT)) { 6816 /* 6817 * The device is not busy, so it is OK to go to low power state. 6818 * Indicate low power, but rely on someone else to actually 6819 * change it. 6820 */ 6821 mutex_enter(&un->un_pm_mutex); 6822 un->un_pm_count = -1; 6823 mutex_exit(&un->un_pm_mutex); 6824 un->un_power_level = SD_SPINDLE_OFF; 6825 } 6826 6827 mutex_exit(SD_MUTEX(un)); 6828 6829 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n"); 6830 6831 return (DDI_SUCCESS); 6832 } 6833 6834 6835 /* 6836 * Function: sd_ddi_resume 6837 * 6838 * Description: Performs system power-up operations. 6839 * 6840 * Return Code: DDI_SUCCESS 6841 * DDI_FAILURE 6842 * 6843 * Context: Kernel thread context 6844 */ 6845 6846 static int 6847 sd_ddi_resume(dev_info_t *devi) 6848 { 6849 struct sd_lun *un; 6850 6851 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6852 if (un == NULL) { 6853 return (DDI_FAILURE); 6854 } 6855 6856 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 6857 6858 mutex_enter(SD_MUTEX(un)); 6859 Restore_state(un); 6860 6861 /* 6862 * Restore the state which was saved, to give 6863 * the right state in un_last_state. 6864 */ 6865 un->un_last_state = un->un_save_state; 6866 /* 6867 * Note: throttle comes back at full. 6868 * Also note: this MUST be done before calling pm_raise_power 6869 * otherwise the system can get hung in biowait. The scenario where 6870 * this'll happen is under cpr suspend. Writing of the system 6871 * state goes through sddump, which writes 0 to un_throttle. If 6872 * writing the system state then fails, for example if the partition is 6873 * too small, then cpr attempts a resume.
If throttle isn't restored 6874 * from the saved value until after calling pm_raise_power then 6875 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 6876 * in biowait. 6877 */ 6878 un->un_throttle = un->un_saved_throttle; 6879 6880 /* 6881 * The chance of failure is very rare, as the only command done in the 6882 * power entry point is the START command when transitioning from 0->1 6883 * or unknown->1. Put it in the SPINDLE ON state irrespective of the 6884 * state at which suspend was done. Ignore the return value, as the 6885 * resume should not fail. In the case of removable media the media 6886 * need not be inserted, and hence there is a chance that raise power 6887 * will fail with media not present. 6888 */ 6889 if (!ISREMOVABLE(un)) { 6890 mutex_exit(SD_MUTEX(un)); 6891 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 6892 mutex_enter(SD_MUTEX(un)); 6893 } 6894 6895 /* 6896 * Don't broadcast to the suspend cv and therefore possibly 6897 * start I/O until after power has been restored. 6898 */ 6899 cv_broadcast(&un->un_suspend_cv); 6900 cv_broadcast(&un->un_state_cv); 6901 6902 /* restart thread */ 6903 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 6904 scsi_watch_resume(un->un_swr_token); 6905 } 6906 6907 #if (defined(__fibre)) 6908 if (un->un_f_is_fibre == TRUE) { 6909 /* 6910 * Add callbacks for insert and remove events 6911 */ 6912 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 6913 sd_init_event_callbacks(un); 6914 } 6915 } 6916 #endif 6917 6918 /* 6919 * Transport any pending commands to the target. 6920 * 6921 * If this is a low-activity device, commands in queue will have to 6922 * wait until new commands come in, which may take a while. Also, we 6923 * specifically don't check un_ncmds_in_transport because we know that 6924 * there really are no commands in progress after the unit was 6925 * suspended and we could have reached the throttle level, been 6926 * suspended, and have no new commands coming in for a while. Highly 6927 * unlikely, but so is the low-activity disk scenario. 6928 */ 6929 ddi_xbuf_dispatch(un->un_xbuf_attr); 6930 6931 sd_start_cmds(un, NULL); 6932 mutex_exit(SD_MUTEX(un)); 6933 6934 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 6935 6936 return (DDI_SUCCESS); 6937 } 6938 6939 6940 /* 6941 * Function: sd_ddi_pm_resume 6942 * 6943 * Description: Set the drive state to powered on. 6944 * Someone else is required to actually change the drive 6945 * power level. 6946 * 6947 * Arguments: un - driver soft state (unit) structure 6948 * 6949 * Return Code: DDI_SUCCESS 6950 * 6951 * Context: Kernel thread context 6952 */ 6953 6954 static int 6955 sd_ddi_pm_resume(struct sd_lun *un) 6956 { 6957 ASSERT(un != NULL); 6958 6959 ASSERT(!mutex_owned(SD_MUTEX(un))); 6960 mutex_enter(SD_MUTEX(un)); 6961 un->un_power_level = SD_SPINDLE_ON; 6962 6963 ASSERT(!mutex_owned(&un->un_pm_mutex)); 6964 mutex_enter(&un->un_pm_mutex); 6965 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 6966 un->un_pm_count++; 6967 ASSERT(un->un_pm_count == 0); 6968 /* 6969 * Note: no longer do the cv_broadcast on un_suspend_cv. The 6970 * un_suspend_cv is for a system resume, not a power management 6971 * device resume. (4297749) 6972 * cv_broadcast(&un->un_suspend_cv); 6973 */ 6974 } 6975 mutex_exit(&un->un_pm_mutex); 6976 mutex_exit(SD_MUTEX(un)); 6977 6978 return (DDI_SUCCESS); 6979 } 6980 6981 6982 /* 6983 * Function: sd_pm_idletimeout_handler 6984 * 6985 * Description: A timer routine that's active only while a device is busy.


/*
 * Function: sd_pm_idletimeout_handler
 *
 * Description: A timer routine that's active only while a device is busy.
 *		The purpose is to extend slightly the pm framework's busy
 *		view of the device to prevent busy/idle thrashing for
 *		back-to-back commands. Do this by comparing the current time
 *		to the time at which the last command completed and when the
 *		difference is greater than sd_pm_idletime, call
 *		pm_idle_component. In addition to indicating idle to the pm
 *		framework, update the chain type to again use the internal pm
 *		layers of the driver.
 *
 * Arguments: arg - driver soft state (unit) structure
 *
 * Context: Executes in a timeout(9F) thread context
 */

static void
sd_pm_idletimeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	time_t now;

	mutex_enter(&sd_detach_mutex);
	if (un->un_detach_count != 0) {
		/* Abort if the instance is detaching */
		mutex_exit(&sd_detach_mutex);
		return;
	}
	mutex_exit(&sd_detach_mutex);

	now = ddi_get_time();
	/*
	 * Grab both mutexes, in the proper order, since we're accessing
	 * both PM and softstate variables.
	 */
	mutex_enter(SD_MUTEX(un));
	mutex_enter(&un->un_pm_mutex);
	if (((now - un->un_pm_idle_time) > sd_pm_idletime) &&
	    (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
		/*
		 * Update the chain types.
		 * This takes effect on the next new command received.
		 */
		if (ISREMOVABLE(un)) {
			un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
		} else {
			un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
		}
		un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_idletimeout_handler: idling device\n");
		(void) pm_idle_component(SD_DEVINFO(un), 0);
		un->un_pm_idle_timeid = NULL;
	} else {
		un->un_pm_idle_timeid =
		    timeout(sd_pm_idletimeout_handler, un,
		    (drv_usectohz((clock_t)300000))); /* 300 ms. */
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_pm_timeout_handler
 *
 * Description: Callback to tell the framework we are idle.
 *
 * Context: timeout(9F) thread context.
 */

static void
sd_pm_timeout_handler(void *arg)
{
	struct sd_lun *un = arg;

	(void) pm_idle_component(SD_DEVINFO(un), 0);
	mutex_enter(&un->un_pm_mutex);
	un->un_pm_timeid = NULL;
	mutex_exit(&un->un_pm_mutex);
}
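
/*
 * Illustrative sketch (not part of the driver): the idle-extension logic
 * above is a self-rearming timeout(9F). If the device has been quiet for
 * long enough, declare the component idle and let the timeout die by not
 * re-arming it; otherwise re-schedule the handler. With a hypothetical
 * predicate device_is_quiet():
 *
 *	mutex_enter(&un->un_pm_mutex);
 *	if (device_is_quiet(un)) {
 *		(void) pm_idle_component(SD_DEVINFO(un), 0);
 *		un->un_pm_idle_timeid = NULL;		<- timeout dies here
 *	} else {
 *		un->un_pm_idle_timeid = timeout(sd_pm_idletimeout_handler,
 *		    un, drv_usectohz(300000));		<- re-arm in 300 ms
 *	}
 *	mutex_exit(&un->un_pm_mutex);
 */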


/*
 * Function: sdpower
 *
 * Description: PM entry point.
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdpower(dev_info_t *devi, int component, int level)
{
	struct sd_lun *un;
	int instance;
	int rval = DDI_SUCCESS;
	uint_t i, log_page_size, maxcycles, ncycles;
	uchar_t *log_page_data;
	int log_sense_page;
	int medium_present;
	time_t intvlp;
	dev_t dev;
	struct pm_trans_data sd_pm_tran_data;
	uchar_t save_state;
	int sval;
	uchar_t state_before_pm;
	int got_semaphore_here;

	instance = ddi_get_instance(devi);

	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) ||
	    component != 0) {
		return (DDI_FAILURE);
	}

	dev = sd_make_device(SD_DEVINFO(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);

	/*
	 * Must synchronize power down with close.
	 * Attempt to decrement/acquire the open/close semaphore,
	 * but do NOT wait on it. If it's not greater than zero,
	 * ie. it can't be decremented without waiting, then
	 * someone else, either open or close, already has it
	 * and the try returns 0. Use that knowledge here to determine
	 * if it's OK to change the device power level.
	 * Also, only increment it on exit if it was decremented, ie. gotten,
	 * here.
	 */
	got_semaphore_here = sema_tryp(&un->un_semoclose);

	mutex_enter(SD_MUTEX(un));

	SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * If un_ncmds_in_driver is non-zero it indicates commands are
	 * already being processed in the driver; if the semaphore was
	 * not gotten here it indicates an open or close is being processed.
	 * At the same time somebody is requesting to go to low power, which
	 * can't happen; therefore we need to return failure.
	 */
	if ((level == SD_SPINDLE_OFF) &&
	    ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device has queued cmds.\n");
		return (DDI_FAILURE);
	}

	/*
	 * If the device is OFFLINE, the disk is completely dead; any START
	 * or STOP command we send to power it on or off would fail anyway,
	 * so return failure here.
	 *
	 * Power changes to a device that's OFFLINE or SUSPENDED
	 * are not allowed.
	 */
	if ((un->un_state == SD_STATE_OFFLINE) ||
	    (un->un_state == SD_STATE_SUSPENDED)) {
		mutex_exit(SD_MUTEX(un));

		if (got_semaphore_here != 0) {
			sema_v(&un->un_semoclose);
		}
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sdpower: exit, device is off-line.\n");
		return (DDI_FAILURE);
	}

	/*
	 * Change the device's state to indicate its power level
	 * is being changed. Do this to prevent a power off in the
	 * middle of commands, which is especially bad on devices
	 * that are really powered off instead of just spun down.
	 */
	state_before_pm = un->un_state;
	un->un_state = SD_STATE_PM_CHANGING;

	mutex_exit(SD_MUTEX(un));
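
	/*
	 * Illustrative sketch (not part of the driver): sema_tryp(9F), used
	 * above, returns non-zero if the semaphore was decremented without
	 * blocking and 0 if it would have had to wait. The matching release
	 * must only be done if the try succeeded:
	 *
	 *	int got = sema_tryp(&un->un_semoclose);
	 *	...
	 *	if (got != 0)
	 *		sema_v(&un->un_semoclose);
	 */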
7189 */ 7190 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 7191 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 7192 7193 mutex_enter(SD_MUTEX(un)); 7194 log_sense_page = un->un_start_stop_cycle_page; 7195 mutex_exit(SD_MUTEX(un)); 7196 7197 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 7198 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT); 7199 #ifdef SDDEBUG 7200 if (sd_force_pm_supported) { 7201 /* Force a successful result */ 7202 rval = 0; 7203 } 7204 #endif 7205 if (rval != 0) { 7206 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 7207 "Log Sense Failed\n"); 7208 kmem_free(log_page_data, log_page_size); 7209 /* Cannot support power management on those drives */ 7210 7211 if (got_semaphore_here != 0) { 7212 sema_v(&un->un_semoclose); 7213 } 7214 /* 7215 * On exit put the state back to it's original value 7216 * and broadcast to anyone waiting for the power 7217 * change completion. 7218 */ 7219 mutex_enter(SD_MUTEX(un)); 7220 un->un_state = state_before_pm; 7221 cv_broadcast(&un->un_suspend_cv); 7222 mutex_exit(SD_MUTEX(un)); 7223 SD_TRACE(SD_LOG_IO_PM, un, 7224 "sdpower: exit, Log Sense Failed.\n"); 7225 return (DDI_FAILURE); 7226 } 7227 7228 /* 7229 * From the page data - Convert the essential information to 7230 * pm_trans_data 7231 */ 7232 maxcycles = 7233 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) | 7234 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 7235 7236 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 7237 7238 ncycles = 7239 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 7240 (log_page_data[0x26] << 8) | log_page_data[0x27]; 7241 7242 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 7243 7244 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 7245 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 7246 log_page_data[8+i]; 7247 } 7248 7249 kmem_free(log_page_data, log_page_size); 7250 7251 /* 7252 * Call pm_trans_check routine to get the Ok from 7253 * the global policy 7254 */ 7255 7256 sd_pm_tran_data.format = DC_SCSI_FORMAT; 7257 sd_pm_tran_data.un.scsi_cycles.flag = 0; 7258 7259 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 7260 #ifdef SDDEBUG 7261 if (sd_force_pm_supported) { 7262 /* Force a successful result */ 7263 rval = 1; 7264 } 7265 #endif 7266 switch (rval) { 7267 case 0: 7268 /* 7269 * Not Ok to Power cycle or error in parameters passed 7270 * Would have given the advised time to consider power 7271 * cycle. Based on the new intvlp parameter we are 7272 * supposed to pretend we are busy so that pm framework 7273 * will never call our power entry point. Because of 7274 * that install a timeout handler and wait for the 7275 * recommended time to elapse so that power management 7276 * can be effective again. 7277 * 7278 * To effect this behavior, call pm_busy_component to 7279 * indicate to the framework this device is busy. 7280 * By not adjusting un_pm_count the rest of PM in 7281 * the driver will function normally, and independant 7282 * of this but because the framework is told the device 7283 * is busy it won't attempt powering down until it gets 7284 * a matching idle. The timeout handler sends this. 7285 * Note: sd_pm_entry can't be called here to do this 7286 * because sdpower may have been called as a result 7287 * of a call to pm_raise_power from within sd_pm_entry. 7288 * 7289 * If a timeout handler is already active then 7290 * don't install another. 
7291 */ 7292 mutex_enter(&un->un_pm_mutex); 7293 if (un->un_pm_timeid == NULL) { 7294 un->un_pm_timeid = 7295 timeout(sd_pm_timeout_handler, 7296 un, intvlp * drv_usectohz(1000000)); 7297 mutex_exit(&un->un_pm_mutex); 7298 (void) pm_busy_component(SD_DEVINFO(un), 0); 7299 } else { 7300 mutex_exit(&un->un_pm_mutex); 7301 } 7302 if (got_semaphore_here != 0) { 7303 sema_v(&un->un_semoclose); 7304 } 7305 /* 7306 * On exit put the state back to it's original value 7307 * and broadcast to anyone waiting for the power 7308 * change completion. 7309 */ 7310 mutex_enter(SD_MUTEX(un)); 7311 un->un_state = state_before_pm; 7312 cv_broadcast(&un->un_suspend_cv); 7313 mutex_exit(SD_MUTEX(un)); 7314 7315 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, " 7316 "trans check Failed, not ok to power cycle.\n"); 7317 return (DDI_FAILURE); 7318 7319 case -1: 7320 if (got_semaphore_here != 0) { 7321 sema_v(&un->un_semoclose); 7322 } 7323 /* 7324 * On exit put the state back to it's original value 7325 * and broadcast to anyone waiting for the power 7326 * change completion. 7327 */ 7328 mutex_enter(SD_MUTEX(un)); 7329 un->un_state = state_before_pm; 7330 cv_broadcast(&un->un_suspend_cv); 7331 mutex_exit(SD_MUTEX(un)); 7332 SD_TRACE(SD_LOG_IO_PM, un, 7333 "sdpower: exit, trans check command Failed.\n"); 7334 return (DDI_FAILURE); 7335 } 7336 } 7337 7338 if (level == SD_SPINDLE_OFF) { 7339 /* 7340 * Save the last state... if the STOP FAILS we need it 7341 * for restoring 7342 */ 7343 mutex_enter(SD_MUTEX(un)); 7344 save_state = un->un_last_state; 7345 /* 7346 * There must not be any cmds. getting processed 7347 * in the driver when we get here. Power to the 7348 * device is potentially going off. 7349 */ 7350 ASSERT(un->un_ncmds_in_driver == 0); 7351 mutex_exit(SD_MUTEX(un)); 7352 7353 /* 7354 * For now suspend the device completely before spindle is 7355 * turned off 7356 */ 7357 if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) { 7358 if (got_semaphore_here != 0) { 7359 sema_v(&un->un_semoclose); 7360 } 7361 /* 7362 * On exit put the state back to it's original value 7363 * and broadcast to anyone waiting for the power 7364 * change completion. 7365 */ 7366 mutex_enter(SD_MUTEX(un)); 7367 un->un_state = state_before_pm; 7368 cv_broadcast(&un->un_suspend_cv); 7369 mutex_exit(SD_MUTEX(un)); 7370 SD_TRACE(SD_LOG_IO_PM, un, 7371 "sdpower: exit, PM suspend Failed.\n"); 7372 return (DDI_FAILURE); 7373 } 7374 } 7375 7376 /* 7377 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open, 7378 * close, or strategy. Dump no long uses this routine, it uses it's 7379 * own code so it can be done in polled mode. 7380 */ 7381 7382 medium_present = TRUE; 7383 7384 /* 7385 * When powering up, issue a TUR in case the device is at unit 7386 * attention. Don't do retries. Bypass the PM layer, otherwise 7387 * a deadlock on un_pm_busy_cv will occur. 7388 */ 7389 if (level == SD_SPINDLE_ON) { 7390 (void) sd_send_scsi_TEST_UNIT_READY(un, 7391 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 7392 } 7393 7394 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 7395 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 7396 7397 sval = sd_send_scsi_START_STOP_UNIT(un, 7398 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP), 7399 SD_PATH_DIRECT); 7400 /* Command failed, check for media present. 
	if ((sval == ENXIO) && ISREMOVABLE(un)) {
		medium_present = FALSE;
	}

	/*
	 * The conditions of interest here are:
	 *   if a spindle off with media present fails,
	 *	then restore the state and return an error.
	 *   else if a spindle on fails,
	 *	then return an error (there's no state to restore).
	 * In all other cases we set up for the new state
	 * and return success.
	 */
	switch (level) {
	case SD_SPINDLE_OFF:
		if ((medium_present == TRUE) && (sval != 0)) {
			/* The stop command from above failed */
			rval = DDI_FAILURE;
			/*
			 * The stop command failed, and we have media
			 * present. Put the level back by calling
			 * sd_pm_resume() and set the state back to
			 * its previous value.
			 */
			(void) sd_ddi_pm_resume(un);
			mutex_enter(SD_MUTEX(un));
			un->un_last_state = save_state;
			mutex_exit(SD_MUTEX(un));
			break;
		}
		/*
		 * The stop command from above succeeded.
		 */
		if (ISREMOVABLE(un)) {
			/*
			 * Terminate the watch thread in case of removable
			 * media devices going into low power state. This is
			 * as per the requirements of the pm framework;
			 * otherwise commands will be generated for the device
			 * (through the watch thread) even when the device is
			 * in low power state.
			 */
			mutex_enter(SD_MUTEX(un));
			un->un_f_watcht_stopped = FALSE;
			if (un->un_swr_token != NULL) {
				opaque_t temp_token = un->un_swr_token;
				un->un_f_watcht_stopped = TRUE;
				un->un_swr_token = NULL;
				mutex_exit(SD_MUTEX(un));
				(void) scsi_watch_request_terminate(temp_token,
				    SCSI_WATCH_TERMINATE_WAIT);
			} else {
				mutex_exit(SD_MUTEX(un));
			}
		}
		break;

	default: /* The level requested is spindle on... */
		/*
		 * Legacy behavior: return success on a failed spinup
		 * if there is no media in the drive.
		 * Do this by looking at medium_present here.
		 */
		if ((sval != 0) && medium_present) {
			/* The start command from above failed */
			rval = DDI_FAILURE;
			break;
		}
		/*
		 * The start command from above succeeded.
		 * Resume the devices now that we have
		 * started the disks.
		 */
		(void) sd_ddi_pm_resume(un);

		/*
		 * Resume the watch thread since it was suspended
		 * when the device went into low power mode.
		 */
		if (ISREMOVABLE(un)) {
			mutex_enter(SD_MUTEX(un));
			if (un->un_f_watcht_stopped == TRUE) {
				opaque_t temp_token;

				un->un_f_watcht_stopped = FALSE;
				mutex_exit(SD_MUTEX(un));
				temp_token = scsi_watch_request_submit(
				    SD_SCSI_DEVP(un),
				    sd_check_media_time,
				    SENSE_LENGTH, sd_media_watch_cb,
				    (caddr_t)dev);
				mutex_enter(SD_MUTEX(un));
				un->un_swr_token = temp_token;
			}
			mutex_exit(SD_MUTEX(un));
		}
	}
	if (got_semaphore_here != 0) {
		sema_v(&un->un_semoclose);
	}
	/*
	 * On exit put the state back to its original value
	 * and broadcast to anyone waiting for the power
	 * change completion.
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_state = state_before_pm;
	cv_broadcast(&un->un_suspend_cv);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);

	return (rval);
}
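
/*
 * Informational summary (no additional logic) of the outcome handling in
 * sdpower() above:
 *
 *	level		START/STOP result	action
 *	-----		-----------------	------
 *	SPINDLE_OFF	failed, media present	sd_ddi_pm_resume(), restore
 *						un_last_state, DDI_FAILURE
 *	SPINDLE_OFF	ok (or no media)	terminate watch thread if
 *						removable, DDI_SUCCESS
 *	SPINDLE_ON	failed, media present	DDI_FAILURE
 *	SPINDLE_ON	ok (or no media)	sd_ddi_pm_resume(), restart
 *						watch thread if removable
 */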



/*
 * Function: sdattach
 *
 * Description: Driver's attach(9e) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *	      cmd  - attach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_ATTACH:
		return (sd_unit_attach(devi));
	case DDI_RESUME:
		return (sd_ddi_resume(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function: sddetach
 *
 * Description: Driver's detach(9E) entry point function.
 *
 * Arguments: devi - opaque device info handle
 *	      cmd  - detach type
 *
 * Return Code: DDI_SUCCESS
 *		DDI_FAILURE
 *
 * Context: Kernel thread context
 */

static int
sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_DETACH:
		return (sd_unit_detach(devi));
	case DDI_SUSPEND:
		return (sd_ddi_suspend(devi));
	default:
		break;
	}
	return (DDI_FAILURE);
}


/*
 * Function: sd_sync_with_callback
 *
 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft
 *		state while the callback routine is active.
 *
 * Arguments: un: softstate structure for the instance
 *
 * Context: Kernel thread context
 */

static void
sd_sync_with_callback(struct sd_lun *un)
{
	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	ASSERT(un->un_in_callback >= 0);

	while (un->un_in_callback > 0) {
		mutex_exit(SD_MUTEX(un));
		delay(2);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_exit(SD_MUTEX(un));
}
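
/*
 * Illustrative sketch (not part of the driver): sd_sync_with_callback()
 * above is a poll-and-delay barrier. The generic shape, with hypothetical
 * names, is:
 *
 *	mutex_enter(&lock);
 *	while (callbacks_active > 0) {
 *		mutex_exit(&lock);
 *		delay(2);		<- drop the lock while napping so
 *		mutex_enter(&lock);	   the callback can make progress
 *	}
 *	mutex_exit(&lock);
 *
 * A cv_wait(9F) signalled by the callback would avoid the polling, at the
 * cost of extra synchronization in the callback path.
 */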

/*
 * Function: sd_unit_attach
 *
 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates
 *		the soft state structure for the device and performs
 *		all necessary structure and device initializations.
 *
 * Arguments: devi: the system's dev_info_t for the device.
 *
 * Return Code: DDI_SUCCESS if attach is successful.
 *		DDI_FAILURE if any part of the attach fails.
 *
 * Context: Called at attach(9e) time for the DDI_ATTACH flag.
 *	    Kernel thread context only. Can sleep.
 */

static int
sd_unit_attach(dev_info_t *devi)
{
	struct scsi_device *devp;
	struct sd_lun *un;
	char *variantp;
	int reservation_flag = SD_TARGET_IS_UNRESERVED;
	int instance;
	int rval;
	uint64_t capacity;
	uint_t lbasize;

	/*
	 * Retrieve the target driver's private data area. This was set
	 * up by the HBA.
	 */
	devp = ddi_get_driver_private(devi);

	/*
	 * Since we have no idea what state things were left in by the last
	 * user of the device, set up some 'default' settings, ie. turn 'em
	 * off. The scsi_ifsetcap calls force re-negotiations with the drive.
	 * Do this before the scsi_probe, which sends an inquiry.
	 * This is a fix for bug (4430280).
	 * Of special importance is wide-xfer. The drive could have been left
	 * in wide transfer mode by the last driver to communicate with it,
	 * and this includes us. If that's the case, and if the following is
	 * not set up properly or we don't re-negotiate with the drive prior
	 * to transferring data to/from the drive, it causes bus parity
	 * errors, data overruns, and unexpected interrupts. This first
	 * occurred when the fix for bug (4378686) was made.
	 */
	(void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1);
	(void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1);

	/*
	 * Use scsi_probe() to issue an INQUIRY command to the device.
	 * This call will allocate and fill in the scsi_inquiry structure
	 * and point the sd_inq member of the scsi_device structure to it.
	 * If the attach succeeds, then this memory will not be de-allocated
	 * (via scsi_unprobe()) until the instance is detached.
	 */
	if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
		goto probe_failed;
	}

	/*
	 * Check the device type as specified in the inquiry data and
	 * claim it if it is of a type that we support.
	 */
	switch (devp->sd_inq->inq_dtype) {
	case DTYPE_DIRECT:
		break;
	case DTYPE_RODIRECT:
		break;
	case DTYPE_OPTICAL:
		break;
	case DTYPE_NOTPRESENT:
	default:
		/* Unsupported device type; fail the attach. */
		goto probe_failed;
	}

	/*
	 * Allocate the soft state structure for this unit.
	 *
	 * We rely upon this memory being set to all zeroes by
	 * ddi_soft_state_zalloc(). We assume that any member of the
	 * soft state structure that is not explicitly initialized by
	 * this routine will have a value of zero.
	 */
	instance = ddi_get_instance(devp->sd_dev);
	if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) {
		goto probe_failed;
	}

	/*
	 * Retrieve a pointer to the newly-allocated soft state.
	 *
	 * This should NEVER fail if the ddi_soft_state_zalloc() call above
	 * was successful, unless something has gone horribly wrong and the
	 * ddi's soft state internals are corrupt (in which case it is
	 * probably better to halt here than just fail the attach....)
	 */
	if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
		panic("sd_unit_attach: NULL soft state on instance:0x%x",
		    instance);
		/*NOTREACHED*/
	}

	/*
	 * Link the back ptr of the driver soft state to the scsi_device
	 * struct for this lun.
	 * Save a pointer to the softstate in the driver-private area of
	 * the scsi_device struct.
	 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until
	 * we first set un->un_sd below.
	 */
	un->un_sd = devp;
	devp->sd_private = (opaque_t)un;

	/*
	 * The following must be after devp is stored in the soft state struct.
	 */
#ifdef SDDEBUG
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "%s_unit_attach: un:0x%p instance:%d\n",
	    ddi_driver_name(devi), un, instance);
#endif

	/*
	 * Set up the device type and node type (for the minor nodes).
	 * By default we assume that the device can at least support the
	 * Common Command Set. Call it a CD-ROM if it reports itself
	 * as a RODIRECT device.
	 */
	switch (devp->sd_inq->inq_dtype) {
	case DTYPE_RODIRECT:
		un->un_node_type = DDI_NT_CD_CHAN;
		un->un_ctype = CTYPE_CDROM;
		break;
	case DTYPE_OPTICAL:
		un->un_node_type = DDI_NT_BLOCK_CHAN;
		un->un_ctype = CTYPE_ROD;
		break;
	default:
		un->un_node_type = DDI_NT_BLOCK_CHAN;
		un->un_ctype = CTYPE_CCS;
		break;
	}

	/*
	 * Try to read the interconnect type from the HBA.
	 *
	 * Note: This driver is currently compiled as two binaries, a parallel
	 * scsi version (sd) and a fibre channel version (ssd). All functional
	 * differences are determined at compile time. In the future a single
	 * binary will be provided and the interconnect type will be used to
	 * differentiate between fibre and parallel scsi behaviors. At that
	 * time it will be necessary for all fibre channel HBAs to support
	 * this property.
	 *
	 * Set un_f_is_fibre to TRUE (default fibre).
	 */
	un->un_f_is_fibre = TRUE;
	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
	case INTERCONNECT_SSA:
		un->un_interconnect_type = SD_INTERCONNECT_SSA;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
		break;
	case INTERCONNECT_PARALLEL:
		un->un_f_is_fibre = FALSE;
		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
		break;
	case INTERCONNECT_FIBRE:
		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
		break;
	case INTERCONNECT_FABRIC:
		un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
		un->un_node_type = DDI_NT_BLOCK_FABRIC;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
		break;
	default:
#ifdef SD_DEFAULT_INTERCONNECT_TYPE
		/*
		 * The HBA does not support the "interconnect-type" property
		 * (or did not provide a recognized type).
		 *
		 * Note: This will be obsoleted when a single fibre channel
		 * and parallel scsi driver is delivered. In the meantime the
		 * interconnect type will be set to the platform default. If
		 * that type is not parallel SCSI, it means that we should be
		 * assuming "ssd" semantics. However, here this also means
		 * that the FC HBA is not supporting the "interconnect-type"
		 * property like we expect it to, so log this occurrence.
		 */
		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
		if (!SD_IS_PARALLEL_SCSI(un)) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Assuming "
			    "INTERCONNECT_FIBRE\n", un);
		} else {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Assuming "
			    "INTERCONNECT_PARALLEL\n", un);
			un->un_f_is_fibre = FALSE;
		}
#else
		/*
		 * Note: This source will be implemented when a single fibre
		 * channel and parallel scsi driver is delivered. The default
		 * will be to assume that if a device does not support the
		 * "interconnect-type" property it is a parallel SCSI HBA and
		 * we will set the interconnect type for parallel scsi.
		 */
		un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
		un->un_f_is_fibre = FALSE;
#endif
		break;
	}

	if (un->un_f_is_fibre == TRUE) {
		if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
		    SCSI_VERSION_3) {
			switch (un->un_interconnect_type) {
			case SD_INTERCONNECT_FIBRE:
			case SD_INTERCONNECT_SSA:
				un->un_node_type = DDI_NT_BLOCK_WWN;
				break;
			default:
				break;
			}
		}
	}
7822 */ 7823 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7824 un->un_f_is_fibre = FALSE; 7825 #endif 7826 break; 7827 } 7828 7829 if (un->un_f_is_fibre == TRUE) { 7830 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7831 SCSI_VERSION_3) { 7832 switch (un->un_interconnect_type) { 7833 case SD_INTERCONNECT_FIBRE: 7834 case SD_INTERCONNECT_SSA: 7835 un->un_node_type = DDI_NT_BLOCK_WWN; 7836 break; 7837 default: 7838 break; 7839 } 7840 } 7841 } 7842 7843 /* 7844 * Initialize the Request Sense command for the target 7845 */ 7846 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7847 goto alloc_rqs_failed; 7848 } 7849 7850 /* 7851 * Set un_retry_count with SD_RETRY_COUNT, this is ok for Sparc 7852 * with seperate binary for sd and ssd. 7853 * 7854 * x86 has 1 binary, un_retry_count is set base on connection type. 7855 * The hardcoded values will go away when Sparc uses 1 binary 7856 * for sd and ssd. This hardcoded values need to match 7857 * SD_RETRY_COUNT in sddef.h 7858 * The value used is base on interconnect type. 7859 * fibre = 3, parallel = 5 7860 */ 7861 #if defined(__i386) || defined(__amd64) 7862 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 7863 #else 7864 un->un_retry_count = SD_RETRY_COUNT; 7865 #endif 7866 7867 /* 7868 * Set the per disk retry count to the default number of retries 7869 * for disks and CDROMs. This value can be overridden by the 7870 * disk property list or an entry in sd.conf. 7871 */ 7872 un->un_notready_retry_count = 7873 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 7874 : DISK_NOT_READY_RETRY_COUNT(un); 7875 7876 /* 7877 * Set the busy retry count to the default value of un_retry_count. 7878 * This can be overridden by entries in sd.conf or the device 7879 * config table. 7880 */ 7881 un->un_busy_retry_count = un->un_retry_count; 7882 7883 /* 7884 * Init the reset threshold for retries. This number determines 7885 * how many retries must be performed before a reset can be issued 7886 * (for certain error conditions). This can be overridden by entries 7887 * in sd.conf or the device config table. 7888 */ 7889 un->un_reset_retry_count = (un->un_retry_count / 2); 7890 7891 /* 7892 * Set the victim_retry_count to the default un_retry_count 7893 */ 7894 un->un_victim_retry_count = (2 * un->un_retry_count); 7895 7896 /* 7897 * Set the reservation release timeout to the default value of 7898 * 5 seconds. This can be overridden by entries in ssd.conf or the 7899 * device config table. 7900 */ 7901 un->un_reserve_release_time = 5; 7902 7903 /* 7904 * Set up the default maximum transfer size. Note that this may 7905 * get updated later in the attach, when setting up default wide 7906 * operations for disks. 7907 */ 7908 #if defined(__i386) || defined(__amd64) 7909 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7910 #else 7911 un->un_max_xfer_size = (uint_t)maxphys; 7912 #endif 7913 7914 /* 7915 * Get "allow bus device reset" property (defaults to "enabled" if 7916 * the property was not defined). This is to disable bus resets for 7917 * certain kinds of error recovery. Note: In the future when a run-time 7918 * fibre check is available the soft state flag should default to 7919 * enabled. 
7920 */ 7921 if (un->un_f_is_fibre == TRUE) { 7922 un->un_f_allow_bus_device_reset = TRUE; 7923 } else { 7924 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7925 "allow-bus-device-reset", 1) != 0) { 7926 un->un_f_allow_bus_device_reset = TRUE; 7927 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7928 "sd_unit_attach: un:0x%p Bus device reset enabled\n", 7929 un); 7930 } else { 7931 un->un_f_allow_bus_device_reset = FALSE; 7932 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7933 "sd_unit_attach: un:0x%p Bus device reset disabled\n", 7934 un); 7935 } 7936 } 7937 7938 /* 7939 * Check if this is an ATAPI device. ATAPI devices use Group 1 7940 * Read/Write commands and Group 2 Mode Sense/Select commands. 7941 * 7942 * Note: The "obsolete" way of doing this is to check for the "atapi" 7943 * property. The new "variant" property with a value of "atapi" has been 7944 * introduced so that future 'variants' of standard SCSI behavior (like 7945 * atapi) could be specified by the underlying HBA drivers by supplying 7946 * a new value for the "variant" property, instead of having to define a 7947 * new property. 7948 */ 7949 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7950 un->un_f_cfg_is_atapi = TRUE; 7951 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7952 "sd_unit_attach: un:0x%p Atapi device\n", un); 7953 } 7954 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7955 &variantp) == DDI_PROP_SUCCESS) { 7956 if (strcmp(variantp, "atapi") == 0) { 7957 un->un_f_cfg_is_atapi = TRUE; 7958 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7959 "sd_unit_attach: un:0x%p Atapi device\n", un); 7960 } 7961 ddi_prop_free(variantp); 7962 } 7963 7964 /* 7965 * Assume doorlock commands are supported. If not, the first 7966 * call to sd_send_scsi_DOORLOCK() will set to FALSE 7967 */ 7968 un->un_f_doorlock_supported = TRUE; 7969 7970 un->un_cmd_timeout = SD_IO_TIME; 7971 7972 /* Info on current states, statuses, etc. (Updated frequently) */ 7973 un->un_state = SD_STATE_NORMAL; 7974 un->un_last_state = SD_STATE_NORMAL; 7975 7976 /* Control & status info for command throttling */ 7977 un->un_throttle = sd_max_throttle; 7978 un->un_saved_throttle = sd_max_throttle; 7979 un->un_min_throttle = sd_min_throttle; 7980 7981 if (un->un_f_is_fibre == TRUE) { 7982 un->un_f_use_adaptive_throttle = TRUE; 7983 } else { 7984 un->un_f_use_adaptive_throttle = FALSE; 7985 } 7986 7987 /* Removable media support. */ 7988 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7989 un->un_mediastate = DKIO_NONE; 7990 un->un_specified_mediastate = DKIO_NONE; 7991 7992 /* CVs for suspend/resume (PM or DR) */ 7993 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7994 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7995 7996 /* Power management support. */ 7997 un->un_power_level = SD_SPINDLE_UNINIT; 7998 7999 /* 8000 * The open/close semaphore is used to serialize threads executing 8001 * in the driver's open & close entry point routines for a given 8002 * instance. 8003 */ 8004 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 8005 8006 /* 8007 * The conf file entry and softstate variable is a forceful override, 8008 * meaning a non-zero value must be entered to change the default. 8009 */ 8010 un->un_f_disksort_disabled = FALSE; 8011 8012 /* 8013 * Retrieve the properties from the static driver table or the driver 8014 * configuration file (.conf) for this unit and update the soft state 8015 * for the device as needed for the indicated properties. 

	/*
	 * Retrieve the properties from the static driver table or the driver
	 * configuration file (.conf) for this unit and update the soft state
	 * for the device as needed for the indicated properties.
	 * Note: the property configuration needs to occur here as some of the
	 * following routines may have dependencies on soft state flags set
	 * as part of the driver property configuration.
	 */
	sd_read_unit_properties(un);
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p property configuration complete.\n", un);

	/*
	 * By default, we mark the capacity, lbasize, and geometry
	 * as invalid. Only if we successfully read a valid capacity
	 * will we update the un_blockcount and un_tgt_blocksize with the
	 * valid values (the geometry will be validated later).
	 */
	un->un_f_blockcount_is_valid = FALSE;
	un->un_f_tgt_blocksize_is_valid = FALSE;
	un->un_f_geometry_is_valid = FALSE;

	/*
	 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
	 * otherwise.
	 */
	un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
	un->un_blockcount = 0;

	/*
	 * Set up the per-instance info needed to determine the correct
	 * CDBs and other info for issuing commands to the target.
	 */
	sd_init_cdb_limits(un);

	/*
	 * Set up the IO chains to use, based upon the target type.
	 */
	if (ISREMOVABLE(un)) {
		un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
	} else {
		un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
	}
	un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
	un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
	un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;

	un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
	    sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
	    ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
	ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);

	if (ISCD(un)) {
		un->un_additional_codes = sd_additional_codes;
	} else {
		un->un_additional_codes = NULL;
	}

	/*
	 * Create the kstats here so they can be available for attach-time
	 * routines that send commands to the unit (either polled or via
	 * sd_send_scsi_cmd).
	 *
	 * Note: This is a critical sequence that needs to be maintained:
	 *	1) Instantiate the kstats here, before any routines using the
	 *	   iopath (i.e. sd_send_scsi_cmd).
	 *	2) Initialize the error stats (sd_set_errstats) and partition
	 *	   stats (sd_set_pstats), following sd_validate_geometry(),
	 *	   sd_register_devid(), and sd_disable_caching().
	 */
	un->un_stats = kstat_create(sd_label, instance,
	    NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
	if (un->un_stats != NULL) {
		un->un_stats->ks_lock = SD_MUTEX(un);
		kstat_install(un->un_stats);
	}
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p un_stats created\n", un);

	sd_create_errstats(un, instance);
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p errstats created\n", un);

	/*
	 * The following if/else code was relocated here from below as part
	 * of the fix for bug (4430280). However with the default setup added
	 * on entry to this routine, it's no longer absolutely necessary for
	 * this to be before the call to sd_spin_up_unit.
	 */
	if (SD_IS_PARALLEL_SCSI(un)) {
		/*
		 * If SCSI-2 tagged queueing is supported by the target
		 * and by the host adapter then we will enable it.
		 */
		un->un_tagflags = 0;
		if ((devp->sd_inq->inq_rdf == RDF_SCSI2) &&
		    (devp->sd_inq->inq_cmdque) &&
		    (un->un_f_arq_enabled == TRUE)) {
			if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing",
			    1, 1) == 1) {
				un->un_tagflags = FLAG_STAG;
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p tag queueing "
				    "enabled\n", un);
			} else if (scsi_ifgetcap(SD_ADDRESS(un),
			    "untagged-qing", 0) == 1) {
				un->un_f_opt_queueing = TRUE;
				un->un_saved_throttle = un->un_throttle =
				    min(un->un_throttle, 3);
			} else {
				un->un_f_opt_queueing = FALSE;
				un->un_saved_throttle = un->un_throttle = 1;
			}
		} else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0)
		    == 1) && (un->un_f_arq_enabled == TRUE)) {
			/* The Host Adapter supports internal queueing. */
			un->un_f_opt_queueing = TRUE;
			un->un_saved_throttle = un->un_throttle =
			    min(un->un_throttle, 3);
		} else {
			un->un_f_opt_queueing = FALSE;
			un->un_saved_throttle = un->un_throttle = 1;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p no tag queueing\n", un);
		}

		/* Set up or tear down default wide operations for disks */

		/*
		 * Note: Legacy: it may be possible for both "sd_max_xfer_size"
		 * and "ssd_max_xfer_size" to exist simultaneously on the same
		 * system and be set to different values. In the future this
		 * code may need to be updated when the ssd module is
		 * obsoleted and removed from the system. (4299588)
		 */
		if ((devp->sd_inq->inq_rdf == RDF_SCSI2) &&
		    (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) {
			if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
			    1, 1) == 1) {
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p Wide Transfer "
				    "enabled\n", un);
			}

			/*
			 * If tagged queuing has also been enabled, then
			 * enable large xfers
			 */
			if (un->un_saved_throttle == sd_max_throttle) {
				un->un_max_xfer_size =
				    ddi_getprop(DDI_DEV_T_ANY, devi, 0,
				    sd_max_xfer_size, SD_MAX_XFER_SIZE);
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p max transfer "
				    "size=0x%x\n", un, un->un_max_xfer_size);
			}
		} else {
			if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
			    0, 1) == 1) {
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p "
				    "Wide Transfer disabled\n", un);
			}
		}
	} else {
		un->un_tagflags = FLAG_STAG;
		un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY,
		    devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE);
	}

	/*
	 * If this target supports LUN reset, try to enable it.
	 */
	if (un->un_f_lun_reset_enabled) {
		if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
			    "un:0x%p lun_reset capability set\n", un);
		} else {
			SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
			    "un:0x%p lun-reset capability not set\n", un);
		}
	}
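
	/*
	 * Note (informational, hedged): the negotiation above relies on the
	 * scsi_ifsetcap(9F)/scsi_ifgetcap(9F) conventions: a return of 1
	 * from scsi_ifsetcap() means the capability was set as requested,
	 * and a return of -1 from either routine means the HBA does not
	 * recognize the capability. For example:
	 *
	 *	if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 1, 1) == 1) {
	 *		tagged queueing is now enabled
	 *	}
	 */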

	/*
	 * At this point in the attach, we have enough info in the
	 * soft state to be able to issue commands to the target.
	 *
	 * All command paths used below MUST issue their commands as
	 * SD_PATH_DIRECT. This is important as intermediate layers
	 * are not all initialized yet (such as PM).
	 */

	/*
	 * Send a TEST UNIT READY command to the device. This should clear
	 * any outstanding UNIT ATTENTION that may be present.
	 *
	 * Note: Don't check for success, just track if there is a
	 * reservation; this is a throw-away command to clear any unit
	 * attentions.
	 *
	 * Note: This MUST be the first command issued to the target during
	 * attach to ensure power on UNIT ATTENTIONS are cleared.
	 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated
	 * with attempts at spinning up a device with no media.
	 */
	if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) {
		reservation_flag = SD_TARGET_IS_RESERVED;
	}

	/*
	 * If the device is NOT a removable media device, attempt to spin
	 * it up (using the START_STOP_UNIT command) and read its capacity
	 * (using the READ CAPACITY command). Note, however, that either
	 * of these could fail and in some cases we would continue with
	 * the attach despite the failure (see below).
	 */
	if (devp->sd_inq->inq_dtype == DTYPE_DIRECT && !ISREMOVABLE(un)) {
		switch (sd_spin_up_unit(un)) {
		case 0:
			/*
			 * Spin-up was successful; now try to read the
			 * capacity. If successful then save the results
			 * and mark the capacity & lbasize as valid.
			 */
			SD_TRACE(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p spin-up successful\n",
			    un);

			switch (sd_send_scsi_READ_CAPACITY(un, &capacity,
			    &lbasize, SD_PATH_DIRECT)) {
			case 0: {
				if (capacity > DK_MAX_BLOCKS) {
#ifdef _LP64
					/*
					 * Enable descriptor format sense data
					 * so that we can get 64 bit sense
					 * data fields.
					 */
					sd_enable_descr_sense(un);
#else
					/* 32-bit kernels can't handle this */
					scsi_log(SD_DEVINFO(un),
					    sd_label, CE_WARN,
					    "disk has %llu blocks, which "
					    "is too large for a 32-bit "
					    "kernel", capacity);
					goto spinup_failed;
#endif
				}
				/*
				 * The following relies on
				 * sd_send_scsi_READ_CAPACITY never
				 * returning 0 for capacity and/or lbasize.
				 */
				sd_update_block_info(un, lbasize, capacity);

				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p capacity = %ld "
				    "blocks; lbasize= %ld.\n", un,
				    un->un_blockcount, un->un_tgt_blocksize);

				break;
			}
			case EACCES:
				/*
				 * Should never get here if the spin-up
				 * succeeded, but code it in anyway.
				 * From here, just continue with the attach...
				 */
				SD_INFO(SD_LOG_ATTACH_DETACH, un,
				    "sd_unit_attach: un:0x%p "
				    "sd_send_scsi_READ_CAPACITY "
				    "returned reservation conflict\n", un);
				reservation_flag = SD_TARGET_IS_RESERVED;
				break;
			default:
				/*
				 * Likewise, should never get here if the
				 * spin-up succeeded. Just continue with
				 * the attach...
				 */
				break;
			}
			break;
		case EACCES:
			/*
			 * Device is reserved by another host. In this case
			 * we could not spin it up or read the capacity, but
			 * we continue with the attach anyway.
			 */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p spin-up reservation "
			    "conflict.\n", un);
			reservation_flag = SD_TARGET_IS_RESERVED;
			break;
		default:
			/* Fail the attach if the spin-up failed. */
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p spin-up failed.", un);
			goto spinup_failed;
		}
	}

	/*
	 * Check to see if this is a MMC drive
	 */
	if (ISCD(un)) {
		sd_set_mmc_caps(un);
	}
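
	/*
	 * Informational note on the capacity checks above: DK_MAX_BLOCKS
	 * bounds what a 32-bit kernel can address with the traditional
	 * label/CDB formats. On an _LP64 kernel a larger disk is accepted
	 * and descriptor-format sense data is enabled (sd_enable_descr_sense)
	 * so that 64-bit block addresses fit in the sense data; on a 32-bit
	 * kernel the attach is failed instead.
	 */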

	/*
	 * Create the minor nodes for the device.
	 * Note: If we want to support fdisk on both sparc and intel, this will
	 * have to separate out the notion that VTOC8 is always sparc, and
	 * VTOC16 is always intel (tho these can be the defaults). The vtoc
	 * type will have to be determined at run-time, and the fdisk
	 * partitioning will have to have been read & set up before we
	 * create the minor nodes. (any other inits (such as kstats) that
	 * also ought to be done before creating the minor nodes?) (Doesn't
	 * setting up the minor nodes kind of imply that we're ready to
	 * handle an open from userland?)
	 */
	if (sd_create_minor_nodes(un, devi) != DDI_SUCCESS) {
		goto create_minor_nodes_failed;
	}
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p minor nodes created\n", un);

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers)
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);

	/*
	 * Add a boolean property to tell the world we support
	 * the B_FAILFAST flag (for layered drivers)
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
	    "ddi-failfast-supported", NULL, 0);

	/*
	 * Initialize power management
	 */
	mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
	sd_setup_pm(un, devi);
	if (un->un_f_pm_is_enabled == FALSE) {
		/*
		 * For performance, point to a jump table that does
		 * not include pm.
		 * The direct and priority chains don't change with PM.
		 *
		 * Note: this is currently done based on individual device
		 * capabilities. When an interface for determining system
		 * power enabled state becomes available, or when additional
		 * layers are added to the command chain, these values will
		 * have to be re-evaluated for correctness.
		 */
		if (ISREMOVABLE(un)) {
			un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;
		} else {
			un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM;
		}
		un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
	}

	/*
	 * This property is set to 0 by HA software to avoid retries
	 * on a reserved disk. (The preferred property name is
	 * "retry-on-reservation-conflict") (1189689)
	 *
	 * Note: The use of a global here can have unintended consequences. A
	 * per instance variable is preferable to match the capabilities of
	 * different underlying hba's (4402600)
	 */
	sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
	    DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
	    sd_retry_on_reservation_conflict);
	if (sd_retry_on_reservation_conflict != 0) {
		sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
		    devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
		    sd_retry_on_reservation_conflict);
	}

	/* Set up options for QFULL handling. */
	if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
	    "qfull-retries", -1)) != -1) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
		    rval, 1);
	}
	if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
	    "qfull-retry-interval", -1)) != -1) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval",
		    rval, 1);
	}

	/*
	 * This just prints a message that announces the existence of the
	 * device. The message is always printed in the system logfile, but
	 * only appears on the console if the system is booted with the
	 * -v (verbose) argument.
	 */
	ddi_report_dev(devi);

	/*
	 * The framework calls driver attach routines single-threaded
	 * for a given instance. However we still acquire SD_MUTEX here
	 * because this is required for calling the sd_validate_geometry()
	 * and sd_register_devid() functions.
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_f_geometry_is_valid = FALSE;
	un->un_mediastate = DKIO_NONE;
	un->un_reserved = -1;
	if (!ISREMOVABLE(un)) {
		/*
		 * Read and validate the device's geometry (ie, disk label).
		 * A new unformatted drive will not have a valid geometry, but
		 * the driver needs to successfully attach to this device so
		 * the drive can be formatted via ioctls.
		 */
		if (((sd_validate_geometry(un, SD_PATH_DIRECT) ==
		    ENOTSUP)) &&
		    (un->un_blockcount < DK_MAX_BLOCKS)) {
			/*
			 * We found a small disk with an EFI label on it;
			 * we need to fix up the minor nodes accordingly.
			 */
			ddi_remove_minor_node(devi, "h");
			ddi_remove_minor_node(devi, "h,raw");
			(void) ddi_create_minor_node(devi, "wd",
			    S_IFBLK,
			    (instance << SDUNIT_SHIFT) | WD_NODE,
			    un->un_node_type, NULL);
			(void) ddi_create_minor_node(devi, "wd,raw",
			    S_IFCHR,
			    (instance << SDUNIT_SHIFT) | WD_NODE,
			    un->un_node_type, NULL);
		}
	}

	/*
	 * Read and initialize the devid for the unit.
	 */
	ASSERT(un->un_errstats != NULL);
	if (!ISREMOVABLE(un)) {
		sd_register_devid(un, devi, reservation_flag);
	}
	mutex_exit(SD_MUTEX(un));

#if (defined(__fibre))
	/*
	 * Register callbacks for fibre only. You can't do this solely
	 * on the basis of the devid_type because this is hba specific.
	 * We need to query our hba capabilities to find out whether to
	 * register or not.
	 */
	if (un->un_f_is_fibre) {
		if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
			sd_init_event_callbacks(un);
			SD_TRACE(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p event callbacks inserted",
			    un);
		}
	}
#endif

	if (un->un_f_opt_disable_cache == TRUE) {
		if (sd_disable_caching(un) != 0) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p Could not disable "
			    "caching", un);
			goto devid_failed;
		}
	}

	/*
	 * Set the pstat and error stat values here, so data obtained during
	 * the previous attach-time routines are available.
	 *
	 * Note: This is a critical sequence that needs to be maintained:
	 *	1) Instantiate the kstats before any routines using the iopath
	 *	   (i.e. sd_send_scsi_cmd).
	 *	2) Initialize the error stats (sd_set_errstats) and partition
	 *	   stats (sd_set_pstats) here, following
	 *	   sd_validate_geometry(), sd_register_devid(), and
	 *	   sd_disable_caching().
	 */
	if (!ISREMOVABLE(un) && (un->un_f_pkstats_enabled == TRUE)) {
		sd_set_pstats(un);
		SD_TRACE(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p pstats created and set\n", un);
	}

	sd_set_errstats(un);
	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p errstats set\n", un);

	/*
	 * Find out what type of reservation this disk supports.
	 */
	switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) {
	case 0:
		/*
		 * SCSI-3 reservations are supported.
		 */
		un->un_reservation_type = SD_SCSI3_RESERVATION;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un);
		break;
	case ENOTSUP:
		/*
		 * The PERSISTENT RESERVE IN command would not be recognized by
		 * a SCSI-2 device, so assume the reservation type is SCSI-2.
		 */
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un);
		un->un_reservation_type = SD_SCSI2_RESERVATION;
		break;
	default:
		/*
		 * default to SCSI-3 reservations
		 */
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_unit_attach: un:0x%p default SCSI3 reservations\n",
		    un);
		un->un_reservation_type = SD_SCSI3_RESERVATION;
		break;
	}

	SD_TRACE(SD_LOG_ATTACH_DETACH, un,
	    "sd_unit_attach: un:0x%p exit success\n", un);

	return (DDI_SUCCESS);

	/*
	 * An error occurred during the attach; clean up & return failure.
	 */

devid_failed:

setup_pm_failed:
	ddi_remove_minor_node(devi, NULL);

create_minor_nodes_failed:
	/*
	 * Cleanup from the scsi_ifsetcap() calls (437868)
	 */
	(void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
	(void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
	(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);

	if (un->un_f_is_fibre == FALSE) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
	}

spinup_failed:

	mutex_enter(SD_MUTEX(un));

	/* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd restart */
	if (un->un_direct_priority_timeid != NULL) {
		timeout_id_t temp_id = un->un_direct_priority_timeid;
		un->un_direct_priority_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* Cancel any pending start/stop timeouts */
	if (un->un_startstop_timeid != NULL) {
		timeout_id_t temp_id = un->un_startstop_timeid;
		un->un_startstop_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* Cancel any pending reset-throttle timeouts */
	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;
		un->un_reset_throttle_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* Cancel any pending retry timeouts */
	if (un->un_retry_timeid != NULL) {
		timeout_id_t temp_id = un->un_retry_timeid;
		un->un_retry_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/* Cancel any pending delayed cv broadcast timeouts */
	if (un->un_dcvb_timeid != NULL) {
		timeout_id_t temp_id = un->un_dcvb_timeid;
		un->un_dcvb_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_exit(SD_MUTEX(un));

	/* There should not be any in-progress I/O so ASSERT this check */
	ASSERT(un->un_ncmds_in_transport == 0);
	ASSERT(un->un_ncmds_in_driver == 0);

	/* Do not free the softstate if the callback routine is active */
	sd_sync_with_callback(un);
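
	/*
	 * Illustrative sketch (not part of the driver): each cancellation
	 * above uses the same idiom: capture the id, NULL the softstate
	 * field, drop the mutex (the handler itself may need it), and only
	 * then call untimeout(9F), which can wait for a running handler.
	 * With a hypothetical timeout field:
	 *
	 *	if (un->un_some_timeid != NULL) {
	 *		timeout_id_t temp_id = un->un_some_timeid;
	 *		un->un_some_timeid = NULL;
	 *		mutex_exit(SD_MUTEX(un));
	 *		(void) untimeout(temp_id);
	 *		mutex_enter(SD_MUTEX(un));
	 *	}
	 */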

	/*
	 * Partition stats apparently are not used with removables. These would
	 * not have been created during attach, so no need to clean them up...
	 */
	if (un->un_stats != NULL) {
		kstat_delete(un->un_stats);
		un->un_stats = NULL;
	}
	if (un->un_errstats != NULL) {
		kstat_delete(un->un_errstats);
		un->un_errstats = NULL;
	}

	ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
	ddi_xbuf_attr_destroy(un->un_xbuf_attr);

	ddi_prop_remove_all(devi);
	sema_destroy(&un->un_semoclose);
	cv_destroy(&un->un_state_cv);

getrbuf_failed:

	sd_free_rqs(un);

alloc_rqs_failed:

	devp->sd_private = NULL;
	bzero(un, sizeof (struct sd_lun));	/* Clear any stale data! */

get_softstate_failed:
	/*
	 * Note: the man pages are unclear as to whether or not doing a
	 * ddi_soft_state_free(sd_state, instance) is the right way to
	 * clean up after the ddi_soft_state_zalloc() if the subsequent
	 * ddi_get_soft_state() fails. The implication seems to be
	 * that the get_soft_state cannot fail if the zalloc succeeds.
	 */
	ddi_soft_state_free(sd_state, instance);

probe_failed:
	scsi_unprobe(devp);
#ifdef SDDEBUG
	if ((sd_component_mask & SD_LOG_ATTACH_DETACH) &&
	    (sd_level_mask & SD_LOGMASK_TRACE)) {
		cmn_err(CE_CONT, "sd_unit_attach: un:0x%p exit failure\n",
		    (void *)un);
	}
#endif
	return (DDI_FAILURE);
}
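
/*
 * Illustrative sketch (not part of the driver): the SDDEBUG exit trace
 * above is gated on two masks so verbosity can be tuned per component and
 * per severity without rebuilding:
 *
 *	if ((sd_component_mask & SD_LOG_ATTACH_DETACH) &&
 *	    (sd_level_mask & SD_LOGMASK_TRACE)) {
 *		cmn_err(CE_CONT, "...");
 *	}
 *
 * Setting sd_component_mask and sd_level_mask (e.g. from a debugger) then
 * enables the corresponding messages at run time.
 */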
8749 */ 8750 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8751 (un->un_direct_priority_timeid != NULL) || 8752 (un->un_state == SD_STATE_RWAIT)) { 8753 mutex_exit(SD_MUTEX(un)); 8754 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8755 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8756 goto err_stillbusy; 8757 } 8758 8759 /* 8760 * If we have the device reserved, release the reservation. 8761 */ 8762 if ((un->un_resvd_status & SD_RESERVE) && 8763 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8764 mutex_exit(SD_MUTEX(un)); 8765 /* 8766 * Note: sd_reserve_release sends a command to the device 8767 * via the sd_ioctlcmd() path, and can sleep. 8768 */ 8769 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8770 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8771 "sd_dr_detach: Cannot release reservation \n"); 8772 } 8773 } else { 8774 mutex_exit(SD_MUTEX(un)); 8775 } 8776 8777 /* 8778 * Untimeout any reserve recover, throttle reset, restart unit 8779 * and delayed broadcast timeout threads. Protect the timeout pointer 8780 * from getting nulled by their callback functions. 8781 */ 8782 mutex_enter(SD_MUTEX(un)); 8783 if (un->un_resvd_timeid != NULL) { 8784 timeout_id_t temp_id = un->un_resvd_timeid; 8785 un->un_resvd_timeid = NULL; 8786 mutex_exit(SD_MUTEX(un)); 8787 (void) untimeout(temp_id); 8788 mutex_enter(SD_MUTEX(un)); 8789 } 8790 8791 if (un->un_reset_throttle_timeid != NULL) { 8792 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8793 un->un_reset_throttle_timeid = NULL; 8794 mutex_exit(SD_MUTEX(un)); 8795 (void) untimeout(temp_id); 8796 mutex_enter(SD_MUTEX(un)); 8797 } 8798 8799 if (un->un_startstop_timeid != NULL) { 8800 timeout_id_t temp_id = un->un_startstop_timeid; 8801 un->un_startstop_timeid = NULL; 8802 mutex_exit(SD_MUTEX(un)); 8803 (void) untimeout(temp_id); 8804 mutex_enter(SD_MUTEX(un)); 8805 } 8806 8807 if (un->un_dcvb_timeid != NULL) { 8808 timeout_id_t temp_id = un->un_dcvb_timeid; 8809 un->un_dcvb_timeid = NULL; 8810 mutex_exit(SD_MUTEX(un)); 8811 (void) untimeout(temp_id); 8812 } else { 8813 mutex_exit(SD_MUTEX(un)); 8814 } 8815 8816 /* Remove any pending reservation reclaim requests for this device */ 8817 sd_rmv_resv_reclaim_req(dev); 8818 8819 mutex_enter(SD_MUTEX(un)); 8820 8821 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8822 if (un->un_direct_priority_timeid != NULL) { 8823 timeout_id_t temp_id = un->un_direct_priority_timeid; 8824 un->un_direct_priority_timeid = NULL; 8825 mutex_exit(SD_MUTEX(un)); 8826 (void) untimeout(temp_id); 8827 mutex_enter(SD_MUTEX(un)); 8828 } 8829 8830 /* Cancel any active multi-host disk watch thread requests */ 8831 if (un->un_mhd_token != NULL) { 8832 mutex_exit(SD_MUTEX(un)); 8833 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8834 if (scsi_watch_request_terminate(un->un_mhd_token, 8835 SCSI_WATCH_TERMINATE_NOWAIT)) { 8836 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8837 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8838 /* 8839 * Note: We are returning here after having removed 8840 * some driver timeouts above. This is consistent with 8841 * the legacy implementation but perhaps the watch 8842 * terminate call should be made with the wait flag set. 
8843 */ 8844 goto err_stillbusy; 8845 } 8846 mutex_enter(SD_MUTEX(un)); 8847 un->un_mhd_token = NULL; 8848 } 8849 8850 if (un->un_swr_token != NULL) { 8851 mutex_exit(SD_MUTEX(un)); 8852 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8853 if (scsi_watch_request_terminate(un->un_swr_token, 8854 SCSI_WATCH_TERMINATE_NOWAIT)) { 8855 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8856 "sd_dr_detach: Cannot cancel swr watch request\n"); 8857 /* 8858 * Note: We are returning here after having removed 8859 * some driver timeouts above. This is consistent with 8860 * the legacy implementation but perhaps the watch 8861 * terminate call should be made with the wait flag set. 8862 */ 8863 goto err_stillbusy; 8864 } 8865 mutex_enter(SD_MUTEX(un)); 8866 un->un_swr_token = NULL; 8867 } 8868 8869 mutex_exit(SD_MUTEX(un)); 8870 8871 /* 8872 * Clear any scsi_reset_notifies. We clear the reset notifies 8873 * if we have not registered one. 8874 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 8875 */ 8876 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8877 sd_mhd_reset_notify_cb, (caddr_t)un); 8878 8879 8880 8881 #if defined(__i386) || defined(__amd64) 8882 /* 8883 * Gratuitous bus resets sometimes cause an otherwise 8884 * okay ATA/ATAPI bus to hang. This is due the lack of 8885 * a clear spec of how resets should be implemented by ATA 8886 * disk drives. 8887 */ 8888 #elif !defined(__fibre) /* "#else if" does NOT work! */ 8889 /* 8890 * Reset target/bus. 8891 * 8892 * Note: This is a legacy workaround for Elite III dual-port drives that 8893 * will not come online after an aborted detach and subsequent re-attach 8894 * It should be removed when the Elite III FW is fixed, or the drives 8895 * are no longer supported. 8896 */ 8897 if (un->un_f_cfg_is_atapi == FALSE) { 8898 reset_retval = 0; 8899 8900 /* If the device is in low power mode don't reset it */ 8901 8902 mutex_enter(&un->un_pm_mutex); 8903 if (!SD_DEVICE_IS_IN_LOW_POWER(un)) { 8904 /* 8905 * First try a LUN reset if we can, then move on to a 8906 * target reset if needed; swat the bus as a last 8907 * resort. 8908 */ 8909 mutex_exit(&un->un_pm_mutex); 8910 if (un->un_f_allow_bus_device_reset == TRUE) { 8911 if (un->un_f_lun_reset_enabled == TRUE) { 8912 reset_retval = 8913 scsi_reset(SD_ADDRESS(un), 8914 RESET_LUN); 8915 } 8916 if (reset_retval == 0) { 8917 reset_retval = 8918 scsi_reset(SD_ADDRESS(un), 8919 RESET_TARGET); 8920 } 8921 } 8922 if (reset_retval == 0) { 8923 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 8924 } 8925 } else { 8926 mutex_exit(&un->un_pm_mutex); 8927 } 8928 } 8929 #endif 8930 8931 /* 8932 * protect the timeout pointers from getting nulled by 8933 * their callback functions during the cancellation process. 8934 * In such a scenario untimeout can be invoked with a null value. 8935 */ 8936 _NOTE(NO_COMPETING_THREADS_NOW); 8937 8938 mutex_enter(&un->un_pm_mutex); 8939 if (un->un_pm_idle_timeid != NULL) { 8940 timeout_id_t temp_id = un->un_pm_idle_timeid; 8941 un->un_pm_idle_timeid = NULL; 8942 mutex_exit(&un->un_pm_mutex); 8943 8944 /* 8945 * Timeout is active; cancel it. 8946 * Note that it'll never be active on a device 8947 * that does not support PM therefore we don't 8948 * have to check before calling pm_idle_component. 8949 */ 8950 (void) untimeout(temp_id); 8951 (void) pm_idle_component(SD_DEVINFO(un), 0); 8952 mutex_enter(&un->un_pm_mutex); 8953 } 8954 8955 /* 8956 * Check whether there is already a timeout scheduled for power 8957 * management. 
If yes then don't lower the power here, that's 8958 * the timeout handler's job. 8959 */ 8960 if (un->un_pm_timeid != NULL) { 8961 timeout_id_t temp_id = un->un_pm_timeid; 8962 un->un_pm_timeid = NULL; 8963 mutex_exit(&un->un_pm_mutex); 8964 /* 8965 * Timeout is active; cancel it. 8966 * Note that it'll never be active on a device 8967 * that does not support PM, therefore we don't 8968 * have to check before calling pm_idle_component. 8969 */ 8970 (void) untimeout(temp_id); 8971 (void) pm_idle_component(SD_DEVINFO(un), 0); 8972 8973 } else { 8974 mutex_exit(&un->un_pm_mutex); 8975 if ((un->un_f_pm_is_enabled == TRUE) && 8976 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) != 8977 DDI_SUCCESS)) { 8978 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8979 "sd_dr_detach: Lower power request failed, ignoring.\n"); 8980 /* 8981 * Fix for bug: 4297749, item # 13 8982 * The above test now includes a check to see if PM is 8983 * supported by this device before calling 8984 * pm_lower_power(). 8985 * Note, the following is not dead code. The call to 8986 * pm_lower_power above will generate a call back into 8987 * our sdpower routine which might result in a timeout 8988 * handler getting activated. Therefore the following 8989 * code is valid and necessary. 8990 */ 8991 mutex_enter(&un->un_pm_mutex); 8992 if (un->un_pm_timeid != NULL) { 8993 timeout_id_t temp_id = un->un_pm_timeid; 8994 un->un_pm_timeid = NULL; 8995 mutex_exit(&un->un_pm_mutex); 8996 (void) untimeout(temp_id); 8997 (void) pm_idle_component(SD_DEVINFO(un), 0); 8998 } else { 8999 mutex_exit(&un->un_pm_mutex); 9000 } 9001 } 9002 } 9003 9004 /* 9005 * Cleanup from the scsi_ifsetcap() calls (437868) 9006 * Relocated here from above to be after the call to 9007 * pm_lower_power, which was getting errors. 9008 */ 9009 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 9010 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 9011 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 9012 9013 if (un->un_f_is_fibre == FALSE) { 9014 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 9015 } 9016 9017 /* 9018 * Remove any event callbacks, fibre only 9019 */ 9020 if (un->un_f_is_fibre == TRUE) { 9021 if ((un->un_insert_event != NULL) && 9022 (ddi_remove_event_handler(un->un_insert_cb_id) != 9023 DDI_SUCCESS)) { 9024 /* 9025 * Note: We are returning here after having done 9026 * substantial cleanup above. This is consistent 9027 * with the legacy implementation but this may not 9028 * be the right thing to do. 9029 */ 9030 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 9031 "sd_dr_detach: Cannot cancel insert event\n"); 9032 goto err_remove_event; 9033 } 9034 un->un_insert_event = NULL; 9035 9036 if ((un->un_remove_event != NULL) && 9037 (ddi_remove_event_handler(un->un_remove_cb_id) != 9038 DDI_SUCCESS)) { 9039 /* 9040 * Note: We are returning here after having done 9041 * substantial cleanup above. This is consistent 9042 * with the legacy implementation but this may not 9043 * be the right thing to do. 9044 */ 9045 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 9046 "sd_dr_detach: Cannot cancel remove event\n"); 9047 goto err_remove_event; 9048 } 9049 un->un_remove_event = NULL; 9050 } 9051 9052 /* Do not free the softstate if the callback routine is active */ 9053 sd_sync_with_callback(un); 9054 9055 /* 9056 * Hold the detach mutex here, to make sure that no other threads ever 9057 * can access a (partially) freed soft state structure. 9058 */ 9059 mutex_enter(&sd_detach_mutex); 9060 9061 /* 9062 * Clean up the soft state struct.
9063 * Cleanup is done in reverse order of allocs/inits. 9064 * At this point there should be no competing threads anymore. 9065 */ 9066 9067 /* Unregister and free device id. */ 9068 ddi_devid_unregister(devi); 9069 if (un->un_devid) { 9070 ddi_devid_free(un->un_devid); 9071 un->un_devid = NULL; 9072 } 9073 9074 /* 9075 * Destroy wmap cache if it exists. 9076 */ 9077 if (un->un_wm_cache != NULL) { 9078 kmem_cache_destroy(un->un_wm_cache); 9079 un->un_wm_cache = NULL; 9080 } 9081 9082 /* Remove minor nodes */ 9083 ddi_remove_minor_node(devi, NULL); 9084 9085 /* 9086 * kstat cleanup is done in detach for all device types (4363169). 9087 * We do not want to fail detach if the device kstats are not deleted 9088 * since there is a confusion about the devo_refcnt for the device. 9089 * We just delete the kstats and let detach complete successfully. 9090 */ 9091 if (un->un_stats != NULL) { 9092 kstat_delete(un->un_stats); 9093 un->un_stats = NULL; 9094 } 9095 if (un->un_errstats != NULL) { 9096 kstat_delete(un->un_errstats); 9097 un->un_errstats = NULL; 9098 } 9099 9100 /* Remove partition stats (not created for removables) */ 9101 if (!ISREMOVABLE(un)) { 9102 for (i = 0; i < NSDMAP; i++) { 9103 if (un->un_pstats[i] != NULL) { 9104 kstat_delete(un->un_pstats[i]); 9105 un->un_pstats[i] = NULL; 9106 } 9107 } 9108 } 9109 9110 /* Remove xbuf registration */ 9111 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 9112 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 9113 9114 /* Remove driver properties */ 9115 ddi_prop_remove_all(devi); 9116 9117 mutex_destroy(&un->un_pm_mutex); 9118 cv_destroy(&un->un_pm_busy_cv); 9119 9120 /* Open/close semaphore */ 9121 sema_destroy(&un->un_semoclose); 9122 9123 /* Removable media condvar. */ 9124 cv_destroy(&un->un_state_cv); 9125 9126 /* Suspend/resume condvar. */ 9127 cv_destroy(&un->un_suspend_cv); 9128 cv_destroy(&un->un_disk_busy_cv); 9129 9130 sd_free_rqs(un); 9131 9132 /* Free up soft state */ 9133 devp->sd_private = NULL; 9134 bzero(un, sizeof (struct sd_lun)); 9135 ddi_soft_state_free(sd_state, instance); 9136 9137 mutex_exit(&sd_detach_mutex); 9138 9139 /* This frees up the INQUIRY data associated with the device. 
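 * scsi_unprobe(9F) releases the resources that scsi_probe(9F) allocated at attach time.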
*/ 9140 scsi_unprobe(devp); 9141 9142 return (DDI_SUCCESS); 9143 9144 err_notclosed: 9145 mutex_exit(SD_MUTEX(un)); 9146 9147 err_stillbusy: 9148 _NOTE(NO_COMPETING_THREADS_NOW); 9149 9150 err_remove_event: 9151 mutex_enter(&sd_detach_mutex); 9152 un->un_detach_count--; 9153 mutex_exit(&sd_detach_mutex); 9154 9155 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 9156 return (DDI_FAILURE); 9157 } 9158 9159 9160 /* 9161 * Driver minor node structure and data table 9162 */ 9163 struct driver_minor_data { 9164 char *name; 9165 minor_t minor; 9166 int type; 9167 }; 9168 9169 static struct driver_minor_data sd_minor_data[] = { 9170 {"a", 0, S_IFBLK}, 9171 {"b", 1, S_IFBLK}, 9172 {"c", 2, S_IFBLK}, 9173 {"d", 3, S_IFBLK}, 9174 {"e", 4, S_IFBLK}, 9175 {"f", 5, S_IFBLK}, 9176 {"g", 6, S_IFBLK}, 9177 {"h", 7, S_IFBLK}, 9178 #if defined(_SUNOS_VTOC_16) 9179 {"i", 8, S_IFBLK}, 9180 {"j", 9, S_IFBLK}, 9181 {"k", 10, S_IFBLK}, 9182 {"l", 11, S_IFBLK}, 9183 {"m", 12, S_IFBLK}, 9184 {"n", 13, S_IFBLK}, 9185 {"o", 14, S_IFBLK}, 9186 {"p", 15, S_IFBLK}, 9187 #endif /* defined(_SUNOS_VTOC_16) */ 9188 #if defined(_FIRMWARE_NEEDS_FDISK) 9189 {"q", 16, S_IFBLK}, 9190 {"r", 17, S_IFBLK}, 9191 {"s", 18, S_IFBLK}, 9192 {"t", 19, S_IFBLK}, 9193 {"u", 20, S_IFBLK}, 9194 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9195 {"a,raw", 0, S_IFCHR}, 9196 {"b,raw", 1, S_IFCHR}, 9197 {"c,raw", 2, S_IFCHR}, 9198 {"d,raw", 3, S_IFCHR}, 9199 {"e,raw", 4, S_IFCHR}, 9200 {"f,raw", 5, S_IFCHR}, 9201 {"g,raw", 6, S_IFCHR}, 9202 {"h,raw", 7, S_IFCHR}, 9203 #if defined(_SUNOS_VTOC_16) 9204 {"i,raw", 8, S_IFCHR}, 9205 {"j,raw", 9, S_IFCHR}, 9206 {"k,raw", 10, S_IFCHR}, 9207 {"l,raw", 11, S_IFCHR}, 9208 {"m,raw", 12, S_IFCHR}, 9209 {"n,raw", 13, S_IFCHR}, 9210 {"o,raw", 14, S_IFCHR}, 9211 {"p,raw", 15, S_IFCHR}, 9212 #endif /* defined(_SUNOS_VTOC_16) */ 9213 #if defined(_FIRMWARE_NEEDS_FDISK) 9214 {"q,raw", 16, S_IFCHR}, 9215 {"r,raw", 17, S_IFCHR}, 9216 {"s,raw", 18, S_IFCHR}, 9217 {"t,raw", 19, S_IFCHR}, 9218 {"u,raw", 20, S_IFCHR}, 9219 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9220 {0} 9221 }; 9222 9223 static struct driver_minor_data sd_minor_data_efi[] = { 9224 {"a", 0, S_IFBLK}, 9225 {"b", 1, S_IFBLK}, 9226 {"c", 2, S_IFBLK}, 9227 {"d", 3, S_IFBLK}, 9228 {"e", 4, S_IFBLK}, 9229 {"f", 5, S_IFBLK}, 9230 {"g", 6, S_IFBLK}, 9231 {"wd", 7, S_IFBLK}, 9232 #if defined(_FIRMWARE_NEEDS_FDISK) 9233 {"q", 16, S_IFBLK}, 9234 {"r", 17, S_IFBLK}, 9235 {"s", 18, S_IFBLK}, 9236 {"t", 19, S_IFBLK}, 9237 {"u", 20, S_IFBLK}, 9238 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9239 {"a,raw", 0, S_IFCHR}, 9240 {"b,raw", 1, S_IFCHR}, 9241 {"c,raw", 2, S_IFCHR}, 9242 {"d,raw", 3, S_IFCHR}, 9243 {"e,raw", 4, S_IFCHR}, 9244 {"f,raw", 5, S_IFCHR}, 9245 {"g,raw", 6, S_IFCHR}, 9246 {"wd,raw", 7, S_IFCHR}, 9247 #if defined(_FIRMWARE_NEEDS_FDISK) 9248 {"q,raw", 16, S_IFCHR}, 9249 {"r,raw", 17, S_IFCHR}, 9250 {"s,raw", 18, S_IFCHR}, 9251 {"t,raw", 19, S_IFCHR}, 9252 {"u,raw", 20, S_IFCHR}, 9253 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9254 {0} 9255 }; 9256 9257 9258 /* 9259 * Function: sd_create_minor_nodes 9260 * 9261 * Description: Create the minor device nodes for the instance. 
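 *		Both block and raw (character) nodes are created for each
 *		entry in the applicable minor data table; the EFI minor
 *		data table is used when the capacity exceeds DK_MAX_BLOCKS
 *		(see below).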
9262 * 9263 * Arguments: un - driver soft state (unit) structure 9264 * devi - pointer to device info structure 9265 * 9266 * Return Code: DDI_SUCCESS 9267 * DDI_FAILURE 9268 * 9269 * Context: Kernel thread context 9270 */ 9271 9272 static int 9273 sd_create_minor_nodes(struct sd_lun *un, dev_info_t *devi) 9274 { 9275 struct driver_minor_data *dmdp; 9276 struct scsi_device *devp; 9277 int instance; 9278 char name[48]; 9279 9280 ASSERT(un != NULL); 9281 devp = ddi_get_driver_private(devi); 9282 instance = ddi_get_instance(devp->sd_dev); 9283 9284 /* 9285 * Create all the minor nodes for this target. 9286 */ 9287 if (un->un_blockcount > DK_MAX_BLOCKS) 9288 dmdp = sd_minor_data_efi; 9289 else 9290 dmdp = sd_minor_data; 9291 while (dmdp->name != NULL) { 9292 9293 (void) sprintf(name, "%s", dmdp->name); 9294 9295 if (ddi_create_minor_node(devi, name, dmdp->type, 9296 (instance << SDUNIT_SHIFT) | dmdp->minor, 9297 un->un_node_type, NULL) == DDI_FAILURE) { 9298 /* 9299 * Clean up any nodes that may have been created, in 9300 * case this fails in the middle of the loop. 9301 */ 9302 ddi_remove_minor_node(devi, NULL); 9303 return (DDI_FAILURE); 9304 } 9305 dmdp++; 9306 } 9307 9308 return (DDI_SUCCESS); 9309 } 9310 9311 9312 /* 9313 * Function: sd_create_errstats 9314 * 9315 * Description: This routine instantiates the device error stats. 9316 * 9317 * Note: During attach the stats are instantiated first so they are 9318 * available for attach-time routines that utilize the driver 9319 * iopath to send commands to the device. The stats are initialized 9320 * separately so data obtained during some attach-time routines is 9321 * available. (4362483) 9322 * 9323 * Arguments: un - driver soft state (unit) structure 9324 * instance - driver instance 9325 * 9326 * Context: Kernel thread context 9327 */ 9328 9329 static void 9330 sd_create_errstats(struct sd_lun *un, int instance) 9331 { 9332 struct sd_errstats *stp; 9333 char kstatmodule_err[KSTAT_STRLEN]; 9334 char kstatname[KSTAT_STRLEN]; 9335 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 9336 9337 ASSERT(un != NULL); 9338 9339 if (un->un_errstats != NULL) { 9340 return; 9341 } 9342 9343 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 9344 "%serr", sd_label); 9345 (void) snprintf(kstatname, sizeof (kstatname), 9346 "%s%d,err", sd_label, instance); 9347 9348 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 9349 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 9350 9351 if (un->un_errstats == NULL) { 9352 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 9353 "sd_create_errstats: Failed kstat_create\n"); 9354 return; 9355 } 9356 9357 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9358 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 9359 KSTAT_DATA_UINT32); 9360 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 9361 KSTAT_DATA_UINT32); 9362 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 9363 KSTAT_DATA_UINT32); 9364 kstat_named_init(&stp->sd_vid, "Vendor", 9365 KSTAT_DATA_CHAR); 9366 kstat_named_init(&stp->sd_pid, "Product", 9367 KSTAT_DATA_CHAR); 9368 kstat_named_init(&stp->sd_revision, "Revision", 9369 KSTAT_DATA_CHAR); 9370 kstat_named_init(&stp->sd_serial, "Serial No", 9371 KSTAT_DATA_CHAR); 9372 kstat_named_init(&stp->sd_capacity, "Size", 9373 KSTAT_DATA_ULONGLONG); 9374 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 9375 KSTAT_DATA_UINT32); 9376 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 9377 KSTAT_DATA_UINT32); 9378 
kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 9379 KSTAT_DATA_UINT32); 9380 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 9381 KSTAT_DATA_UINT32); 9382 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 9383 KSTAT_DATA_UINT32); 9384 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 9385 KSTAT_DATA_UINT32); 9386 9387 un->un_errstats->ks_private = un; 9388 un->un_errstats->ks_update = nulldev; 9389 9390 kstat_install(un->un_errstats); 9391 } 9392 9393 9394 /* 9395 * Function: sd_set_errstats 9396 * 9397 * Description: This routine sets the value of the vendor id, product id, 9398 * revision, serial number, and capacity device error stats. 9399 * 9400 * Note: During attach the stats are instantiated first so they are 9401 * available for attach-time routines that utilize the driver 9402 * iopath to send commands to the device. The stats are initialized 9403 * separately so data obtained during some attach-time routines is 9404 * available. (4362483) 9405 * 9406 * Arguments: un - driver soft state (unit) structure 9407 * 9408 * Context: Kernel thread context 9409 */ 9410 9411 static void 9412 sd_set_errstats(struct sd_lun *un) 9413 { 9414 struct sd_errstats *stp; 9415 9416 ASSERT(un != NULL); 9417 ASSERT(un->un_errstats != NULL); 9418 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9419 ASSERT(stp != NULL); 9420 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 9421 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 9422 (void) strncpy(stp->sd_revision.value.c, 9423 un->un_sd->sd_inq->inq_revision, 4); 9424 9425 /* 9426 * Set the "Serial No" kstat for Sun qualified drives (indicated by 9427 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 9428 * (4376302)) 9429 */ 9430 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 9431 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 9432 sizeof (SD_INQUIRY(un)->inq_serial)); 9433 } 9434 9435 if (un->un_f_blockcount_is_valid != TRUE) { 9436 /* 9437 * Set capacity error stat to 0 for no media. This ensures 9438 * a valid capacity is displayed in response to 'iostat -E' 9439 * when no media is present in the device. 9440 */ 9441 stp->sd_capacity.value.ui64 = 0; 9442 } else { 9443 /* 9444 * Multiply un_blockcount by un->un_sys_blocksize to get 9445 * capacity. 9446 * 9447 * Note: for non-512 blocksize devices "un_blockcount" has been 9448 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 9449 * (un_tgt_blocksize / un->un_sys_blocksize). 9450 */ 9451 stp->sd_capacity.value.ui64 = (uint64_t) 9452 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 9453 } 9454 } 9455 9456 9457 /* 9458 * Function: sd_set_pstats 9459 * 9460 * Description: This routine instantiates and initializes the partition 9461 * stats for each partition with more than zero blocks. 9462 * (4363169) 9463 * 9464 * Arguments: un - driver soft state (unit) structure 9465 * 9466 * Context: Kernel thread context 9467 */ 9468 9469 static void 9470 sd_set_pstats(struct sd_lun *un) 9471 { 9472 char kstatname[KSTAT_STRLEN]; 9473 int instance; 9474 int i; 9475 9476 ASSERT(un != NULL); 9477 9478 instance = ddi_get_instance(SD_DEVINFO(un)); 9479 9480 /* Note:x86: is this a VTOC8/VTOC16 difference? 
*/ 9481 for (i = 0; i < NSDMAP; i++) { 9482 if ((un->un_pstats[i] == NULL) && 9483 (un->un_map[i].dkl_nblk != 0)) { 9484 (void) snprintf(kstatname, sizeof (kstatname), 9485 "%s%d,%s", sd_label, instance, 9486 sd_minor_data[i].name); 9487 un->un_pstats[i] = kstat_create(sd_label, 9488 instance, kstatname, "partition", KSTAT_TYPE_IO, 9489 1, KSTAT_FLAG_PERSISTENT); 9490 if (un->un_pstats[i] != NULL) { 9491 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 9492 kstat_install(un->un_pstats[i]); 9493 } 9494 } 9495 } 9496 } 9497 9498 9499 #if (defined(__fibre)) 9500 /* 9501 * Function: sd_init_event_callbacks 9502 * 9503 * Description: This routine initializes the insertion and removal event 9504 * callbacks. (fibre only) 9505 * 9506 * Arguments: un - driver soft state (unit) structure 9507 * 9508 * Context: Kernel thread context 9509 */ 9510 9511 static void 9512 sd_init_event_callbacks(struct sd_lun *un) 9513 { 9514 ASSERT(un != NULL); 9515 9516 if ((un->un_insert_event == NULL) && 9517 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT, 9518 &un->un_insert_event) == DDI_SUCCESS)) { 9519 /* 9520 * Add the callback for an insertion event 9521 */ 9522 (void) ddi_add_event_handler(SD_DEVINFO(un), 9523 un->un_insert_event, sd_event_callback, (void *)un, 9524 &(un->un_insert_cb_id)); 9525 } 9526 9527 if ((un->un_remove_event == NULL) && 9528 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT, 9529 &un->un_remove_event) == DDI_SUCCESS)) { 9530 /* 9531 * Add the callback for a removal event 9532 */ 9533 (void) ddi_add_event_handler(SD_DEVINFO(un), 9534 un->un_remove_event, sd_event_callback, (void *)un, 9535 &(un->un_remove_cb_id)); 9536 } 9537 } 9538 9539 9540 /* 9541 * Function: sd_event_callback 9542 * 9543 * Description: This routine handles insert/remove events (photon). The 9544 * state is changed to OFFLINE which can be used to suppress 9545 * error msgs. (fibre only) 9546 * 9547 * Arguments: un - driver soft state (unit) structure 9548 * 9549 * Context: Callout thread context 9550 */ 9551 /* ARGSUSED */ 9552 static void 9553 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg, 9554 void *bus_impldata) 9555 { 9556 struct sd_lun *un = (struct sd_lun *)arg; 9557 9558 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event)); 9559 if (event == un->un_insert_event) { 9560 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event"); 9561 mutex_enter(SD_MUTEX(un)); 9562 if (un->un_state == SD_STATE_OFFLINE) { 9563 if (un->un_last_state != SD_STATE_SUSPENDED) { 9564 un->un_state = un->un_last_state; 9565 } else { 9566 /* 9567 * We have gone through SUSPEND/RESUME while 9568 * we were offline. Restore the last state. 9569 */ 9570 un->un_state = un->un_save_state; 9571 } 9572 } 9573 mutex_exit(SD_MUTEX(un)); 9574 9575 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event)); 9576 } else if (event == un->un_remove_event) { 9577 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event"); 9578 mutex_enter(SD_MUTEX(un)); 9579 /* 9580 * We need to handle an event callback that occurs during 9581 * the suspend operation, since we don't prevent it.
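 * If the device is suspended, only un_last_state is set to OFFLINE below, so that the resume path restores the correct state.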
9582 */ 9583 if (un->un_state != SD_STATE_OFFLINE) { 9584 if (un->un_state != SD_STATE_SUSPENDED) { 9585 New_state(un, SD_STATE_OFFLINE); 9586 } else { 9587 un->un_last_state = SD_STATE_OFFLINE; 9588 } 9589 } 9590 mutex_exit(SD_MUTEX(un)); 9591 } else { 9592 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 9593 "!Unknown event\n"); 9594 } 9595 9596 } 9597 #endif 9598 9599 9600 /* 9601 * Function: sd_disable_caching() 9602 * 9603 * Description: This routine is the driver entry point for disabling 9604 * read and write caching by modifying the WCE (write cache 9605 * enable) and RCD (read cache disable) bits of mode 9606 * page 8 (MODEPAGE_CACHING). 9607 * 9608 * Arguments: un - driver soft state (unit) structure 9609 * 9610 * Return Code: EIO 9611 * code returned by sd_send_scsi_MODE_SENSE and 9612 * sd_send_scsi_MODE_SELECT 9613 * 9614 * Context: Kernel Thread 9615 */ 9616 9617 static int 9618 sd_disable_caching(struct sd_lun *un) 9619 { 9620 struct mode_caching *mode_caching_page; 9621 uchar_t *header; 9622 size_t buflen; 9623 int hdrlen; 9624 int bd_len; 9625 int rval = 0; 9626 9627 ASSERT(un != NULL); 9628 9629 /* 9630 * Do a test unit ready, otherwise a mode sense may not work if this 9631 * is the first command sent to the device after boot. 9632 */ 9633 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 9634 9635 if (un->un_f_cfg_is_atapi == TRUE) { 9636 hdrlen = MODE_HEADER_LENGTH_GRP2; 9637 } else { 9638 hdrlen = MODE_HEADER_LENGTH; 9639 } 9640 9641 /* 9642 * Allocate memory for the retrieved mode page and its headers. Set 9643 * a pointer to the page itself. 9644 */ 9645 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 9646 header = kmem_zalloc(buflen, KM_SLEEP); 9647 9648 /* Get the information from the device. */ 9649 if (un->un_f_cfg_is_atapi == TRUE) { 9650 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 9651 MODEPAGE_CACHING, SD_PATH_DIRECT); 9652 } else { 9653 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 9654 MODEPAGE_CACHING, SD_PATH_DIRECT); 9655 } 9656 if (rval != 0) { 9657 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9658 "sd_disable_caching: Mode Sense Failed\n"); 9659 kmem_free(header, buflen); 9660 return (rval); 9661 } 9662 9663 /* 9664 * Determine size of Block Descriptors in order to locate 9665 * the mode page data. ATAPI devices return 0, SCSI devices 9666 * should return MODE_BLK_DESC_LENGTH. 9667 */ 9668 if (un->un_f_cfg_is_atapi == TRUE) { 9669 struct mode_header_grp2 *mhp; 9670 mhp = (struct mode_header_grp2 *)header; 9671 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9672 } else { 9673 bd_len = ((struct mode_header *)header)->bdesc_length; 9674 } 9675 9676 if (bd_len > MODE_BLK_DESC_LENGTH) { 9677 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9678 "sd_disable_caching: Mode Sense returned invalid " 9679 "block descriptor length\n"); 9680 kmem_free(header, buflen); 9681 return (EIO); 9682 } 9683 9684 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9685 9686 /* Check the relevant bits on successful mode sense. */ 9687 if ((mode_caching_page->wce) || !(mode_caching_page->rcd)) { 9688 /* 9689 * Read or write caching is enabled. Disable both of them. 9690 */ 9691 mode_caching_page->wce = 0; 9692 mode_caching_page->rcd = 1; 9693 9694 /* Clear reserved bits before mode select. */ 9695 mode_caching_page->mode_page.ps = 0; 9696 9697 /* 9698 * Clear out mode header for mode select. 9699 * The rest of the retrieved page will be reused. 
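 * Only the first hdrlen bytes are zeroed; the block descriptor and the caching page as modified above are sent back to the device.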
9700 */ 9701 bzero(header, hdrlen); 9702 9703 /* Change the cache page to disable all caching. */ 9704 if (un->un_f_cfg_is_atapi == TRUE) { 9705 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 9706 buflen, SD_SAVE_PAGE, SD_PATH_DIRECT); 9707 } else { 9708 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 9709 buflen, SD_SAVE_PAGE, SD_PATH_DIRECT); 9710 } 9711 } 9712 9713 kmem_free(header, buflen); 9714 return (rval); 9715 } 9716 9717 9718 /* 9719 * Function: sd_make_device 9720 * 9721 * Description: Utility routine to return the Solaris device number from 9722 * the data in the device's dev_info structure. 9723 * 9724 * Return Code: The Solaris device number 9725 * 9726 * Context: Any 9727 */ 9728 9729 static dev_t 9730 sd_make_device(dev_info_t *devi) 9731 { 9732 return (makedevice(ddi_name_to_major(ddi_get_name(devi)), 9733 ddi_get_instance(devi) << SDUNIT_SHIFT)); 9734 } 9735 9736 9737 /* 9738 * Function: sd_pm_entry 9739 * 9740 * Description: Called at the start of a new command to manage power 9741 * and busy status of a device. This includes determining whether 9742 * the current power state of the device is sufficient for 9743 * performing the command or whether it must be changed. 9744 * The PM framework is notified appropriately. 9745 * Only with a return status of DDI_SUCCESS will the 9746 * component be marked busy to the framework. 9747 * 9748 * All callers of sd_pm_entry must check the return status 9749 * and only call sd_pm_exit if it was DDI_SUCCESS. A status 9750 * of DDI_FAILURE indicates the device failed to power up. 9751 * In this case un_pm_count has been adjusted so the result 9752 * on exit is still powered down, i.e. count is less than 0. 9753 * Calling sd_pm_exit with this count value hits an ASSERT. 9754 * 9755 * Return Code: DDI_SUCCESS or DDI_FAILURE 9756 * 9757 * Context: Kernel thread context. 9758 */ 9759 9760 static int 9761 sd_pm_entry(struct sd_lun *un) 9762 { 9763 int return_status = DDI_SUCCESS; 9764 9765 ASSERT(!mutex_owned(SD_MUTEX(un))); 9766 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9767 9768 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n"); 9769 9770 if (un->un_f_pm_is_enabled == FALSE) { 9771 SD_TRACE(SD_LOG_IO_PM, un, 9772 "sd_pm_entry: exiting, PM not enabled\n"); 9773 return (return_status); 9774 } 9775 9776 /* 9777 * Just increment a counter if PM is enabled. On the transition from 9778 * 0 ==> 1, mark the device as busy. The iodone side will decrement 9779 * the count with each IO and mark the device as idle when the count 9780 * hits 0. 9781 * 9782 * If the count is less than 0 the device is powered down. If a powered 9783 * down device is successfully powered up then the count must be 9784 * incremented to reflect the power up. Note that it'll get incremented 9785 * a second time to become busy. 9786 * 9787 * Because the following has the potential to change the device state 9788 * and must release the un_pm_mutex to do so, only one thread can be 9789 * allowed through at a time. 9790 */ 9791 9792 mutex_enter(&un->un_pm_mutex); 9793 while (un->un_pm_busy == TRUE) { 9794 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex); 9795 } 9796 un->un_pm_busy = TRUE; 9797 9798 if (un->un_pm_count < 1) { 9799 9800 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n"); 9801 9802 /* 9803 * Indicate we are now busy so the framework won't attempt to 9804 * power down the device. This call will only fail if either 9805 * we passed a bad component number or the device has no 9806 * components. Neither of these should ever happen.
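 * A failure here would indicate a programming error, which is why the return status is only ASSERTed below.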
9807 */ 9808 mutex_exit(&un->un_pm_mutex); 9809 return_status = pm_busy_component(SD_DEVINFO(un), 0); 9810 ASSERT(return_status == DDI_SUCCESS); 9811 9812 mutex_enter(&un->un_pm_mutex); 9813 9814 if (un->un_pm_count < 0) { 9815 mutex_exit(&un->un_pm_mutex); 9816 9817 SD_TRACE(SD_LOG_IO_PM, un, 9818 "sd_pm_entry: power up component\n"); 9819 9820 /* 9821 * pm_raise_power will cause sdpower to be called 9822 * which brings the device power level to the 9823 * desired state, ON in this case. If successful, 9824 * un_pm_count and un_power_level will be updated 9825 * appropriately. 9826 */ 9827 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9828 SD_SPINDLE_ON); 9829 9830 mutex_enter(&un->un_pm_mutex); 9831 9832 if (return_status != DDI_SUCCESS) { 9833 /* 9834 * Power up failed. 9835 * Idle the device and adjust the count 9836 * so the result on exit is that we're 9837 * still powered down, ie. count is less than 0. 9838 */ 9839 SD_TRACE(SD_LOG_IO_PM, un, 9840 "sd_pm_entry: power up failed," 9841 " idle the component\n"); 9842 9843 (void) pm_idle_component(SD_DEVINFO(un), 0); 9844 un->un_pm_count--; 9845 } else { 9846 /* 9847 * Device is powered up, verify the 9848 * count is non-negative. 9849 * This is debug only. 9850 */ 9851 ASSERT(un->un_pm_count == 0); 9852 } 9853 } 9854 9855 if (return_status == DDI_SUCCESS) { 9856 /* 9857 * For performance, now that the device has been tagged 9858 * as busy, and it's known to be powered up, update the 9859 * chain types to use jump tables that do not include 9860 * pm. This significantly lowers the overhead and 9861 * therefore improves performance. 9862 */ 9863 9864 mutex_exit(&un->un_pm_mutex); 9865 mutex_enter(SD_MUTEX(un)); 9866 SD_TRACE(SD_LOG_IO_PM, un, 9867 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9868 un->un_uscsi_chain_type); 9869 9870 if (ISREMOVABLE(un)) { 9871 un->un_buf_chain_type = 9872 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9873 } else { 9874 un->un_buf_chain_type = 9875 SD_CHAIN_INFO_DISK_NO_PM; 9876 } 9877 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 9878 9879 SD_TRACE(SD_LOG_IO_PM, un, 9880 " changed uscsi_chain_type to %d\n", 9881 un->un_uscsi_chain_type); 9882 mutex_exit(SD_MUTEX(un)); 9883 mutex_enter(&un->un_pm_mutex); 9884 9885 if (un->un_pm_idle_timeid == NULL) { 9886 /* 300 ms. */ 9887 un->un_pm_idle_timeid = 9888 timeout(sd_pm_idletimeout_handler, un, 9889 (drv_usectohz((clock_t)300000))); 9890 /* 9891 * Include an extra call to busy which keeps the 9892 * device busy with-respect-to the PM layer 9893 * until the timer fires, at which time it'll 9894 * get the extra idle call. 9895 */ 9896 (void) pm_busy_component(SD_DEVINFO(un), 0); 9897 } 9898 } 9899 } 9900 un->un_pm_busy = FALSE; 9901 /* Next... */ 9902 cv_signal(&un->un_pm_busy_cv); 9903 9904 un->un_pm_count++; 9905 9906 SD_TRACE(SD_LOG_IO_PM, un, 9907 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 9908 9909 mutex_exit(&un->un_pm_mutex); 9910 9911 return (return_status); 9912 } 9913 9914 9915 /* 9916 * Function: sd_pm_exit 9917 * 9918 * Description: Called at the completion of a command to manage busy 9919 * status for the device. If the device becomes idle the 9920 * PM framework is notified. 
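 *
 *		Each successful sd_pm_entry must be balanced by exactly
 *		one sd_pm_exit; an illustrative sketch of the expected
 *		calling pattern:
 *
 *		    if (sd_pm_entry(un) == DDI_SUCCESS) {
 *			... issue the command ...
 *			sd_pm_exit(un);
 *		    }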
9921 * 9922 * Context: Kernel thread context 9923 */ 9924 9925 static void 9926 sd_pm_exit(struct sd_lun *un) 9927 { 9928 ASSERT(!mutex_owned(SD_MUTEX(un))); 9929 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9930 9931 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 9932 9933 /* 9934 * After attach the following flag is only read, so don't 9935 * take the penalty of acquiring a mutex for it. 9936 */ 9937 if (un->un_f_pm_is_enabled == TRUE) { 9938 9939 mutex_enter(&un->un_pm_mutex); 9940 un->un_pm_count--; 9941 9942 SD_TRACE(SD_LOG_IO_PM, un, 9943 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count); 9944 9945 ASSERT(un->un_pm_count >= 0); 9946 if (un->un_pm_count == 0) { 9947 mutex_exit(&un->un_pm_mutex); 9948 9949 SD_TRACE(SD_LOG_IO_PM, un, 9950 "sd_pm_exit: idle component\n"); 9951 9952 (void) pm_idle_component(SD_DEVINFO(un), 0); 9953 9954 } else { 9955 mutex_exit(&un->un_pm_mutex); 9956 } 9957 } 9958 9959 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n"); 9960 } 9961 9962 9963 /* 9964 * Function: sdopen 9965 * 9966 * Description: Driver's open(9e) entry point function. 9967 * 9968 * Arguments: dev_i - pointer to device number 9969 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE) 9970 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 9971 * cred_p - user credential pointer 9972 * 9973 * Return Code: EINVAL 9974 * ENXIO 9975 * EIO 9976 * EROFS 9977 * EBUSY 9978 * 9979 * Context: Kernel thread context 9980 */ 9981 /* ARGSUSED */ 9982 static int 9983 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p) 9984 { 9985 struct sd_lun *un; 9986 int nodelay; 9987 int part; 9988 uint64_t partmask; 9989 int instance; 9990 dev_t dev; 9991 int rval = EIO; 9992 9993 /* Validate the open type */ 9994 if (otyp >= OTYPCNT) { 9995 return (EINVAL); 9996 } 9997 9998 dev = *dev_p; 9999 instance = SDUNIT(dev); 10000 mutex_enter(&sd_detach_mutex); 10001 10002 /* 10003 * Fail the open if there is no softstate for the instance, or 10004 * if another thread somewhere is trying to detach the instance. 10005 */ 10006 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 10007 (un->un_detach_count != 0)) { 10008 mutex_exit(&sd_detach_mutex); 10009 /* 10010 * The probe cache only needs to be cleared when open (9e) fails 10011 * with ENXIO (4238046). 10012 */ 10013 /* 10014 * un-conditionally clearing probe cache is ok with 10015 * separate sd/ssd binaries 10016 * x86 platform can be an issue with both parallel 10017 * and fibre in 1 binary 10018 */ 10019 sd_scsi_clear_probe_cache(); 10020 return (ENXIO); 10021 } 10022 10023 /* 10024 * The un_layer_count is to prevent another thread in specfs from 10025 * trying to detach the instance, which can happen when we are 10026 * called from a higher-layer driver instead of thru specfs. 10027 * This will not be needed when DDI provides a layered driver 10028 * interface that allows specfs to know that an instance is in 10029 * use by a layered driver & should not be detached. 10030 * 10031 * Note: the semantics for layered driver opens are exactly one 10032 * close for every open. 10033 */ 10034 if (otyp == OTYP_LYR) { 10035 un->un_layer_count++; 10036 } 10037 10038 /* 10039 * Keep a count of the current # of opens in progress. This is because 10040 * some layered drivers try to call us as a regular open. This can 10041 * cause problems that we cannot prevent, however by keeping this count 10042 * we can at least keep our open and detach routines from racing against 10043 * each other under such conditions. 
10044 */ 10045 un->un_opens_in_progress++; 10046 mutex_exit(&sd_detach_mutex); 10047 10048 nodelay = (flag & (FNDELAY | FNONBLOCK)); 10049 part = SDPART(dev); 10050 partmask = 1 << part; 10051 10052 /* 10053 * We use a semaphore here in order to serialize 10054 * open and close requests on the device. 10055 */ 10056 sema_p(&un->un_semoclose); 10057 10058 mutex_enter(SD_MUTEX(un)); 10059 10060 /* 10061 * All device accesses go thru sdstrategy() where we check 10062 * on suspend status, but there could be a scsi_poll command, 10063 * which bypasses sdstrategy(), so we need to check pm 10064 * status. 10065 */ 10066 10067 if (!nodelay) { 10068 while ((un->un_state == SD_STATE_SUSPENDED) || 10069 (un->un_state == SD_STATE_PM_CHANGING)) { 10070 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10071 } 10072 10073 mutex_exit(SD_MUTEX(un)); 10074 if (sd_pm_entry(un) != DDI_SUCCESS) { 10075 rval = EIO; 10076 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 10077 "sdopen: sd_pm_entry failed\n"); 10078 goto open_failed_with_pm; 10079 } 10080 mutex_enter(SD_MUTEX(un)); 10081 } 10082 10083 /* check for previous exclusive open */ 10084 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 10085 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 10086 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 10087 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 10088 10089 if (un->un_exclopen & (partmask)) { 10090 goto excl_open_fail; 10091 } 10092 10093 if (flag & FEXCL) { 10094 int i; 10095 if (un->un_ocmap.lyropen[part]) { 10096 goto excl_open_fail; 10097 } 10098 for (i = 0; i < (OTYPCNT - 1); i++) { 10099 if (un->un_ocmap.regopen[i] & (partmask)) { 10100 goto excl_open_fail; 10101 } 10102 } 10103 } 10104 10105 /* 10106 * Check the write permission if this is a removable media device, 10107 * NDELAY has not been set, and writable permission is requested. 10108 * 10109 * Note: If NDELAY was set and this is write-protected media the WRITE 10110 * attempt will fail with EIO as part of the I/O processing. This is a 10111 * more permissive implementation that allows the open to succeed and 10112 * WRITE attempts to fail when appropriate. 10113 */ 10114 if (ISREMOVABLE(un)) { 10115 if ((flag & FWRITE) && (!nodelay)) { 10116 mutex_exit(SD_MUTEX(un)); 10117 /* 10118 * Defer the check for write permission on a writable 10119 * DVD drive till sdstrategy; do not fail the open even 10120 * if FWRITE is set, as the device can be writable 10121 * depending upon the media, and the media can change 10122 * after the call to open(). 10123 */ 10124 if (un->un_f_dvdram_writable_device == FALSE) { 10125 if (ISCD(un) || sr_check_wp(dev)) { 10126 rval = EROFS; 10127 mutex_enter(SD_MUTEX(un)); 10128 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10129 "write to cd or write protected media\n"); 10130 goto open_fail; 10131 } 10132 } 10133 mutex_enter(SD_MUTEX(un)); 10134 } 10135 } 10136 10137 /* 10138 * If opening in NDELAY/NONBLOCK mode, just return. 10139 * Check if disk is ready and has a valid geometry later. 10140 */ 10141 if (!nodelay) { 10142 mutex_exit(SD_MUTEX(un)); 10143 rval = sd_ready_and_valid(un); 10144 mutex_enter(SD_MUTEX(un)); 10145 /* 10146 * Fail if device is not ready or if the number of disk 10147 * blocks is zero or negative for non CD devices.
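 * (Removable devices fail the open with ENXIO, fixed disks with EIO, as set below.)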
10148 */ 10149 if ((rval != SD_READY_VALID) || 10150 (!ISCD(un) && un->un_map[part].dkl_nblk <= 0)) { 10151 if (ISREMOVABLE(un)) { 10152 rval = ENXIO; 10153 } else { 10154 rval = EIO; 10155 } 10156 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10157 "device not ready or invalid disk block value\n"); 10158 goto open_fail; 10159 } 10160 #if defined(__i386) || defined(__amd64) 10161 } else { 10162 uchar_t *cp; 10163 /* 10164 * x86 requires special nodelay handling, so that p0 is 10165 * always defined and accessible. 10166 * Invalidate geometry only if device is not already open. 10167 */ 10168 cp = &un->un_ocmap.chkd[0]; 10169 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10170 if (*cp != (uchar_t)0) { 10171 break; 10172 } 10173 cp++; 10174 } 10175 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10176 un->un_f_geometry_is_valid = FALSE; 10177 } 10178 10179 #endif 10180 } 10181 10182 if (otyp == OTYP_LYR) { 10183 un->un_ocmap.lyropen[part]++; 10184 } else { 10185 un->un_ocmap.regopen[otyp] |= partmask; 10186 } 10187 10188 /* Set up open and exclusive open flags */ 10189 if (flag & FEXCL) { 10190 un->un_exclopen |= (partmask); 10191 } 10192 10193 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10194 "open of part %d type %d\n", part, otyp); 10195 10196 mutex_exit(SD_MUTEX(un)); 10197 if (!nodelay) { 10198 sd_pm_exit(un); 10199 } 10200 10201 sema_v(&un->un_semoclose); 10202 10203 mutex_enter(&sd_detach_mutex); 10204 un->un_opens_in_progress--; 10205 mutex_exit(&sd_detach_mutex); 10206 10207 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 10208 return (DDI_SUCCESS); 10209 10210 excl_open_fail: 10211 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 10212 rval = EBUSY; 10213 10214 open_fail: 10215 mutex_exit(SD_MUTEX(un)); 10216 10217 /* 10218 * On a failed open we must exit the pm management. 10219 */ 10220 if (!nodelay) { 10221 sd_pm_exit(un); 10222 } 10223 open_failed_with_pm: 10224 sema_v(&un->un_semoclose); 10225 10226 mutex_enter(&sd_detach_mutex); 10227 un->un_opens_in_progress--; 10228 if (otyp == OTYP_LYR) { 10229 un->un_layer_count--; 10230 } 10231 mutex_exit(&sd_detach_mutex); 10232 10233 return (rval); 10234 } 10235 10236 10237 /* 10238 * Function: sdclose 10239 * 10240 * Description: Driver's close(9e) entry point function. 10241 * 10242 * Arguments: dev - device number 10243 * flag - file status flag, informational only 10244 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 10245 * cred_p - user credential pointer 10246 * 10247 * Return Code: ENXIO 10248 * 10249 * Context: Kernel thread context 10250 */ 10251 /* ARGSUSED */ 10252 static int 10253 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 10254 { 10255 struct sd_lun *un; 10256 uchar_t *cp; 10257 int part; 10258 int nodelay; 10259 int rval = 0; 10260 10261 /* Validate the open type */ 10262 if (otyp >= OTYPCNT) { 10263 return (ENXIO); 10264 } 10265 10266 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10267 return (ENXIO); 10268 } 10269 10270 part = SDPART(dev); 10271 nodelay = flag & (FNDELAY | FNONBLOCK); 10272 10273 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 10274 "sdclose: close of part %d type %d\n", part, otyp); 10275 10276 /* 10277 * We use a semaphore here in order to serialize 10278 * open and close requests on the device. 10279 */ 10280 sema_p(&un->un_semoclose); 10281 10282 mutex_enter(SD_MUTEX(un)); 10283 10284 /* Don't proceed if power is being changed. 
*/ 10285 while (un->un_state == SD_STATE_PM_CHANGING) { 10286 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10287 } 10288 10289 if (un->un_exclopen & (1 << part)) { 10290 un->un_exclopen &= ~(1 << part); 10291 } 10292 10293 /* Update the open partition map */ 10294 if (otyp == OTYP_LYR) { 10295 un->un_ocmap.lyropen[part] -= 1; 10296 } else { 10297 un->un_ocmap.regopen[otyp] &= ~(1 << part); 10298 } 10299 10300 cp = &un->un_ocmap.chkd[0]; 10301 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10302 if (*cp != NULL) { 10303 break; 10304 } 10305 cp++; 10306 } 10307 10308 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10309 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n"); 10310 10311 /* 10312 * We avoid persistence upon the last close, and set 10313 * the throttle back to the maximum. 10314 */ 10315 un->un_throttle = un->un_saved_throttle; 10316 10317 if (un->un_state == SD_STATE_OFFLINE) { 10318 if (un->un_f_is_fibre == FALSE) { 10319 scsi_log(SD_DEVINFO(un), sd_label, 10320 CE_WARN, "offline\n"); 10321 } 10322 un->un_f_geometry_is_valid = FALSE; 10323 10324 } else { 10325 /* 10326 * Flush any outstanding writes in NVRAM cache. 10327 * Note: SYNCHRONIZE CACHE is an optional SCSI-2 10328 * cmd; it may not work for non-Pluto devices. 10329 * SYNCHRONIZE CACHE is not required for removables, 10330 * except DVD-RAM drives. 10331 * 10332 * Also note: because SYNCHRONIZE CACHE is currently 10333 * the only command issued here that requires the 10334 * drive be powered up, only do the power up before 10335 * sending the Sync Cache command. If additional 10336 * commands are added which require a powered up 10337 * drive, the following sequence may have to change. 10338 * 10339 * And finally, note that parallel SCSI on SPARC 10340 * only issues a Sync Cache to DVD-RAM, a newly 10341 * supported device. 10342 */ 10343 #if defined(__i386) || defined(__amd64) 10344 if (!ISREMOVABLE(un) || 10345 un->un_f_dvdram_writable_device == TRUE) { 10346 #else 10347 if (un->un_f_dvdram_writable_device == TRUE) { 10348 #endif 10349 mutex_exit(SD_MUTEX(un)); 10350 if (sd_pm_entry(un) == DDI_SUCCESS) { 10351 if (sd_send_scsi_SYNCHRONIZE_CACHE(un) 10352 != 0) { 10353 rval = EIO; 10354 } 10355 sd_pm_exit(un); 10356 } else { 10357 rval = EIO; 10358 } 10359 mutex_enter(SD_MUTEX(un)); 10360 } 10361 10362 /* 10363 * For removable media devices, send an ALLOW MEDIA 10364 * REMOVAL command, but don't get upset if it fails. 10365 * Also invalidate the geometry. We need to raise 10366 * the power of the drive before we can call 10367 * sd_send_scsi_DOORLOCK(). 10368 */ 10369 if (ISREMOVABLE(un)) { 10370 mutex_exit(SD_MUTEX(un)); 10371 if (sd_pm_entry(un) == DDI_SUCCESS) { 10372 rval = sd_send_scsi_DOORLOCK(un, 10373 SD_REMOVAL_ALLOW, SD_PATH_DIRECT); 10374 10375 sd_pm_exit(un); 10376 if (ISCD(un) && (rval != 0) && 10377 (nodelay != 0)) { 10378 rval = ENXIO; 10379 } 10380 } else { 10381 rval = EIO; 10382 } 10383 mutex_enter(SD_MUTEX(un)); 10384 10385 sr_ejected(un); 10386 /* 10387 * Destroy the cache (if it exists) which was 10388 * allocated for the write maps since this is 10389 * the last close for this media. 10390 */ 10391 if (un->un_wm_cache) { 10392 /* 10393 * Check if there are pending commands; 10394 * if there are, give a warning and 10395 * do not destroy the cache.
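 * (In that case the cache is destroyed later, in sd_unit_detach.)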
10396 */ 10397 if (un->un_ncmds_in_driver > 0) { 10398 scsi_log(SD_DEVINFO(un), 10399 sd_label, CE_WARN, 10400 "Unable to clean up memory " 10401 "because of pending I/O\n"); 10402 } else { 10403 kmem_cache_destroy( 10404 un->un_wm_cache); 10405 un->un_wm_cache = NULL; 10406 } 10407 } 10408 } 10409 } 10410 } 10411 10412 mutex_exit(SD_MUTEX(un)); 10413 sema_v(&un->un_semoclose); 10414 10415 if (otyp == OTYP_LYR) { 10416 mutex_enter(&sd_detach_mutex); 10417 /* 10418 * The detach routine may run when the layer count 10419 * drops to zero. 10420 */ 10421 un->un_layer_count--; 10422 mutex_exit(&sd_detach_mutex); 10423 } 10424 10425 return (rval); 10426 } 10427 10428 10429 /* 10430 * Function: sd_ready_and_valid 10431 * 10432 * Description: Test if device is ready and has a valid geometry. 10433 * 10434 * Arguments: dev - device number 10435 * un - driver soft state (unit) structure 10436 * 10437 * Return Code: SD_READY_VALID ready and valid label 10438 * SD_READY_NOT_VALID ready, geom ops never applicable 10439 * SD_NOT_READY_VALID not ready, no label 10440 * 10441 * Context: Never called at interrupt context. 10442 */ 10443 10444 static int 10445 sd_ready_and_valid(struct sd_lun *un) 10446 { 10447 struct sd_errstats *stp; 10448 uint64_t capacity; 10449 uint_t lbasize; 10450 int rval = SD_READY_VALID; 10451 char name_str[48]; 10452 10453 ASSERT(un != NULL); 10454 ASSERT(!mutex_owned(SD_MUTEX(un))); 10455 10456 mutex_enter(SD_MUTEX(un)); 10457 if (ISREMOVABLE(un)) { 10458 mutex_exit(SD_MUTEX(un)); 10459 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 10460 rval = SD_NOT_READY_VALID; 10461 mutex_enter(SD_MUTEX(un)); 10462 goto done; 10463 } 10464 10465 mutex_enter(SD_MUTEX(un)); 10466 if ((un->un_f_geometry_is_valid == FALSE) || 10467 (un->un_f_blockcount_is_valid == FALSE) || 10468 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 10469 10470 /* capacity has to be read every open. */ 10471 mutex_exit(SD_MUTEX(un)); 10472 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 10473 &lbasize, SD_PATH_DIRECT) != 0) { 10474 mutex_enter(SD_MUTEX(un)); 10475 un->un_f_geometry_is_valid = FALSE; 10476 rval = SD_NOT_READY_VALID; 10477 goto done; 10478 } else { 10479 mutex_enter(SD_MUTEX(un)); 10480 sd_update_block_info(un, lbasize, capacity); 10481 } 10482 } 10483 10484 /* 10485 * If this is a non 512 block device, allocate space for 10486 * the wmap cache. This is being done here since every time 10487 * a media is changed this routine will be called and the 10488 * block size is a function of media rather than device. 10489 */ 10490 if (NOT_DEVBSIZE(un)) { 10491 if (!(un->un_wm_cache)) { 10492 (void) snprintf(name_str, sizeof (name_str), 10493 "%s%d_cache", 10494 ddi_driver_name(SD_DEVINFO(un)), 10495 ddi_get_instance(SD_DEVINFO(un))); 10496 un->un_wm_cache = kmem_cache_create( 10497 name_str, sizeof (struct sd_w_map), 10498 8, sd_wm_cache_constructor, 10499 sd_wm_cache_destructor, NULL, 10500 (void *)un, NULL, 0); 10501 if (!(un->un_wm_cache)) { 10502 rval = ENOMEM; 10503 goto done; 10504 } 10505 } 10506 } 10507 10508 /* 10509 * Check if the media in the device is writable or not. 10510 */ 10511 if ((un->un_f_geometry_is_valid == FALSE) && ISCD(un)) { 10512 sd_check_for_writable_cd(un); 10513 } 10514 10515 } else { 10516 /* 10517 * Do a test unit ready to clear any unit attention from non-cd 10518 * devices. 
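 * The return status is intentionally ignored; the command is issued only for its side effect of clearing the unit attention.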
10519 */ 10520 mutex_exit(SD_MUTEX(un)); 10521 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 10522 mutex_enter(SD_MUTEX(un)); 10523 } 10524 10525 10526 if (un->un_state == SD_STATE_NORMAL) { 10527 /* 10528 * If the target is not yet ready here (defined by a TUR 10529 * failure), invalidate the geometry and print an 'offline' 10530 * message. This is a legacy message, as the state of the 10531 * target is not actually changed to SD_STATE_OFFLINE. 10532 * 10533 * If the TUR fails for EACCES (Reservation Conflict), it 10534 * means there actually is nothing wrong with the target that 10535 * would require invalidating the geometry, so continue in 10536 * that case as if the TUR was successful. 10537 */ 10538 int err; 10539 10540 mutex_exit(SD_MUTEX(un)); 10541 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 10542 mutex_enter(SD_MUTEX(un)); 10543 10544 if ((err != 0) && (err != EACCES)) { 10545 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 10546 "offline\n"); 10547 un->un_f_geometry_is_valid = FALSE; 10548 rval = SD_NOT_READY_VALID; 10549 goto done; 10550 } 10551 } 10552 10553 if (un->un_f_format_in_progress == FALSE) { 10554 /* 10555 * Note: sd_validate_geometry may return TRUE, but that does 10556 * not necessarily mean un_f_geometry_is_valid == TRUE! 10557 */ 10558 rval = sd_validate_geometry(un, SD_PATH_DIRECT); 10559 if (rval == ENOTSUP) { 10560 if (un->un_f_geometry_is_valid == TRUE) 10561 rval = 0; 10562 else { 10563 rval = SD_READY_NOT_VALID; 10564 goto done; 10565 } 10566 } 10567 if (rval != 0) { 10568 /* 10569 * We don't check the validity of geometry for 10570 * CDROMs. Also we assume we have a good label 10571 * even if sd_validate_geometry returned ENOMEM. 10572 */ 10573 if (!ISCD(un) && rval != ENOMEM) { 10574 rval = SD_NOT_READY_VALID; 10575 goto done; 10576 } 10577 } 10578 } 10579 10580 #ifdef DOESNTWORK /* on eliteII, see 1118607 */ 10581 /* 10582 * check to see if this disk is write protected; if it is and we have 10583 * not set read-only, then fail 10584 */ 10585 if ((flag & FWRITE) && (sr_check_wp(dev))) { 10586 New_state(un, SD_STATE_CLOSED); 10587 goto done; 10588 } 10589 #endif 10590 10591 /* 10592 * If this is a removable media device, try to send 10593 * a PREVENT MEDIA REMOVAL command, but don't get upset 10594 * if it fails. For a CD, however, it is an error. 10595 */ 10596 if (ISREMOVABLE(un)) { 10597 mutex_exit(SD_MUTEX(un)); 10598 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 10599 SD_PATH_DIRECT) != 0) && ISCD(un)) { 10600 rval = SD_NOT_READY_VALID; 10601 mutex_enter(SD_MUTEX(un)); 10602 goto done; 10603 } 10604 mutex_enter(SD_MUTEX(un)); 10605 } 10606 10607 /* The state has changed, inform the media watch routines */ 10608 un->un_mediastate = DKIO_INSERTED; 10609 cv_broadcast(&un->un_state_cv); 10610 rval = SD_READY_VALID; 10611 10612 done: 10613 10614 /* 10615 * Initialize the capacity kstat value, if no media previously 10616 * (capacity kstat is 0) and a media has been inserted 10617 * (un_blockcount > 0). 10618 * This is a more generic way than checking for ISREMOVABLE.
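 * (sd_set_errstats makes the same capacity computation at attach time.)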
10619 */ 10620 if (un->un_errstats != NULL) { 10621 stp = (struct sd_errstats *)un->un_errstats->ks_data; 10622 if ((stp->sd_capacity.value.ui64 == 0) && 10623 (un->un_f_blockcount_is_valid == TRUE)) { 10624 stp->sd_capacity.value.ui64 = 10625 (uint64_t)((uint64_t)un->un_blockcount * 10626 un->un_sys_blocksize); 10627 } 10628 } 10629 10630 mutex_exit(SD_MUTEX(un)); 10631 return (rval); 10632 } 10633 10634 10635 /* 10636 * Function: sdmin 10637 * 10638 * Description: Routine to limit the size of a data transfer. Used in 10639 * conjunction with physio(9F). 10640 * 10641 * Arguments: bp - pointer to the indicated buf(9S) struct. 10642 * 10643 * Context: Kernel thread context. 10644 */ 10645 10646 static void 10647 sdmin(struct buf *bp) 10648 { 10649 struct sd_lun *un; 10650 int instance; 10651 10652 instance = SDUNIT(bp->b_edev); 10653 10654 un = ddi_get_soft_state(sd_state, instance); 10655 ASSERT(un != NULL); 10656 10657 if (bp->b_bcount > un->un_max_xfer_size) { 10658 bp->b_bcount = un->un_max_xfer_size; 10659 } 10660 } 10661 10662 10663 /* 10664 * Function: sdread 10665 * 10666 * Description: Driver's read(9e) entry point function. 10667 * 10668 * Arguments: dev - device number 10669 * uio - structure pointer describing where data is to be stored 10670 * in user's space 10671 * cred_p - user credential pointer 10672 * 10673 * Return Code: ENXIO 10674 * EIO 10675 * EINVAL 10676 * value returned by physio 10677 * 10678 * Context: Kernel thread context. 10679 */ 10680 /* ARGSUSED */ 10681 static int 10682 sdread(dev_t dev, struct uio *uio, cred_t *cred_p) 10683 { 10684 struct sd_lun *un = NULL; 10685 int secmask; 10686 int err; 10687 10688 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10689 return (ENXIO); 10690 } 10691 10692 ASSERT(!mutex_owned(SD_MUTEX(un))); 10693 10694 if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) { 10695 mutex_enter(SD_MUTEX(un)); 10696 /* 10697 * Because the call to sd_ready_and_valid will issue I/O we 10698 * must wait here if either the device is suspended or 10699 * if its power level is changing. 10700 */ 10701 while ((un->un_state == SD_STATE_SUSPENDED) || 10702 (un->un_state == SD_STATE_PM_CHANGING)) { 10703 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10704 } 10705 un->un_ncmds_in_driver++; 10706 mutex_exit(SD_MUTEX(un)); 10707 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10708 mutex_enter(SD_MUTEX(un)); 10709 un->un_ncmds_in_driver--; 10710 ASSERT(un->un_ncmds_in_driver >= 0); 10711 mutex_exit(SD_MUTEX(un)); 10712 return (EIO); 10713 } 10714 mutex_enter(SD_MUTEX(un)); 10715 un->un_ncmds_in_driver--; 10716 ASSERT(un->un_ncmds_in_driver >= 0); 10717 mutex_exit(SD_MUTEX(un)); 10718 } 10719 10720 /* 10721 * Read requests are restricted to multiples of the system block size. 10722 */ 10723 secmask = un->un_sys_blocksize - 1; 10724 10725 if (uio->uio_loffset & ((offset_t)(secmask))) { 10726 SD_ERROR(SD_LOG_READ_WRITE, un, 10727 "sdread: file offset not modulo %d\n", 10728 un->un_sys_blocksize); 10729 err = EINVAL; 10730 } else if (uio->uio_iov->iov_len & (secmask)) { 10731 SD_ERROR(SD_LOG_READ_WRITE, un, 10732 "sdread: transfer length not modulo %d\n", 10733 un->un_sys_blocksize); 10734 err = EINVAL; 10735 } else { 10736 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio); 10737 } 10738 return (err); 10739 } 10740 10741 10742 /* 10743 * Function: sdwrite 10744 * 10745 * Description: Driver's write(9e) entry point function.
10746 * 10747 * Arguments: dev - device number 10748 * uio - structure pointer describing where data is stored in 10749 * user's space 10750 * cred_p - user credential pointer 10751 * 10752 * Return Code: ENXIO 10753 * EIO 10754 * EINVAL 10755 * value returned by physio 10756 * 10757 * Context: Kernel thread context. 10758 */ 10759 /* ARGSUSED */ 10760 static int 10761 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 10762 { 10763 struct sd_lun *un = NULL; 10764 int secmask; 10765 int err; 10766 10767 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10768 return (ENXIO); 10769 } 10770 10771 ASSERT(!mutex_owned(SD_MUTEX(un))); 10772 10773 if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) { 10774 mutex_enter(SD_MUTEX(un)); 10775 /* 10776 * Because the call to sd_ready_and_valid will issue I/O, we 10777 * must wait here if either the device is suspended or 10778 * its power level is changing. 10779 */ 10780 while ((un->un_state == SD_STATE_SUSPENDED) || 10781 (un->un_state == SD_STATE_PM_CHANGING)) { 10782 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10783 } 10784 un->un_ncmds_in_driver++; 10785 mutex_exit(SD_MUTEX(un)); 10786 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10787 mutex_enter(SD_MUTEX(un)); 10788 un->un_ncmds_in_driver--; 10789 ASSERT(un->un_ncmds_in_driver >= 0); 10790 mutex_exit(SD_MUTEX(un)); 10791 return (EIO); 10792 } 10793 mutex_enter(SD_MUTEX(un)); 10794 un->un_ncmds_in_driver--; 10795 ASSERT(un->un_ncmds_in_driver >= 0); 10796 mutex_exit(SD_MUTEX(un)); 10797 } 10798 10799 /* 10800 * Write requests are restricted to multiples of the system block size. 10801 */ 10802 secmask = un->un_sys_blocksize - 1; 10803 10804 if (uio->uio_loffset & ((offset_t)(secmask))) { 10805 SD_ERROR(SD_LOG_READ_WRITE, un, 10806 "sdwrite: file offset not modulo %d\n", 10807 un->un_sys_blocksize); 10808 err = EINVAL; 10809 } else if (uio->uio_iov->iov_len & (secmask)) { 10810 SD_ERROR(SD_LOG_READ_WRITE, un, 10811 "sdwrite: transfer length not modulo %d\n", 10812 un->un_sys_blocksize); 10813 err = EINVAL; 10814 } else { 10815 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 10816 } 10817 return (err); 10818 } 10819 10820 10821 /* 10822 * Function: sdaread 10823 * 10824 * Description: Driver's aread(9e) entry point function. 10825 * 10826 * Arguments: dev - device number 10827 * aio - structure pointer describing where data is to be stored 10828 * cred_p - user credential pointer 10829 * 10830 * Return Code: ENXIO 10831 * EIO 10832 * EINVAL 10833 * value returned by aphysio 10834 * 10835 * Context: Kernel thread context. 10836 */ 10837 /* ARGSUSED */ 10838 static int 10839 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10840 { 10841 struct sd_lun *un = NULL; 10842 struct uio *uio = aio->aio_uio; 10843 int secmask; 10844 int err; 10845 10846 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10847 return (ENXIO); 10848 } 10849 10850 ASSERT(!mutex_owned(SD_MUTEX(un))); 10851 10852 if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) { 10853 mutex_enter(SD_MUTEX(un)); 10854 /* 10855 * Because the call to sd_ready_and_valid will issue I/O, we 10856 * must wait here if either the device is suspended or 10857 * its power level is changing.
10858 */ 10859 while ((un->un_state == SD_STATE_SUSPENDED) || 10860 (un->un_state == SD_STATE_PM_CHANGING)) { 10861 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10862 } 10863 un->un_ncmds_in_driver++; 10864 mutex_exit(SD_MUTEX(un)); 10865 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10866 mutex_enter(SD_MUTEX(un)); 10867 un->un_ncmds_in_driver--; 10868 ASSERT(un->un_ncmds_in_driver >= 0); 10869 mutex_exit(SD_MUTEX(un)); 10870 return (EIO); 10871 } 10872 mutex_enter(SD_MUTEX(un)); 10873 un->un_ncmds_in_driver--; 10874 ASSERT(un->un_ncmds_in_driver >= 0); 10875 mutex_exit(SD_MUTEX(un)); 10876 } 10877 10878 /* 10879 * Read requests are restricted to multiples of the system block size. 10880 */ 10881 secmask = un->un_sys_blocksize - 1; 10882 10883 if (uio->uio_loffset & ((offset_t)(secmask))) { 10884 SD_ERROR(SD_LOG_READ_WRITE, un, 10885 "sdaread: file offset not modulo %d\n", 10886 un->un_sys_blocksize); 10887 err = EINVAL; 10888 } else if (uio->uio_iov->iov_len & (secmask)) { 10889 SD_ERROR(SD_LOG_READ_WRITE, un, 10890 "sdaread: transfer length not modulo %d\n", 10891 un->un_sys_blocksize); 10892 err = EINVAL; 10893 } else { 10894 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 10895 } 10896 return (err); 10897 } 10898 10899 10900 /* 10901 * Function: sdawrite 10902 * 10903 * Description: Driver's awrite(9e) entry point function. 10904 * 10905 * Arguments: dev - device number 10906 * aio - structure pointer describing where data is stored 10907 * cred_p - user credential pointer 10908 * 10909 * Return Code: ENXIO 10910 * EIO 10911 * EINVAL 10912 * value returned by aphysio 10913 * 10914 * Context: Kernel thread context. 10915 */ 10916 /* ARGSUSED */ 10917 static int 10918 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10919 { 10920 struct sd_lun *un = NULL; 10921 struct uio *uio = aio->aio_uio; 10922 int secmask; 10923 int err; 10924 10925 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10926 return (ENXIO); 10927 } 10928 10929 ASSERT(!mutex_owned(SD_MUTEX(un))); 10930 10931 if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) { 10932 mutex_enter(SD_MUTEX(un)); 10933 /* 10934 * Because the call to sd_ready_and_valid will issue I/O, we 10935 * must wait here if either the device is suspended or 10936 * its power level is changing. 10937 */ 10938 while ((un->un_state == SD_STATE_SUSPENDED) || 10939 (un->un_state == SD_STATE_PM_CHANGING)) { 10940 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10941 } 10942 un->un_ncmds_in_driver++; 10943 mutex_exit(SD_MUTEX(un)); 10944 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10945 mutex_enter(SD_MUTEX(un)); 10946 un->un_ncmds_in_driver--; 10947 ASSERT(un->un_ncmds_in_driver >= 0); 10948 mutex_exit(SD_MUTEX(un)); 10949 return (EIO); 10950 } 10951 mutex_enter(SD_MUTEX(un)); 10952 un->un_ncmds_in_driver--; 10953 ASSERT(un->un_ncmds_in_driver >= 0); 10954 mutex_exit(SD_MUTEX(un)); 10955 } 10956 10957 /* 10958 * Write requests are restricted to multiples of the system block size.
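 *
 * A minimal sketch of the alignment test below (editor's example,
 * assuming un_sys_blocksize = 512): secmask = 512 - 1 = 0x1FF, so any
 * offset or length with a nonzero low 9 bits is rejected:
 *
 *	(5120 & 0x1FF) == 0	-> aligned, request allowed
 *	(5123 & 0x1FF) != 0	-> EINVAL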
10959 */ 10960 secmask = un->un_sys_blocksize - 1; 10961 10962 if (uio->uio_loffset & ((offset_t)(secmask))) { 10963 SD_ERROR(SD_LOG_READ_WRITE, un, 10964 "sdawrite: file offset not modulo %d\n", 10965 un->un_sys_blocksize); 10966 err = EINVAL; 10967 } else if (uio->uio_iov->iov_len & (secmask)) { 10968 SD_ERROR(SD_LOG_READ_WRITE, un, 10969 "sdawrite: transfer length not modulo %d\n", 10970 un->un_sys_blocksize); 10971 err = EINVAL; 10972 } else { 10973 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 10974 } 10975 return (err); 10976 } 10977 10978 10979 10980 10981 10982 /* 10983 * Driver IO processing follows this sequence: 10984 * 10985 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10986 * | | ^ 10987 * v v | 10988 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10989 * | | | | 10990 * v | | | 10991 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10992 * | | ^ ^ 10993 * v v | | 10994 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10995 * | | | | 10996 * +---+ | +------------+ +-------+ 10997 * | | | | 10998 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10999 * | v | | 11000 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 11001 * | | ^ | 11002 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11003 * | v | | 11004 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 11005 * | | ^ | 11006 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 11007 * | v | | 11008 * | sd_checksum_iostart() sd_checksum_iodone() | 11009 * | | ^ | 11010 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 11011 * | v | | 11012 * | sd_pm_iostart() sd_pm_iodone() | 11013 * | | ^ | 11014 * | | | | 11015 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 11016 * | ^ 11017 * v | 11018 * sd_core_iostart() | 11019 * | | 11020 * | +------>(*destroypkt)() 11021 * +-> sd_start_cmds() <-+ | | 11022 * | | | v 11023 * | | | scsi_destroy_pkt(9F) 11024 * | | | 11025 * +->(*initpkt)() +- sdintr() 11026 * | | | | 11027 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 11028 * | +-> scsi_setup_cdb(9F) | 11029 * | | 11030 * +--> scsi_transport(9F) | 11031 * | | 11032 * +----> SCSA ---->+ 11033 * 11034 * 11035 * This code is based upon the following presumptions: 11036 * 11037 * - iostart and iodone functions operate on buf(9S) structures. These 11038 * functions perform the necessary operations on the buf(9S) and pass 11039 * them along to the next function in the chain by using the macros 11040 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 11041 * (for iodone side functions). 11042 * 11043 * - The iostart side functions may sleep. The iodone side functions 11044 * are called under interrupt context and may NOT sleep. Therefore 11045 * iodone side functions also may not call iostart side functions. 11046 * (NOTE: iostart side functions should NOT sleep for memory, as 11047 * this could result in deadlock.) 11048 * 11049 * - An iostart side function may call its corresponding iodone side 11050 * function directly (if necessary). 11051 * 11052 * - In the event of an error, an iostart side function can return a buf(9S) 11053 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 11054 * b_error in the usual way, of course). 11055 * 11056 * - The taskq mechanism may be used by the iodone side functions to dispatch 11057 * requests to the iostart side functions. The iostart side functions in 11058 * this case would be called under the context of a taskq thread, so it's 11059 * OK for them to block/sleep/spin in this case.
11060 * 11061 * - iostart side functions may allocate "shadow" buf(9S) structs and 11062 * pass them along to the next function in the chain. The corresponding 11063 * iodone side functions must coalesce the "shadow" bufs and return 11064 * the "original" buf to the next higher layer. 11065 * 11066 * - The b_private field of the buf(9S) struct holds a pointer to 11067 * an sd_xbuf struct, which contains information needed to 11068 * construct the scsi_pkt for the command. 11069 * 11070 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 11071 * layer must acquire & release the SD_MUTEX(un) as needed. 11072 */ 11073 11074 11075 /* 11076 * Create taskq for all targets in the system. This is created at 11077 * _init(9E) and destroyed at _fini(9E). 11078 * 11079 * Note: here we set the minalloc to a reasonably high number to ensure that 11080 * we will have an adequate supply of task entries available at interrupt time. 11081 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 11082 * sd_taskq_create(). Since we do not want to sleep for allocations at 11083 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 11084 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 11085 * requests at any one instant in time. 11086 */ 11087 #define SD_TASKQ_NUMTHREADS 8 11088 #define SD_TASKQ_MINALLOC 256 11089 #define SD_TASKQ_MAXALLOC 256 11090 11091 static taskq_t *sd_tq = NULL; 11092 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 11093 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 11094 11095 /* 11096 * The following task queue is created for the write phase of 11097 * read-modify-write on non-512 block size devices. 11098 * Limit the number of threads to 1 for now. This number was chosen 11099 * because the queue currently applies only to DVD-RAM and MO drives, 11100 * for which performance is not the main criterion at this stage. 11101 * Note: whether a single taskq could be used instead remains to be explored. 11102 */ 11103 #define SD_WMR_TASKQ_NUMTHREADS 1 11104 static taskq_t *sd_wmr_tq = NULL; 11105 11106 /* 11107 * Function: sd_taskq_create 11108 * 11109 * Description: Create taskq thread(s) and preallocate task entries 11110 * 11111 * Return Code: None; the allocated taskqs are saved in sd_tq and sd_wmr_tq. 11112 * 11113 * Context: Can sleep. Requires blockable context. 11114 * 11115 * Notes: - The taskq() facility currently is NOT part of the DDI. 11116 * (definitely NOT recommended for 3rd-party drivers!) :-) 11117 * - taskq_create() will block for memory; also, it will panic 11118 * if it cannot create the requested number of threads. 11119 * - Currently taskq_create() creates threads that cannot be 11120 * swapped.
11121 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 11122 * supply of taskq entries at interrupt time (ie, so that we 11123 * do not have to sleep for memory) 11124 */ 11125 11126 static void 11127 sd_taskq_create(void) 11128 { 11129 char taskq_name[TASKQ_NAMELEN]; 11130 11131 ASSERT(sd_tq == NULL); 11132 ASSERT(sd_wmr_tq == NULL); 11133 11134 (void) snprintf(taskq_name, sizeof (taskq_name), 11135 "%s_drv_taskq", sd_label); 11136 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 11137 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11138 TASKQ_PREPOPULATE)); 11139 11140 (void) snprintf(taskq_name, sizeof (taskq_name), 11141 "%s_rmw_taskq", sd_label); 11142 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 11143 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11144 TASKQ_PREPOPULATE)); 11145 } 11146 11147 11148 /* 11149 * Function: sd_taskq_delete 11150 * 11151 * Description: Complementary cleanup routine for sd_taskq_create(). 11152 * 11153 * Context: Kernel thread context. 11154 */ 11155 11156 static void 11157 sd_taskq_delete(void) 11158 { 11159 ASSERT(sd_tq != NULL); 11160 ASSERT(sd_wmr_tq != NULL); 11161 taskq_destroy(sd_tq); 11162 taskq_destroy(sd_wmr_tq); 11163 sd_tq = NULL; 11164 sd_wmr_tq = NULL; 11165 } 11166 11167 11168 /* 11169 * Function: sdstrategy 11170 * 11171 * Description: Driver's strategy (9E) entry point function. 11172 * 11173 * Arguments: bp - pointer to buf(9S) 11174 * 11175 * Return Code: Always returns zero 11176 * 11177 * Context: Kernel thread context. 11178 */ 11179 11180 static int 11181 sdstrategy(struct buf *bp) 11182 { 11183 struct sd_lun *un; 11184 11185 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11186 if (un == NULL) { 11187 bioerror(bp, EIO); 11188 bp->b_resid = bp->b_bcount; 11189 biodone(bp); 11190 return (0); 11191 } 11192 /* As was done in the past, fail new cmds. if state is dumping. */ 11193 if (un->un_state == SD_STATE_DUMPING) { 11194 bioerror(bp, ENXIO); 11195 bp->b_resid = bp->b_bcount; 11196 biodone(bp); 11197 return (0); 11198 } 11199 11200 ASSERT(!mutex_owned(SD_MUTEX(un))); 11201 11202 /* 11203 * Commands may sneak in while we released the mutex in 11204 * DDI_SUSPEND, we should block new commands. However, old 11205 * commands that are still in the driver at this point should 11206 * still be allowed to drain. 11207 */ 11208 mutex_enter(SD_MUTEX(un)); 11209 /* 11210 * Must wait here if either the device is suspended or 11211 * if it's power level is changing. 11212 */ 11213 while ((un->un_state == SD_STATE_SUSPENDED) || 11214 (un->un_state == SD_STATE_PM_CHANGING)) { 11215 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11216 } 11217 11218 un->un_ncmds_in_driver++; 11219 11220 /* 11221 * atapi: Since we are running the CD for now in PIO mode we need to 11222 * call bp_mapin here to avoid bp_mapin called interrupt context under 11223 * the HBA's init_pkt routine. 11224 */ 11225 if (un->un_f_cfg_is_atapi == TRUE) { 11226 mutex_exit(SD_MUTEX(un)); 11227 bp_mapin(bp); 11228 mutex_enter(SD_MUTEX(un)); 11229 } 11230 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 11231 un->un_ncmds_in_driver); 11232 11233 mutex_exit(SD_MUTEX(un)); 11234 11235 /* 11236 * This will (eventually) allocate the sd_xbuf area and 11237 * call sd_xbuf_strategy(). We just want to return the 11238 * result of ddi_xbuf_qstrategy so that we have an opt- 11239 * imized tail call which saves us a stack frame. 
11240 */ 11241 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 11242 } 11243 11244 11245 /* 11246 * Function: sd_xbuf_strategy 11247 * 11248 * Description: Function for initiating IO operations via the 11249 * ddi_xbuf_qstrategy() mechanism. 11250 * 11251 * Context: Kernel thread context. 11252 */ 11253 11254 static void 11255 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11256 { 11257 struct sd_lun *un = arg; 11258 11259 ASSERT(bp != NULL); 11260 ASSERT(xp != NULL); 11261 ASSERT(un != NULL); 11262 ASSERT(!mutex_owned(SD_MUTEX(un))); 11263 11264 /* 11265 * Initialize the fields in the xbuf and save a pointer to the 11266 * xbuf in bp->b_private. 11267 */ 11268 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11269 11270 /* Send the buf down the iostart chain */ 11271 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11272 } 11273 11274 11275 /* 11276 * Function: sd_xbuf_init 11277 * 11278 * Description: Prepare the given sd_xbuf struct for use. 11279 * 11280 * Arguments: un - ptr to softstate 11281 * bp - ptr to associated buf(9S) 11282 * xp - ptr to associated sd_xbuf 11283 * chain_type - IO chain type to use: 11284 * SD_CHAIN_NULL 11285 * SD_CHAIN_BUFIO 11286 * SD_CHAIN_USCSI 11287 * SD_CHAIN_DIRECT 11288 * SD_CHAIN_DIRECT_PRIORITY 11289 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11290 * initialization; may be NULL if none. 11291 * 11292 * Context: Kernel thread context 11293 */ 11294 11295 static void 11296 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11297 uchar_t chain_type, void *pktinfop) 11298 { 11299 int index; 11300 11301 ASSERT(un != NULL); 11302 ASSERT(bp != NULL); 11303 ASSERT(xp != NULL); 11304 11305 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11306 bp, chain_type); 11307 11308 xp->xb_un = un; 11309 xp->xb_pktp = NULL; 11310 xp->xb_pktinfo = pktinfop; 11311 xp->xb_private = bp->b_private; 11312 xp->xb_blkno = (daddr_t)bp->b_blkno; 11313 11314 /* 11315 * Set up the iostart and iodone chain indexes in the xbuf, based 11316 * upon the specified chain type to use. 11317 */ 11318 switch (chain_type) { 11319 case SD_CHAIN_NULL: 11320 /* 11321 * Fall through to just use the values for the buf type, even 11322 * though for the NULL chain these values will never be used. 11323 */ 11324 /* FALLTHRU */ 11325 case SD_CHAIN_BUFIO: 11326 index = un->un_buf_chain_type; 11327 break; 11328 case SD_CHAIN_USCSI: 11329 index = un->un_uscsi_chain_type; 11330 break; 11331 case SD_CHAIN_DIRECT: 11332 index = un->un_direct_chain_type; 11333 break; 11334 case SD_CHAIN_DIRECT_PRIORITY: 11335 index = un->un_priority_chain_type; 11336 break; 11337 default: 11338 /* We're really broken if we ever get here... */ 11339 panic("sd_xbuf_init: illegal chain type!"); 11340 /*NOTREACHED*/ 11341 } 11342 11343 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 11344 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 11345 11346 /* 11347 * It might be a bit easier to simply bzero the entire xbuf above, 11348 * but it turns out that since we init a fair number of members anyway, 11349 * we save a fair number of cycles by doing explicit assignment of zero.
11350 */ 11351 xp->xb_pkt_flags = 0; 11352 xp->xb_dma_resid = 0; 11353 xp->xb_retry_count = 0; 11354 xp->xb_victim_retry_count = 0; 11355 xp->xb_ua_retry_count = 0; 11356 xp->xb_sense_bp = NULL; 11357 xp->xb_sense_status = 0; 11358 xp->xb_sense_state = 0; 11359 xp->xb_sense_resid = 0; 11360 11361 bp->b_private = xp; 11362 bp->b_flags &= ~(B_DONE | B_ERROR); 11363 bp->b_resid = 0; 11364 bp->av_forw = NULL; 11365 bp->av_back = NULL; 11366 bioerror(bp, 0); 11367 11368 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 11369 } 11370 11371 11372 /* 11373 * Function: sd_uscsi_strategy 11374 * 11375 * Description: Wrapper for calling into the USCSI chain via physio(9F) 11376 * 11377 * Arguments: bp - buf struct ptr 11378 * 11379 * Return Code: Always returns 0 11380 * 11381 * Context: Kernel thread context 11382 */ 11383 11384 static int 11385 sd_uscsi_strategy(struct buf *bp) 11386 { 11387 struct sd_lun *un; 11388 struct sd_uscsi_info *uip; 11389 struct sd_xbuf *xp; 11390 uchar_t chain_type; 11391 11392 ASSERT(bp != NULL); 11393 11394 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11395 if (un == NULL) { 11396 bioerror(bp, EIO); 11397 bp->b_resid = bp->b_bcount; 11398 biodone(bp); 11399 return (0); 11400 } 11401 11402 ASSERT(!mutex_owned(SD_MUTEX(un))); 11403 11404 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 11405 11406 mutex_enter(SD_MUTEX(un)); 11407 /* 11408 * atapi: Since we are running the CD for now in PIO mode we need to 11409 * call bp_mapin here to avoid bp_mapin being called in interrupt 11410 * context under the HBA's init_pkt routine. 11411 */ 11412 if (un->un_f_cfg_is_atapi == TRUE) { 11413 mutex_exit(SD_MUTEX(un)); 11414 bp_mapin(bp); 11415 mutex_enter(SD_MUTEX(un)); 11416 } 11417 un->un_ncmds_in_driver++; 11418 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 11419 un->un_ncmds_in_driver); 11420 mutex_exit(SD_MUTEX(un)); 11421 11422 /* 11423 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 11424 */ 11425 ASSERT(bp->b_private != NULL); 11426 uip = (struct sd_uscsi_info *)bp->b_private; 11427 11428 switch (uip->ui_flags) { 11429 case SD_PATH_DIRECT: 11430 chain_type = SD_CHAIN_DIRECT; 11431 break; 11432 case SD_PATH_DIRECT_PRIORITY: 11433 chain_type = SD_CHAIN_DIRECT_PRIORITY; 11434 break; 11435 default: 11436 chain_type = SD_CHAIN_USCSI; 11437 break; 11438 } 11439 11440 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 11441 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 11442 11443 /* Use the index obtained within xbuf_init */ 11444 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 11445 11446 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 11447 11448 return (0); 11449 } 11450 11451 11452 /* 11453 * These routines perform raw i/o operations. 11454 */ 11455 /*ARGSUSED*/ 11456 static void 11457 sduscsimin(struct buf *bp) 11458 { 11459 /* 11460 * Do not break up the transfer; the CDB count would then 11461 * be incorrect and data underruns would result (incomplete 11462 * reads/writes, which would be retried and then fail; see 11463 * sdintr()).
11464 */ 11465 } 11466 11467 11468 11469 /* 11470 * Function: sd_send_scsi_cmd 11471 * 11472 * Description: Runs a USCSI command for a user (when called through 11473 * sdioctl) or for the driver. 11474 * 11475 * Arguments: dev - the dev_t for the device 11476 * incmd - ptr to a valid uscsi_cmd struct 11477 * cdbspace - UIO_USERSPACE or UIO_SYSSPACE 11478 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11479 * rqbufspace - UIO_USERSPACE or UIO_SYSSPACE 11480 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11481 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11482 * to use the USCSI "direct" chain and bypass the normal 11483 * command waitq. 11484 * 11485 * Return Code: 0 - successful completion of the given command 11486 * EIO - scsi_reset() failed, or see biowait()/physio() codes. 11487 * ENXIO - soft state not found for specified dev 11488 * EINVAL 11489 * EFAULT - copyin/copyout error 11490 * return code of biowait(9F) or physio(9F): 11491 * EIO - IO error, caller may check incmd->uscsi_status 11492 * ENXIO 11493 * EACCES - reservation conflict 11494 * 11495 * Context: Waits for command to complete. Can sleep. 11496 */ 11497 11498 static int 11499 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, 11500 enum uio_seg cdbspace, enum uio_seg dataspace, enum uio_seg rqbufspace, 11501 int path_flag) 11502 { 11503 struct sd_uscsi_info *uip; 11504 struct uscsi_cmd *uscmd; 11505 struct sd_lun *un; 11506 struct buf *bp; 11507 int rval; 11508 int flags; 11509 11510 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 11511 if (un == NULL) { 11512 return (ENXIO); 11513 } 11514 11515 ASSERT(!mutex_owned(SD_MUTEX(un))); 11516 11517 #ifdef SDDEBUG 11518 switch (dataspace) { 11519 case UIO_USERSPACE: 11520 SD_TRACE(SD_LOG_IO, un, 11521 "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un); 11522 break; 11523 case UIO_SYSSPACE: 11524 SD_TRACE(SD_LOG_IO, un, 11525 "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un); 11526 break; 11527 default: 11528 SD_TRACE(SD_LOG_IO, un, 11529 "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un); 11530 break; 11531 } 11532 #endif 11533 11534 /* 11535 * Perform resets directly; no need to generate a command to do it. 11536 */ 11537 if (incmd->uscsi_flags & (USCSI_RESET | USCSI_RESET_ALL)) { 11538 flags = ((incmd->uscsi_flags & USCSI_RESET_ALL) != 0) ? 11539 RESET_ALL : RESET_TARGET; 11540 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: Issuing reset\n"); 11541 if (scsi_reset(SD_ADDRESS(un), flags) == 0) { 11542 /* Reset attempt was unsuccessful */ 11543 SD_TRACE(SD_LOG_IO, un, 11544 "sd_send_scsi_cmd: reset: failure\n"); 11545 return (EIO); 11546 } 11547 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: reset: success\n"); 11548 return (0); 11549 } 11550 11551 /* Perfunctory sanity check... */ 11552 if (incmd->uscsi_cdblen <= 0) { 11553 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11554 "invalid uscsi_cdblen, returning EINVAL\n"); 11555 return (EINVAL); 11556 } 11557 11558 /* 11559 * In order to not worry about where the uscsi structure came from 11560 * (or where the cdb it points to came from), we're going to make 11561 * kmem_alloc'd copies of them here. This will also allow reference 11562 * to the data they contain long after this process has gone to 11563 * sleep and its kernel stack has been unmapped, etc. 11564 * 11565 * First get some memory for the uscsi_cmd struct and copy the 11566 * contents of the given uscsi_cmd struct into it.
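 *
 * Illustrative caller sketch (editor's example; the driver's own
 * helpers, such as sd_send_scsi_TEST_UNIT_READY(), live elsewhere in
 * this file and may differ in detail). A kernel-resident, data-less
 * command would be issued roughly like this, where dev is the
 * target's dev_t:
 *
 *	union scsi_cdb cdb;
 *	struct uscsi_cmd ucmd;
 *	int status;
 *
 *	bzero(&cdb, sizeof (cdb));
 *	bzero(&ucmd, sizeof (ucmd));
 *	cdb.scc_cmd	  = SCMD_TEST_UNIT_READY;
 *	ucmd.uscsi_cdb	  = (caddr_t)&cdb;
 *	ucmd.uscsi_cdblen = CDB_GROUP0;
 *	ucmd.uscsi_flags  = USCSI_SILENT;
 *	status = sd_send_scsi_cmd(dev, &ucmd, UIO_SYSSPACE,
 *	    UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_DIRECT);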
11567 */ 11568 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 11569 bcopy(incmd, uscmd, sizeof (struct uscsi_cmd)); 11570 11571 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_cmd: uscsi_cmd", 11572 (uchar_t *)uscmd, sizeof (struct uscsi_cmd), SD_LOG_HEX); 11573 11574 /* 11575 * Now get some space for the CDB, and copy the given CDB into 11576 * it. Use ddi_copyin() in case the data is in user space. 11577 */ 11578 uscmd->uscsi_cdb = kmem_zalloc((size_t)incmd->uscsi_cdblen, KM_SLEEP); 11579 flags = (cdbspace == UIO_SYSSPACE) ? FKIOCTL : 0; 11580 if (ddi_copyin(incmd->uscsi_cdb, uscmd->uscsi_cdb, 11581 (uint_t)incmd->uscsi_cdblen, flags) != 0) { 11582 kmem_free(uscmd->uscsi_cdb, (size_t)incmd->uscsi_cdblen); 11583 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 11584 return (EFAULT); 11585 } 11586 11587 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_cmd: CDB", 11588 (uchar_t *)uscmd->uscsi_cdb, incmd->uscsi_cdblen, SD_LOG_HEX); 11589 11590 bp = getrbuf(KM_SLEEP); 11591 11592 /* 11593 * Allocate an sd_uscsi_info struct and fill it with the info 11594 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 11595 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 11596 * since we allocate the buf here in this function, we do not 11597 * need to preserve the prior contents of b_private. 11598 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 11599 */ 11600 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 11601 uip->ui_flags = path_flag; 11602 uip->ui_cmdp = uscmd; 11603 bp->b_private = uip; 11604 11605 /* 11606 * Initialize Request Sense buffering, if requested. 11607 */ 11608 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 11609 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 11610 /* 11611 * Here uscmd->uscsi_rqbuf currently points to the caller's 11612 * buffer, but we replace this with a kernel buffer that 11613 * we allocate to use with the sense data. The sense data 11614 * (if present) gets copied into this new buffer before the 11615 * command is completed. Then we copy the sense data from 11616 * our allocated buf into the caller's buffer below. Note 11617 * that incmd->uscsi_rqbuf and incmd->uscsi_rqlen are used 11618 * below to perform the copy back to the caller's buf. 11619 */ 11620 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 11621 if (rqbufspace == UIO_USERSPACE) { 11622 uscmd->uscsi_rqlen = SENSE_LENGTH; 11623 uscmd->uscsi_rqresid = SENSE_LENGTH; 11624 } else { 11625 uchar_t rlen = min(SENSE_LENGTH, uscmd->uscsi_rqlen); 11626 uscmd->uscsi_rqlen = rlen; 11627 uscmd->uscsi_rqresid = rlen; 11628 } 11629 } else { 11630 uscmd->uscsi_rqbuf = NULL; 11631 uscmd->uscsi_rqlen = 0; 11632 uscmd->uscsi_rqresid = 0; 11633 } 11634 11635 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: rqbuf:0x%p rqlen:%d\n", 11636 uscmd->uscsi_rqbuf, uscmd->uscsi_rqlen); 11637 11638 if (un->un_f_is_fibre == FALSE) { 11639 /* 11640 * Force asynchronous mode, if necessary. Doing this here 11641 * has the unfortunate effect of running other queued 11642 * commands async also, but since the main purpose of this 11643 * capability is downloading new drive firmware, we can 11644 * probably live with it. 
11645 */ 11646 if ((uscmd->uscsi_flags & USCSI_ASYNC) != 0) { 11647 if (scsi_ifgetcap(SD_ADDRESS(un), "synchronous", 1) 11648 == 1) { 11649 if (scsi_ifsetcap(SD_ADDRESS(un), 11650 "synchronous", 0, 1) == 1) { 11651 SD_TRACE(SD_LOG_IO, un, 11652 "sd_send_scsi_cmd: forced async ok\n"); 11653 } else { 11654 SD_TRACE(SD_LOG_IO, un, 11655 "sd_send_scsi_cmd:\ 11656 forced async failed\n"); 11657 rval = EINVAL; 11658 goto done; 11659 } 11660 } 11661 } 11662 11663 /* 11664 * Re-enable synchronous mode, if requested 11665 */ 11666 if (uscmd->uscsi_flags & USCSI_SYNC) { 11667 if (scsi_ifgetcap(SD_ADDRESS(un), "synchronous", 1) 11668 == 0) { 11669 int i = scsi_ifsetcap(SD_ADDRESS(un), 11670 "synchronous", 1, 1); 11671 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11672 "re-enabled sync %s\n", 11673 (i == 1) ? "ok" : "failed"); 11674 } 11675 } 11676 } 11677 11678 /* 11679 * Commands sent with priority are intended for error recovery 11680 * situations, and do not have retries performed. 11681 */ 11682 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11683 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11684 } 11685 11686 /* 11687 * If we're going to do actual I/O, let physio do all the right things 11688 */ 11689 if (uscmd->uscsi_buflen != 0) { 11690 struct iovec aiov; 11691 struct uio auio; 11692 struct uio *uio = &auio; 11693 11694 bzero(&auio, sizeof (struct uio)); 11695 bzero(&aiov, sizeof (struct iovec)); 11696 aiov.iov_base = uscmd->uscsi_bufaddr; 11697 aiov.iov_len = uscmd->uscsi_buflen; 11698 uio->uio_iov = &aiov; 11699 11700 uio->uio_iovcnt = 1; 11701 uio->uio_resid = uscmd->uscsi_buflen; 11702 uio->uio_segflg = dataspace; 11703 11704 /* 11705 * physio() will block here until the command completes.... 11706 */ 11707 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: calling physio.\n"); 11708 11709 rval = physio(sd_uscsi_strategy, bp, dev, 11710 ((uscmd->uscsi_flags & USCSI_READ) ? B_READ : B_WRITE), 11711 sduscsimin, uio); 11712 11713 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11714 "returned from physio with 0x%x\n", rval); 11715 11716 } else { 11717 /* 11718 * We have to mimic what physio would do here! Argh! 11719 */ 11720 bp->b_flags = B_BUSY | 11721 ((uscmd->uscsi_flags & USCSI_READ) ? B_READ : B_WRITE); 11722 bp->b_edev = dev; 11723 bp->b_dev = cmpdev(dev); /* maybe unnecessary? */ 11724 bp->b_bcount = 0; 11725 bp->b_blkno = 0; 11726 11727 SD_TRACE(SD_LOG_IO, un, 11728 "sd_send_scsi_cmd: calling sd_uscsi_strategy...\n"); 11729 11730 (void) sd_uscsi_strategy(bp); 11731 11732 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: calling biowait\n"); 11733 11734 rval = biowait(bp); 11735 11736 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11737 "returned from biowait with 0x%x\n", rval); 11738 } 11739 11740 done: 11741 11742 #ifdef SDDEBUG 11743 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11744 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11745 uscmd->uscsi_status, uscmd->uscsi_resid); 11746 if (uscmd->uscsi_bufaddr != NULL) { 11747 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11748 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11749 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11750 if (dataspace == UIO_SYSSPACE) { 11751 SD_DUMP_MEMORY(un, SD_LOG_IO, 11752 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11753 uscmd->uscsi_buflen, SD_LOG_HEX); 11754 } 11755 } 11756 #endif 11757 11758 /* 11759 * Get the status and residual to return to the caller. 
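 *
 * Editor's illustration (assumed values, not from the source): if
 * SENSE_LENGTH were, say, 20 bytes and the target returned 18 bytes
 * of sense data into our bounce buffer, the HBA would leave
 * uscsi_rqresid == 2, and the copy-back below would compute
 *
 *	rqlen = 20 - 2 = 18
 *
 * bytes of valid sense data to return to the caller's rqbuf.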
11760 */ 11761 incmd->uscsi_status = uscmd->uscsi_status; 11762 incmd->uscsi_resid = uscmd->uscsi_resid; 11763 11764 /* 11765 * If the caller wants sense data, copy back whatever sense data 11766 * we may have gotten, and update the relevant rqsense info. 11767 */ 11768 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 11769 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 11770 11771 int rqlen = uscmd->uscsi_rqlen - uscmd->uscsi_rqresid; 11772 rqlen = min(((int)incmd->uscsi_rqlen), rqlen); 11773 11774 /* Update the Request Sense status and resid */ 11775 incmd->uscsi_rqresid = incmd->uscsi_rqlen - rqlen; 11776 incmd->uscsi_rqstatus = uscmd->uscsi_rqstatus; 11777 11778 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11779 "uscsi_rqstatus: 0x%02x uscsi_rqresid:0x%x\n", 11780 incmd->uscsi_rqstatus, incmd->uscsi_rqresid); 11781 11782 /* Copy out the sense data for user processes */ 11783 if ((incmd->uscsi_rqbuf != NULL) && (rqlen != 0)) { 11784 int flags = 11785 (rqbufspace == UIO_USERSPACE) ? 0 : FKIOCTL; 11786 if (ddi_copyout(uscmd->uscsi_rqbuf, incmd->uscsi_rqbuf, 11787 rqlen, flags) != 0) { 11788 rval = EFAULT; 11789 } 11790 /* 11791 * Note: Can't touch incmd->uscsi_rqbuf so use 11792 * uscmd->uscsi_rqbuf instead. They're the same. 11793 */ 11794 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11795 "incmd->uscsi_rqbuf: 0x%p rqlen:%d\n", 11796 incmd->uscsi_rqbuf, rqlen); 11797 SD_DUMP_MEMORY(un, SD_LOG_IO, "rq", 11798 (uchar_t *)uscmd->uscsi_rqbuf, rqlen, SD_LOG_HEX); 11799 } 11800 } 11801 11802 /* 11803 * Free allocated resources and return; mapout the buf in case it was 11804 * mapped in by a lower layer. 11805 */ 11806 bp_mapout(bp); 11807 freerbuf(bp); 11808 kmem_free(uip, sizeof (struct sd_uscsi_info)); 11809 if (uscmd->uscsi_rqbuf != NULL) { 11810 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 11811 } 11812 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 11813 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 11814 11815 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: exit\n"); 11816 11817 return (rval); 11818 } 11819 11820 11821 /* 11822 * Function: sd_buf_iodone 11823 * 11824 * Description: Frees the sd_xbuf & returns the buf to its originator. 11825 * 11826 * Context: May be called from interrupt context. 11827 */ 11828 /* ARGSUSED */ 11829 static void 11830 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 11831 { 11832 struct sd_xbuf *xp; 11833 11834 ASSERT(un != NULL); 11835 ASSERT(bp != NULL); 11836 ASSERT(!mutex_owned(SD_MUTEX(un))); 11837 11838 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 11839 11840 xp = SD_GET_XBUF(bp); 11841 ASSERT(xp != NULL); 11842 11843 mutex_enter(SD_MUTEX(un)); 11844 11845 /* 11846 * Grab time when the cmd completed. 11847 * This is used for determining if the system has been 11848 * idle long enough to make it idle to the PM framework. 11849 * This is for lowering the overhead, and therefore improving 11850 * performance per I/O operation. 
11851 */ 11852 un->un_pm_idle_time = ddi_get_time(); 11853 11854 un->un_ncmds_in_driver--; 11855 ASSERT(un->un_ncmds_in_driver >= 0); 11856 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 11857 un->un_ncmds_in_driver); 11858 11859 mutex_exit(SD_MUTEX(un)); 11860 11861 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 11862 biodone(bp); /* bp is gone after this */ 11863 11864 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 11865 } 11866 11867 11868 /* 11869 * Function: sd_uscsi_iodone 11870 * 11871 * Description: Frees the sd_xbuf & returns the buf to its originator. 11872 * 11873 * Context: May be called from interrupt context. 11874 */ 11875 /* ARGSUSED */ 11876 static void 11877 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11878 { 11879 struct sd_xbuf *xp; 11880 11881 ASSERT(un != NULL); 11882 ASSERT(bp != NULL); 11883 11884 xp = SD_GET_XBUF(bp); 11885 ASSERT(xp != NULL); 11886 ASSERT(!mutex_owned(SD_MUTEX(un))); 11887 11888 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 11889 11890 mutex_enter(SD_MUTEX(un)); 11891 11892 /* 11893 * Grab time when the cmd completed. 11894 * This is used for determining if the system has been 11895 * idle long enough to make it idle to the PM framework. 11896 * This is for lowering the overhead, and therefore improving 11897 * performance per I/O operation. 11898 */ 11899 un->un_pm_idle_time = ddi_get_time(); 11900 11901 un->un_ncmds_in_driver--; 11902 ASSERT(un->un_ncmds_in_driver >= 0); 11903 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 11904 un->un_ncmds_in_driver); 11905 11906 mutex_exit(SD_MUTEX(un)); 11907 11908 kmem_free(xp, sizeof (struct sd_xbuf)); 11909 biodone(bp); 11910 11911 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 11912 } 11913 11914 11915 /* 11916 * Function: sd_mapblockaddr_iostart 11917 * 11918 * Description: Verify request lies within the partition limits for 11919 * the indicated minor device. Issue "overrun" buf if 11920 * request would exceed partition range. Converts 11921 * partition-relative block address to absolute. 11922 * 11923 * Context: Can sleep 11924 * 11925 * Issues: This follows what the old code did, in terms of accessing 11926 * some of the partition info in the unit struct without holding 11927 * the mutex. This is a general issue: if the partition info 11928 * can be altered while IO is in progress... as soon as we send 11929 * a buf, its partitioning can be invalid before it gets to the 11930 * device. Probably the right fix is to move partitioning out 11931 * of the driver entirely. 11932 */ 11933 11934 static void 11935 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 11936 { 11937 daddr_t nblocks; /* #blocks in the given partition */ 11938 daddr_t blocknum; /* Block number specified by the buf */ 11939 size_t requested_nblocks; 11940 size_t available_nblocks; 11941 int partition; 11942 diskaddr_t partition_offset; 11943 struct sd_xbuf *xp; 11944 11945 11946 ASSERT(un != NULL); 11947 ASSERT(bp != NULL); 11948 ASSERT(!mutex_owned(SD_MUTEX(un))); 11949 11950 SD_TRACE(SD_LOG_IO_PARTITION, un, 11951 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 11952 11953 xp = SD_GET_XBUF(bp); 11954 ASSERT(xp != NULL); 11955 11956 /* 11957 * If the geometry is not indicated as valid, attempt to access 11958 * the unit & verify the geometry/label. This can be the case for 11959 * removable-media devices, or if the device was opened in 11960 * NDELAY/NONBLOCK mode.
11961 */ 11962 if ((un->un_f_geometry_is_valid != TRUE) && 11963 (sd_ready_and_valid(un) != SD_READY_VALID)) { 11964 /* 11965 * For removable devices it is possible to start an I/O 11966 * without media by opening the device in nodelay mode. 11967 * Also for writable CDs there can be many scenarios where 11968 * there is no geometry yet but the volume manager is trying to 11969 * issue a read() just because it can see a TOC on the CD. So 11970 * do not print a message for removables. 11971 */ 11972 if (!ISREMOVABLE(un)) { 11973 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11974 "i/o to invalid geometry\n"); 11975 } 11976 bioerror(bp, EIO); 11977 bp->b_resid = bp->b_bcount; 11978 SD_BEGIN_IODONE(index, un, bp); 11979 return; 11980 } 11981 11982 partition = SDPART(bp->b_edev); 11983 11984 /* #blocks in partition */ 11985 nblocks = un->un_map[partition].dkl_nblk; 11986 11987 /* Use of a local variable potentially improves performance slightly */ 11988 partition_offset = un->un_offset[partition]; 11989 11990 /* 11991 * blocknum is the starting block number of the request. At this 11992 * point it is still relative to the start of the minor device. 11993 */ 11994 blocknum = xp->xb_blkno; 11995 11996 /* 11997 * Legacy: If the starting block number is one past the last block 11998 * in the partition, do not set B_ERROR in the buf. 11999 */ 12000 if (blocknum == nblocks) { 12001 goto error_exit; 12002 } 12003 12004 /* 12005 * Confirm that the first block of the request lies within the 12006 * partition limits. Also the requested number of bytes must be 12007 * a multiple of the system block size. 12008 */ 12009 if ((blocknum < 0) || (blocknum >= nblocks) || 12010 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 12011 bp->b_flags |= B_ERROR; 12012 goto error_exit; 12013 } 12014 12015 /* 12016 * If the requested # blocks exceeds the available # blocks, that 12017 * is an overrun of the partition. 12018 */ 12019 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 12020 available_nblocks = (size_t)(nblocks - blocknum); 12021 ASSERT(nblocks >= blocknum); 12022 12023 if (requested_nblocks > available_nblocks) { 12024 /* 12025 * Allocate an "overrun" buf to allow the request to proceed 12026 * for the amount of space available in the partition. The 12027 * amount not transferred will be added into the b_resid 12028 * when the operation is complete. The overrun buf 12029 * replaces the original buf here, and the original buf 12030 * is saved inside the overrun buf, for later use. 12031 */ 12032 size_t resid = SD_SYSBLOCKS2BYTES(un, 12033 (offset_t)(requested_nblocks - available_nblocks)); 12034 size_t count = bp->b_bcount - resid; 12035 /* 12036 * Note: count is unsigned, thus it will NEVER 12037 * be less than 0, so ASSERT that the original values are 12038 * correct. 12039 */ 12040 ASSERT(bp->b_bcount >= resid); 12041 12042 bp = sd_bioclone_alloc(bp, count, blocknum, 12043 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 12044 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 12045 ASSERT(xp != NULL); 12046 } 12047 12048 /* At this point there should be no residual for this buf. */ 12049 ASSERT(bp->b_resid == 0); 12050 12051 /* Convert the block number to an absolute address.
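 * Editor's illustration (assumed numbers): a request at
 * partition-relative block 100, in a partition whose un_offset entry
 * is 2048, yields xb_blkno = 100 + 2048 = 2148 below.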
*/ 12052 xp->xb_blkno += partition_offset; 12053 12054 SD_NEXT_IOSTART(index, un, bp); 12055 12056 SD_TRACE(SD_LOG_IO_PARTITION, un, 12057 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 12058 12059 return; 12060 12061 error_exit: 12062 bp->b_resid = bp->b_bcount; 12063 SD_BEGIN_IODONE(index, un, bp); 12064 SD_TRACE(SD_LOG_IO_PARTITION, un, 12065 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 12066 } 12067 12068 12069 /* 12070 * Function: sd_mapblockaddr_iodone 12071 * 12072 * Description: Completion-side processing for partition management. 12073 * 12074 * Context: May be called under interrupt context 12075 */ 12076 12077 static void 12078 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 12079 { 12080 /* int partition; */ /* Not used, see below. */ 12081 ASSERT(un != NULL); 12082 ASSERT(bp != NULL); 12083 ASSERT(!mutex_owned(SD_MUTEX(un))); 12084 12085 SD_TRACE(SD_LOG_IO_PARTITION, un, 12086 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 12087 12088 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 12089 /* 12090 * We have an "overrun" buf to deal with... 12091 */ 12092 struct sd_xbuf *xp; 12093 struct buf *obp; /* ptr to the original buf */ 12094 12095 xp = SD_GET_XBUF(bp); 12096 ASSERT(xp != NULL); 12097 12098 /* Retrieve the pointer to the original buf */ 12099 obp = (struct buf *)xp->xb_private; 12100 ASSERT(obp != NULL); 12101 12102 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 12103 bioerror(obp, bp->b_error); 12104 12105 sd_bioclone_free(bp); 12106 12107 /* 12108 * Get back the original buf. 12109 * Note that since the restoration of xb_blkno below 12110 * was removed, the sd_xbuf is not needed. 12111 */ 12112 bp = obp; 12113 /* 12114 * xp = SD_GET_XBUF(bp); 12115 * ASSERT(xp != NULL); 12116 */ 12117 } 12118 12119 /* 12120 * Convert xp->xb_blkno back to a minor-device relative value. 12121 * Note: this has been commented out, as it is not needed in the 12122 * current implementation of the driver (ie, this function 12123 * is at the top of the layering chains, so the info will be 12124 * discarded) and it is in the "hot" IO path. 12125 * 12126 * partition = getminor(bp->b_edev) & SDPART_MASK; 12127 * xp->xb_blkno -= un->un_offset[partition]; 12128 */ 12129 12130 SD_NEXT_IODONE(index, un, bp); 12131 12132 SD_TRACE(SD_LOG_IO_PARTITION, un, 12133 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 12134 } 12135 12136 12137 /* 12138 * Function: sd_mapblocksize_iostart 12139 * 12140 * Description: Convert between system block size (un->un_sys_blocksize) 12141 * and target block size (un->un_tgt_blocksize). 12142 * 12143 * Context: Can sleep to allocate resources. 12144 * 12145 * Assumptions: A higher layer has already performed any partition validation, 12146 * and converted the xp->xb_blkno to an absolute value relative 12147 * to the start of the device. 12148 * 12149 * It is also assumed that the higher layer has implemented 12150 * an "overrun" mechanism for the case where the request would 12151 * read/write beyond the end of a partition. In this case we 12152 * assume (and ASSERT) that bp->b_resid == 0. 12153 * 12154 * Note: The implementation for this routine assumes the target 12155 * block size remains constant between allocation and transport.
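 *
 * Editor's note (example setting, not from the source): the classic
 * case for this layer is a CD-ROM or DVD-RAM with 2048-byte target
 * blocks presented through a 512-byte un_sys_blocksize, so every
 * four system blocks map onto one target block.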
12156 */ 12157 12158 static void 12159 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 12160 { 12161 struct sd_mapblocksize_info *bsp; 12162 struct sd_xbuf *xp; 12163 offset_t first_byte; 12164 daddr_t start_block, end_block; 12165 daddr_t request_bytes; 12166 ushort_t is_aligned = FALSE; 12167 12168 ASSERT(un != NULL); 12169 ASSERT(bp != NULL); 12170 ASSERT(!mutex_owned(SD_MUTEX(un))); 12171 ASSERT(bp->b_resid == 0); 12172 12173 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12174 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 12175 12176 /* 12177 * For a non-writable CD, a write request is an error 12178 */ 12179 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 12180 (un->un_f_mmc_writable_media == FALSE)) { 12181 bioerror(bp, EIO); 12182 bp->b_resid = bp->b_bcount; 12183 SD_BEGIN_IODONE(index, un, bp); 12184 return; 12185 } 12186 12187 /* 12188 * We do not need a shadow buf if the device is using 12189 * un->un_sys_blocksize as its block size or if bcount == 0. 12190 * In this case there is no layer-private data block allocated. 12191 */ 12192 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12193 (bp->b_bcount == 0)) { 12194 goto done; 12195 } 12196 12197 #if defined(__i386) || defined(__amd64) 12198 /* We do not support non-block-aligned transfers for ROD devices */ 12199 ASSERT(!ISROD(un)); 12200 #endif 12201 12202 xp = SD_GET_XBUF(bp); 12203 ASSERT(xp != NULL); 12204 12205 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12206 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 12207 un->un_tgt_blocksize, un->un_sys_blocksize); 12208 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12209 "request start block:0x%x\n", xp->xb_blkno); 12210 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12211 "request len:0x%x\n", bp->b_bcount); 12212 12213 /* 12214 * Allocate the layer-private data area for the mapblocksize layer. 12215 * Layers are allowed to use the xb_private member of the sd_xbuf 12216 * struct to store the pointer to their layer-private data block, but 12217 * each layer also has the responsibility of restoring the prior 12218 * contents of xb_private before returning the buf/xbuf to the 12219 * higher layer that sent it. 12220 * 12221 * Here we save the prior contents of xp->xb_private into the 12222 * bsp->mbs_oprivate field of our layer-private data area. This value 12223 * is restored by sd_mapblocksize_iodone() just prior to freeing up 12224 * the layer-private area and returning the buf/xbuf to the layer 12225 * that sent it. 12226 * 12227 * Note that here we use kmem_zalloc for the allocation as there are 12228 * parts of the mapblocksize code that expect certain fields to be 12229 * zero unless explicitly set to a required value. 12230 */ 12231 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12232 bsp->mbs_oprivate = xp->xb_private; 12233 xp->xb_private = bsp; 12234 12235 /* 12236 * This treats the data on the disk (target) as an array of bytes. 12237 * first_byte is the byte offset, from the beginning of the device, 12238 * to the location of the request. This is converted from a 12239 * un->un_sys_blocksize block address to a byte offset, and then back 12240 * to a block address based upon a un->un_tgt_blocksize block size. 12241 * 12242 * xp->xb_blkno should be absolute upon entry into this function, 12243 * but it is based upon partitions that use the "system" 12244 * block size. It must be adjusted to reflect the block size of 12245 * the target.
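 *
 * Editor's worked example (assumed numbers, 512-byte system blocks
 * and 2048-byte target blocks): for xb_blkno = 10 and b_bcount = 1024,
 * the computations below give
 *
 *	first_byte    = 10 * 512                    = 5120
 *	start_block   = 5120 / 2048                 = 2
 *	end_block     = (5120 + 1024 + 2047) / 2048 = 3
 *	request_bytes = (3 - 2) * 2048              = 2048
 *
 * first_byte % 2048 != 0, so this request is unaligned and a shadow
 * buf will be needed.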
12246 * 12247 * Note that end_block is actually the block that follows the last 12248 * block of the request, but that's what is needed for the computation. 12249 */ 12250 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12251 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 12252 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 12253 un->un_tgt_blocksize; 12254 12255 /* request_bytes is rounded up to a multiple of the target block size */ 12256 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 12257 12258 /* 12259 * See if the starting address of the request and the request 12260 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 12261 * then we do not need to allocate a shadow buf to handle the request. 12262 */ 12263 if (((first_byte % un->un_tgt_blocksize) == 0) && 12264 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 12265 is_aligned = TRUE; 12266 } 12267 12268 if ((bp->b_flags & B_READ) == 0) { 12269 /* 12270 * Lock the range for a write operation. An aligned request is 12271 * considered a simple write; otherwise the request must be a 12272 * read-modify-write. 12273 */ 12274 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 12275 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 12276 } 12277 12278 /* 12279 * Alloc a shadow buf if the request is not aligned. Also, this is 12280 * where the READ command is generated for a read-modify-write. (The 12281 * write phase is deferred until after the read completes.) 12282 */ 12283 if (is_aligned == FALSE) { 12284 12285 struct sd_mapblocksize_info *shadow_bsp; 12286 struct sd_xbuf *shadow_xp; 12287 struct buf *shadow_bp; 12288 12289 /* 12290 * Allocate the shadow buf and it associated xbuf. Note that 12291 * after this call the xb_blkno value in both the original 12292 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 12293 * same: absolute relative to the start of the device, and 12294 * adjusted for the target block size. The b_blkno in the 12295 * shadow buf will also be set to this value. We should never 12296 * change b_blkno in the original bp however. 12297 * 12298 * Note also that the shadow buf will always need to be a 12299 * READ command, regardless of whether the incoming command 12300 * is a READ or a WRITE. 12301 */ 12302 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 12303 xp->xb_blkno, 12304 (int (*)(struct buf *)) sd_mapblocksize_iodone); 12305 12306 shadow_xp = SD_GET_XBUF(shadow_bp); 12307 12308 /* 12309 * Allocate the layer-private data for the shadow buf. 12310 * (No need to preserve xb_private in the shadow xbuf.) 12311 */ 12312 shadow_xp->xb_private = shadow_bsp = 12313 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12314 12315 /* 12316 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 12317 * to figure out where the start of the user data is (based upon 12318 * the system block size) in the data returned by the READ 12319 * command (which will be based upon the target blocksize). Note 12320 * that this is only really used if the request is unaligned. 
12321 */ 12322 bsp->mbs_copy_offset = (ssize_t)(first_byte - 12323 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 12324 ASSERT((bsp->mbs_copy_offset >= 0) && 12325 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 12326 12327 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 12328 12329 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 12330 12331 /* Transfer the wmap (if any) to the shadow buf */ 12332 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 12333 bsp->mbs_wmp = NULL; 12334 12335 /* 12336 * The shadow buf goes on from here in place of the 12337 * original buf. 12338 */ 12339 shadow_bsp->mbs_orig_bp = bp; 12340 bp = shadow_bp; 12341 } 12342 12343 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12344 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 12345 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12346 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 12347 request_bytes); 12348 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12349 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 12350 12351 done: 12352 SD_NEXT_IOSTART(index, un, bp); 12353 12354 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12355 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 12356 } 12357 12358 12359 /* 12360 * Function: sd_mapblocksize_iodone 12361 * 12362 * Description: Completion side processing for block-size mapping. 12363 * 12364 * Context: May be called under interrupt context 12365 */ 12366 12367 static void 12368 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 12369 { 12370 struct sd_mapblocksize_info *bsp; 12371 struct sd_xbuf *xp; 12372 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 12373 struct buf *orig_bp; /* ptr to the original buf */ 12374 offset_t shadow_end; 12375 offset_t request_end; 12376 offset_t shadow_start; 12377 ssize_t copy_offset; 12378 size_t copy_length; 12379 size_t shortfall; 12380 uint_t is_write; /* TRUE if this bp is a WRITE */ 12381 uint_t has_wmap; /* TRUE if this bp has a wmap */ 12382 12383 ASSERT(un != NULL); 12384 ASSERT(bp != NULL); 12385 12386 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12387 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 12388 12389 /* 12390 * There is no shadow buf or layer-private data if the target is 12391 * using un->un_sys_blocksize as its block size or if bcount == 0. 12392 */ 12393 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12394 (bp->b_bcount == 0)) { 12395 goto exit; 12396 } 12397 12398 xp = SD_GET_XBUF(bp); 12399 ASSERT(xp != NULL); 12400 12401 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 12402 bsp = xp->xb_private; 12403 12404 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 12405 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 12406 12407 if (is_write) { 12408 /* 12409 * For a WRITE request we must free up the block range that 12410 * we have locked up. This holds regardless of whether this is 12411 * an aligned write request or a read-modify-write request. 12412 */ 12413 sd_range_unlock(un, bsp->mbs_wmp); 12414 bsp->mbs_wmp = NULL; 12415 } 12416 12417 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 12418 /* 12419 * An aligned read or write command will have no shadow buf; 12420 * there is not much else to do with it. 12421 */ 12422 goto done; 12423 } 12424 12425 orig_bp = bsp->mbs_orig_bp; 12426 ASSERT(orig_bp != NULL); 12427 orig_xp = SD_GET_XBUF(orig_bp); 12428 ASSERT(orig_xp != NULL); 12429 ASSERT(!mutex_owned(SD_MUTEX(un))); 12430 12431 if (!is_write && has_wmap) { 12432 /* 12433 * A READ with a wmap means this is the READ phase of a 12434 * read-modify-write.
If an error occurred on the READ, then 12435 * we do not proceed with the WRITE phase or copy any data. 12436 * Just release the write maps and return with an error. 12437 */ 12438 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 12439 orig_bp->b_resid = orig_bp->b_bcount; 12440 bioerror(orig_bp, bp->b_error); 12441 sd_range_unlock(un, bsp->mbs_wmp); 12442 goto freebuf_done; 12443 } 12444 } 12445 12446 /* 12447 * Here is where we set up to copy the data from the shadow buf 12448 * into the space associated with the original buf. 12449 * 12450 * To deal with the conversion between block sizes, these 12451 * computations treat the data as an array of bytes, with the 12452 * first byte (byte 0) corresponding to the first byte in the 12453 * first block on the disk. 12454 */ 12455 12456 /* 12457 * shadow_start and shadow_end indicate the location and size of 12458 * the data returned with the shadow IO request. 12459 */ 12460 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12461 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 12462 12463 /* 12464 * copy_offset gives the offset (in bytes) from the start of the first 12465 * block of the READ request to the beginning of the data. We retrieve 12466 * this value from the layer-private data of the xbuf, where it was 12467 * saved by sd_mapblocksize_iostart(). copy_length gives the amount of 12468 * data to be copied (in bytes). 12469 */ 12470 copy_offset = bsp->mbs_copy_offset; 12471 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 12472 copy_length = orig_bp->b_bcount; 12473 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 12474 12475 /* 12476 * Set up the resid and error fields of orig_bp as appropriate. 12477 */ 12478 if (shadow_end >= request_end) { 12479 /* We got all the requested data; set resid to zero */ 12480 orig_bp->b_resid = 0; 12481 } else { 12482 /* 12483 * We failed to get enough data to fully satisfy the original 12484 * request. Just copy back whatever data we got and set 12485 * up the residual and error code as required. 12486 * 12487 * 'shortfall' is the amount by which the data received with the 12488 * shadow buf has "fallen short" of the requested amount. 12489 */ 12490 shortfall = (size_t)(request_end - shadow_end); 12491 12492 if (shortfall > orig_bp->b_bcount) { 12493 /* 12494 * We did not get enough data to even partially 12495 * fulfill the original request. The residual is 12496 * equal to the amount requested. 12497 */ 12498 orig_bp->b_resid = orig_bp->b_bcount; 12499 } else { 12500 /* 12501 * We did not get all the data that we requested 12502 * from the device, but we will try to return what 12503 * portion we did get. 12504 */ 12505 orig_bp->b_resid = shortfall; 12506 } 12507 ASSERT(copy_length >= orig_bp->b_resid); 12508 copy_length -= orig_bp->b_resid; 12509 } 12510 12511 /* Propagate the error code from the shadow buf to the original buf */ 12512 bioerror(orig_bp, bp->b_error); 12513 12514 if (is_write) { 12515 goto freebuf_done; /* No data copying for a WRITE */ 12516 } 12517 12518 if (has_wmap) { 12519 /* 12520 * This is a READ command from the READ phase of a 12521 * read-modify-write request. We have to copy the data given 12522 * by the user OVER the data returned by the READ command, 12523 * then convert the command from a READ to a WRITE and send 12524 * it back to the target.
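 *
 * Editor's illustration (continuing the assumed example above): the
 * bcopy() below lays the caller's 1024 bytes at offset 1024 within
 * the 2048-byte shadow block, so the WRITE phase rewrites the target
 * block with the surrounding bytes preserved.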
12525 */ 12526 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 12527 copy_length); 12528 12529 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 12530 12531 /* 12532 * Dispatch the WRITE command to the taskq thread, which 12533 * will in turn send the command to the target. When the 12534 * WRITE command completes, we (sd_mapblocksize_iodone()) 12535 * will get called again as part of the iodone chain 12536 * processing for it. Note that we will still be dealing 12537 * with the shadow buf at that point. 12538 */ 12539 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 12540 KM_NOSLEEP) != 0) { 12541 /* 12542 * Dispatch was successful so we are done. Return 12543 * without going any higher up the iodone chain. Do 12544 * not free up any layer-private data until after the 12545 * WRITE completes. 12546 */ 12547 return; 12548 } 12549 12550 /* 12551 * Dispatch of the WRITE command failed; set up the error 12552 * condition and send this IO back up the iodone chain. 12553 */ 12554 bioerror(orig_bp, EIO); 12555 orig_bp->b_resid = orig_bp->b_bcount; 12556 12557 } else { 12558 /* 12559 * This is a regular READ request (ie, not a RMW). Copy the 12560 * data from the shadow buf into the original buf. The 12561 * copy_offset compensates for any "misalignment" between the 12562 * shadow buf (with its un->un_tgt_blocksize blocks) and the 12563 * original buf (with its un->un_sys_blocksize blocks). 12564 */ 12565 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 12566 copy_length); 12567 } 12568 12569 freebuf_done: 12570 12571 /* 12572 * At this point we still have both the shadow buf AND the original 12573 * buf to deal with, as well as the layer-private data area in each. 12574 * Local variables are as follows: 12575 * 12576 * bp -- points to shadow buf 12577 * xp -- points to xbuf of shadow buf 12578 * bsp -- points to layer-private data area of shadow buf 12579 * orig_bp -- points to original buf 12580 * 12581 * First free the shadow buf and its associated xbuf, then free the 12582 * layer-private data area from the shadow buf. There is no need to 12583 * restore xb_private in the shadow xbuf. 12584 */ 12585 sd_shadow_buf_free(bp); 12586 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12587 12588 /* 12589 * Now update the local variables to point to the original buf, xbuf, 12590 * and layer-private area. 12591 */ 12592 bp = orig_bp; 12593 xp = SD_GET_XBUF(bp); 12594 ASSERT(xp != NULL); 12595 ASSERT(xp == orig_xp); 12596 bsp = xp->xb_private; 12597 ASSERT(bsp != NULL); 12598 12599 done: 12600 /* 12601 * Restore xb_private to whatever it was set to by the next higher 12602 * layer in the chain, then free the layer-private data area. 12603 */ 12604 xp->xb_private = bsp->mbs_oprivate; 12605 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12606 12607 exit: 12608 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 12609 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 12610 12611 SD_NEXT_IODONE(index, un, bp); 12612 } 12613 12614 12615 /* 12616 * Function: sd_checksum_iostart 12617 * 12618 * Description: A stub function for a layer that's currently not used. 12619 * For now just a placeholder. 
 *
 * Context: Kernel thread context
 */

static void
sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IOSTART(index, un, bp);
}


/*
 * Function: sd_checksum_iodone
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: May be called under interrupt context
 */

static void
sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function: sd_checksum_uscsi_iostart
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: Kernel thread context
 */

static void
sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IOSTART(index, un, bp);
}


/*
 * Function: sd_checksum_uscsi_iodone
 *
 * Description: A stub function for a layer that's currently not used.
 *		For now just a placeholder.
 *
 * Context: May be called under interrupt context
 */

static void
sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IODONE(index, un, bp);
}


/*
 * Function: sd_pm_iostart
 *
 * Description: iostart-side routine for power management.
 *
 * Context: Kernel thread context
 */

static void
sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");

	if (sd_pm_entry(un) != DDI_SUCCESS) {
		/*
		 * Set up to return the failed buf back up the 'iodone'
		 * side of the calling chain.
		 */
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;

		SD_BEGIN_IODONE(index, un, bp);

		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
		return;
	}

	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
}


/*
 * Function: sd_pm_iodone
 *
 * Description: iodone-side routine for power management.
 *
 * Context: May be called from interrupt context
 */

static void
sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");

	/*
	 * After attach the following flag is only read, so don't
	 * take the penalty of acquiring a mutex for it.
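	 *
	 * Pairing note: each successful sd_pm_entry() made in
	 * sd_pm_iostart() is balanced here by sd_pm_exit(), so the
	 * device's power-management busy count returns to its prior
	 * value once the command completes.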
12749 */ 12750 if (un->un_f_pm_is_enabled == TRUE) { 12751 sd_pm_exit(un); 12752 } 12753 12754 SD_NEXT_IODONE(index, un, bp); 12755 12756 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 12757 } 12758 12759 12760 /* 12761 * Function: sd_core_iostart 12762 * 12763 * Description: Primary driver function for enqueuing buf(9S) structs from 12764 * the system and initiating IO to the target device 12765 * 12766 * Context: Kernel thread context. Can sleep. 12767 * 12768 * Assumptions: - The given xp->xb_blkno is absolute 12769 * (ie, relative to the start of the device). 12770 * - The IO is to be done using the native blocksize of 12771 * the device, as specified in un->un_tgt_blocksize. 12772 */ 12773 /* ARGSUSED */ 12774 static void 12775 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 12776 { 12777 struct sd_xbuf *xp; 12778 12779 ASSERT(un != NULL); 12780 ASSERT(bp != NULL); 12781 ASSERT(!mutex_owned(SD_MUTEX(un))); 12782 ASSERT(bp->b_resid == 0); 12783 12784 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 12785 12786 xp = SD_GET_XBUF(bp); 12787 ASSERT(xp != NULL); 12788 12789 mutex_enter(SD_MUTEX(un)); 12790 12791 /* 12792 * If we are currently in the failfast state, fail any new IO 12793 * that has B_FAILFAST set, then return. 12794 */ 12795 if ((bp->b_flags & B_FAILFAST) && 12796 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 12797 mutex_exit(SD_MUTEX(un)); 12798 bioerror(bp, EIO); 12799 bp->b_resid = bp->b_bcount; 12800 SD_BEGIN_IODONE(index, un, bp); 12801 return; 12802 } 12803 12804 if (SD_IS_DIRECT_PRIORITY(xp)) { 12805 /* 12806 * Priority command -- transport it immediately. 12807 * 12808 * Note: We may want to assert that USCSI_DIAGNOSE is set, 12809 * because all direct priority commands should be associated 12810 * with error recovery actions which we don't want to retry. 12811 */ 12812 sd_start_cmds(un, bp); 12813 } else { 12814 /* 12815 * Normal command -- add it to the wait queue, then start 12816 * transporting commands from the wait queue. 12817 */ 12818 sd_add_buf_to_waitq(un, bp); 12819 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 12820 sd_start_cmds(un, NULL); 12821 } 12822 12823 mutex_exit(SD_MUTEX(un)); 12824 12825 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 12826 } 12827 12828 12829 /* 12830 * Function: sd_init_cdb_limits 12831 * 12832 * Description: This is to handle scsi_pkt initialization differences 12833 * between the driver platforms. 12834 * 12835 * Legacy behaviors: 12836 * 12837 * If the block number or the sector count exceeds the 12838 * capabilities of a Group 0 command, shift over to a 12839 * Group 1 command. We don't blindly use Group 1 12840 * commands because a) some drives (CDC Wren IVs) get a 12841 * bit confused, and b) there is probably a fair amount 12842 * of speed difference for a target to receive and decode 12843 * a 10 byte command instead of a 6 byte command. 12844 * 12845 * The xfer time difference of 6 vs 10 byte CDBs is 12846 * still significant so this code is still worthwhile. 12847 * 10 byte CDBs are very inefficient with the fas HBA driver 12848 * and older disks. Each CDB byte took 1 usec with some 12849 * popular disks. 12850 * 12851 * Context: Must be called at attach time 12852 */ 12853 12854 static void 12855 sd_init_cdb_limits(struct sd_lun *un) 12856 { 12857 /* 12858 * Use CDB_GROUP1 commands for most devices except for 12859 * parallel SCSI fixed drives in which case we get better 12860 * performance using CDB_GROUP0 commands (where applicable). 
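	 *
	 * For reference (standard SCSI CDB limits, not driver-specific
	 * values): a Group 0 (6-byte) CDB carries a 21-bit LBA and an
	 * 8-bit block count; Group 1 (10-byte) a 32-bit LBA and 16-bit
	 * count; Group 5 (12-byte) a 32-bit LBA and 32-bit count; and
	 * Group 4 (16-byte) a 64-bit LBA and 32-bit count. These limits
	 * are what the sd_cdbtab scan in sd_setup_rw_pkt() checks against.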
 */
	un->un_mincdb = SD_CDB_GROUP1;
#if !defined(__fibre)
	if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) &&
	    !ISREMOVABLE(un)) {
		un->un_mincdb = SD_CDB_GROUP0;
	}
#endif

	/*
	 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4
	 * commands for fixed disks unless we are building for a 32 bit
	 * kernel.
	 */
#ifdef _LP64
	un->un_maxcdb = (ISREMOVABLE(un)) ? SD_CDB_GROUP5 : SD_CDB_GROUP4;
#else
	un->un_maxcdb = (ISREMOVABLE(un)) ? SD_CDB_GROUP5 : SD_CDB_GROUP1;
#endif

	/*
	 * x86 systems require the PKT_DMA_PARTIAL flag
	 */
#if defined(__x86)
	un->un_pkt_flags = PKT_DMA_PARTIAL;
#else
	un->un_pkt_flags = 0;
#endif

	un->un_status_len = (int)((un->un_f_arq_enabled == TRUE)
	    ? sizeof (struct scsi_arq_status) : 1);
	un->un_cmd_timeout = (ushort_t)sd_io_time;
	un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout;
}


/*
 * Function: sd_initpkt_for_buf
 *
 * Description: Allocate and initialize for transport a scsi_pkt struct,
 *		based upon the info specified in the given buf struct.
 *
 *		Assumes the xb_blkno in the request is absolute (i.e.,
 *		relative to the start of the device, NOT the partition).
 *		Also assumes that the request is using the native block
 *		size of the device (as returned by the READ CAPACITY
 *		command).
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *		SD_PKT_ALLOC_FAILURE
 *		SD_PKT_ALLOC_FAILURE_NO_DMA
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context: Kernel thread and may be called from software interrupt context
 *		as part of a sdrunout callback. This function may not block or
 *		call routines that block
 */

static int
sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp = NULL;
	struct sd_lun *un;
	size_t blockcount;
	daddr_t startblock;
	int rval;
	int cmd_flags;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
	if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
		/*
		 * Already have a scsi_pkt -- just need DMA resources.
		 * We must recompute the CDB in case the mapping returns
		 * a nonzero pkt_resid.
		 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
		 * that is being retried, the unmap/remap of the DMA resources
		 * will result in the entire transfer starting over again
		 * from the very first block.
		 */
		ASSERT(xp->xb_pktp != NULL);
		pktp = xp->xb_pktp;
	} else {
		pktp = NULL;
	}
#endif /* __i386 || __amd64 */

	startblock = xp->xb_blkno;	/* Absolute block num. */
	blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */

	cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);

#else

	cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags;

#endif

	/*
	 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
	 * call scsi_init_pkt, and build the CDB.
	 */
	rval = sd_setup_rw_pkt(un, &pktp, bp,
	    cmd_flags, sdrunout, (caddr_t)un,
	    startblock, blockcount);

	if (rval == 0) {
		/*
		 * Success.
		 *
		 * If partial DMA is being used and required for this transfer,
		 * set it up here.
		 */
		if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
		    (pktp->pkt_resid != 0)) {

			/*
			 * Save the CDB length and pkt_resid for the
			 * next xfer
			 */
			xp->xb_dma_resid = pktp->pkt_resid;

			/* rezero resid */
			pktp->pkt_resid = 0;

		} else {
			xp->xb_dma_resid = 0;
		}

		pktp->pkt_flags = un->un_tagflags;
		pktp->pkt_time = un->un_cmd_timeout;
		pktp->pkt_comp = sdintr;

		pktp->pkt_private = bp;
		*pktpp = pktp;

		SD_TRACE(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
#endif

		return (SD_PKT_ALLOC_SUCCESS);

	}

	/*
	 * SD_PKT_ALLOC_FAILURE is the only expected failure code
	 * from sd_setup_rw_pkt.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	if (rval == SD_PKT_ALLOC_FAILURE) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations. The driver will not
		 * suspend, pm_suspend, or detach while the state is RWAIT.
		 */
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);

		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	} else {
		/*
		 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
		 *
		 * This should never happen. Maybe someone messed with the
		 * kernel's minphys?
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Request rejected: too large for CDB: "
		    "lba:0x%08lx len:0x%08lx\n", startblock, blockcount);
		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);

	}
}


/*
 * Function: sd_destroypkt_for_buf
 *
 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
 *
 * Context: Kernel thread or interrupt context
 */

static void
sd_destroypkt_for_buf(struct buf *bp)
{
	ASSERT(bp != NULL);
	ASSERT(SD_GET_UN(bp) != NULL);

	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
	    "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);

	ASSERT(SD_GET_PKTP(bp) != NULL);
	scsi_destroy_pkt(SD_GET_PKTP(bp));

	SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
	    "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
}

/*
 * Function: sd_setup_rw_pkt
 *
 * Description: Determines appropriate CDB group for the requested LBA
 *		and transfer length, calls scsi_init_pkt, and builds
 *		the CDB. Do not use for partial DMA transfers except
 *		for the initial transfer since the CDB size must
 *		remain constant.
 *
 * Context: Kernel thread and may be called from software interrupt
 *		context as part of a sdrunout callback. This function may not
 *		block or call routines that block
 */


int
sd_setup_rw_pkt(struct sd_lun *un,
    struct scsi_pkt **pktpp, struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount)
{
	struct scsi_pkt *return_pktp;
	union scsi_cdb *cdbp;
	struct sd_cdbinfo *cp = NULL;
	int i;

	/*
	 * See which size CDB to use, based upon the request.
	 */
	for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {

		/*
		 * Check lba and block count against sd_cdbtab limits.
		 * In the partial DMA case, we have to use the same size
		 * CDB for all the transfers. Check lba + blockcount
		 * against the max LBA so we know that segment of the
		 * transfer can use the CDB we select.
		 */
		if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
		    (blockcount <= sd_cdbtab[i].sc_maxlen)) {

			/*
			 * The command will fit into the CDB type
			 * specified by sd_cdbtab[i].
			 */
			cp = sd_cdbtab + i;

			/*
			 * Call scsi_init_pkt so we can fill in the
			 * CDB.
			 */
			return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
			    bp, cp->sc_grpcode, un->un_status_len, 0,
			    flags, callback, callback_arg);

			if (return_pktp != NULL) {

				/*
				 * Return new value of pkt
				 */
				*pktpp = return_pktp;

				/*
				 * To be safe, zero the CDB ensuring there is
				 * no leftover data from a previous command.
				 */
				bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);

				/*
				 * Handle partial DMA mapping
				 */
				if (return_pktp->pkt_resid != 0) {

					/*
					 * Not going to xfer as many blocks as
					 * originally expected
					 */
					blockcount -=
					    SD_BYTES2TGTBLOCKS(un,
					    return_pktp->pkt_resid);
				}

				cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;

				/*
				 * Set command byte based on the CDB
				 * type we matched.
				 */
				cdbp->scc_cmd = cp->sc_grpmask |
				    ((bp->b_flags & B_READ) ?
13176 SCMD_READ : SCMD_WRITE); 13177 13178 SD_FILL_SCSI1_LUN(un, return_pktp); 13179 13180 /* 13181 * Fill in LBA and length 13182 */ 13183 ASSERT((cp->sc_grpcode == CDB_GROUP1) || 13184 (cp->sc_grpcode == CDB_GROUP4) || 13185 (cp->sc_grpcode == CDB_GROUP0) || 13186 (cp->sc_grpcode == CDB_GROUP5)); 13187 13188 if (cp->sc_grpcode == CDB_GROUP1) { 13189 FORMG1ADDR(cdbp, lba); 13190 FORMG1COUNT(cdbp, blockcount); 13191 return (0); 13192 } else if (cp->sc_grpcode == CDB_GROUP4) { 13193 FORMG4LONGADDR(cdbp, lba); 13194 FORMG4COUNT(cdbp, blockcount); 13195 return (0); 13196 } else if (cp->sc_grpcode == CDB_GROUP0) { 13197 FORMG0ADDR(cdbp, lba); 13198 FORMG0COUNT(cdbp, blockcount); 13199 return (0); 13200 } else if (cp->sc_grpcode == CDB_GROUP5) { 13201 FORMG5ADDR(cdbp, lba); 13202 FORMG5COUNT(cdbp, blockcount); 13203 return (0); 13204 } 13205 13206 /* 13207 * It should be impossible to not match one 13208 * of the CDB types above, so we should never 13209 * reach this point. Set the CDB command byte 13210 * to test-unit-ready to avoid writing 13211 * to somewhere we don't intend. 13212 */ 13213 cdbp->scc_cmd = SCMD_TEST_UNIT_READY; 13214 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13215 } else { 13216 /* 13217 * Couldn't get scsi_pkt 13218 */ 13219 return (SD_PKT_ALLOC_FAILURE); 13220 } 13221 } 13222 } 13223 13224 /* 13225 * None of the available CDB types were suitable. This really 13226 * should never happen: on a 64 bit system we support 13227 * READ16/WRITE16 which will hold an entire 64 bit disk address 13228 * and on a 32 bit system we will refuse to bind to a device 13229 * larger than 2TB so addresses will never be larger than 32 bits. 13230 */ 13231 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL); 13232 } 13233 13234 #if defined(__i386) || defined(__amd64) 13235 /* 13236 * Function: sd_setup_next_rw_pkt 13237 * 13238 * Description: Setup packet for partial DMA transfers, except for the 13239 * initial transfer. sd_setup_rw_pkt should be used for 13240 * the initial transfer. 13241 * 13242 * Context: Kernel thread and may be called from interrupt context. 13243 */ 13244 13245 int 13246 sd_setup_next_rw_pkt(struct sd_lun *un, 13247 struct scsi_pkt *pktp, struct buf *bp, 13248 diskaddr_t lba, uint32_t blockcount) 13249 { 13250 uchar_t com; 13251 union scsi_cdb *cdbp; 13252 uchar_t cdb_group_id; 13253 13254 ASSERT(pktp != NULL); 13255 ASSERT(pktp->pkt_cdbp != NULL); 13256 13257 cdbp = (union scsi_cdb *)pktp->pkt_cdbp; 13258 com = cdbp->scc_cmd; 13259 cdb_group_id = CDB_GROUPID(com); 13260 13261 ASSERT((cdb_group_id == CDB_GROUPID_0) || 13262 (cdb_group_id == CDB_GROUPID_1) || 13263 (cdb_group_id == CDB_GROUPID_4) || 13264 (cdb_group_id == CDB_GROUPID_5)); 13265 13266 /* 13267 * Move pkt to the next portion of the xfer. 13268 * func is NULL_FUNC so we do not have to release 13269 * the disk mutex here. 13270 */ 13271 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0, 13272 NULL_FUNC, NULL) == pktp) { 13273 /* Success. 
Handle partial DMA */
		if (pktp->pkt_resid != 0) {
			blockcount -=
			    SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
		}

		cdbp->scc_cmd = com;
		SD_FILL_SCSI1_LUN(un, pktp);
		if (cdb_group_id == CDB_GROUPID_1) {
			FORMG1ADDR(cdbp, lba);
			FORMG1COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_4) {
			FORMG4LONGADDR(cdbp, lba);
			FORMG4COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_0) {
			FORMG0ADDR(cdbp, lba);
			FORMG0COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_5) {
			FORMG5ADDR(cdbp, lba);
			FORMG5COUNT(cdbp, blockcount);
			return (0);
		}

		/* Unreachable */
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
	}

	/*
	 * Error setting up next portion of cmd transfer.
	 * Something is definitely very wrong and this
	 * should not happen.
	 */
	return (SD_PKT_ALLOC_FAILURE);
}
#endif /* defined(__i386) || defined(__amd64) */

/*
 * Function: sd_initpkt_for_uscsi
 *
 * Description: Allocate and initialize for transport a scsi_pkt struct,
 *		based upon the info specified in the given uscsi_cmd struct.
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *		SD_PKT_ALLOC_FAILURE
 *		SD_PKT_ALLOC_FAILURE_NO_DMA
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context: Kernel thread and may be called from software interrupt context
 *		as part of a sdrunout callback. This function may not block or
 *		call routines that block
 */

static int
sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_lun *un;
	uint32_t flags = 0;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);

	/* Allocate the scsi_pkt for the command. */
	pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
	    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
	    sizeof (struct scsi_arq_status), 0, un->un_pkt_flags,
	    sdrunout, (caddr_t)un);

	if (pktp == NULL) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations. The driver will not
		 * suspend, pm_suspend, or detach while the state is RWAIT.
		 */
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);

		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	}

	/*
	 * We do not do DMA breakup for USCSI commands, so return failure
	 * here if all the needed DMA resources were not allocated.
	 */
	if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
	    (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
		scsi_destroy_pkt(pktp);
		SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
		    "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
		return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
	}

	/* Init the cdb from the given uscsi struct */
	(void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
	    uscmd->uscsi_cdb[0], 0, 0, 0);

	SD_FILL_SCSI1_LUN(un, pktp);

	/*
	 * Set up the optional USCSI flags. See the uscsi(7I) man page
	 * for the list of supported flags.
	 */

	if (uscmd->uscsi_flags & USCSI_SILENT) {
		flags |= FLAG_SILENT;
	}

	if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
		flags |= FLAG_DIAGNOSE;
	}

	if (uscmd->uscsi_flags & USCSI_ISOLATE) {
		flags |= FLAG_ISOLATE;
	}

	if (un->un_f_is_fibre == FALSE) {
		if (uscmd->uscsi_flags & USCSI_RENEGOT) {
			flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
		}
	}

	/*
	 * Set the pkt flags here so we save time later.
	 * Note: These flags are NOT in the uscsi man page!!!
	 */
	if (uscmd->uscsi_flags & USCSI_HEAD) {
		flags |= FLAG_HEAD;
	}

	if (uscmd->uscsi_flags & USCSI_NOINTR) {
		flags |= FLAG_NOINTR;
	}

	/*
	 * For tagged queueing, things get a bit complicated.
	 * Check first for head of queue and last for ordered queue.
	 * If neither head nor ordered, use the default driver tag flags.
	 */
	if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
		if (uscmd->uscsi_flags & USCSI_HTAG) {
			flags |= FLAG_HTAG;
		} else if (uscmd->uscsi_flags & USCSI_OTAG) {
			flags |= FLAG_OTAG;
		} else {
			flags |= un->un_tagflags & FLAG_TAGMASK;
		}
	}

	if (uscmd->uscsi_flags & USCSI_NODISCON) {
		flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
	}

	pktp->pkt_flags = flags;

	/* Copy the caller's CDB into the pkt... */
	bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);

	if (uscmd->uscsi_timeout == 0) {
		pktp->pkt_time = un->un_uscsi_timeout;
	} else {
		pktp->pkt_time = uscmd->uscsi_timeout;
	}

	/* need it later to identify USCSI request in sdintr */
	xp->xb_pkt_flags |= SD_XB_USCSICMD;

	xp->xb_sense_resid = uscmd->uscsi_rqresid;

	pktp->pkt_private = bp;
	pktp->pkt_comp = sdintr;
	*pktpp = pktp;

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);

	return (SD_PKT_ALLOC_SUCCESS);
}


/*
 * Function: sd_destroypkt_for_uscsi
 *
 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
 *		IOs. Also saves relevant info into the associated uscsi_cmd
 *		struct.
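 *
 *		Specifically, uscsi_status and uscsi_resid are always
 *		updated from the completed command, and, when
 *		USCSI_RQENABLE was set, uscsi_rqstatus, uscsi_rqresid and
 *		up to SENSE_LENGTH bytes of sense data are copied back
 *		as well.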
 *
 * Context: May be called under interrupt context
 */

static void
sd_destroypkt_for_uscsi(struct buf *bp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_lun *un;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	/* Save the status and the residual into the uscsi_cmd struct */
	uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
	uscmd->uscsi_resid = bp->b_resid;

	/*
	 * If enabled, copy any saved sense data into the area specified
	 * by the uscsi command.
	 */
	if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
	    (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
		/*
		 * Note: uscmd->uscsi_rqbuf should always point to a buffer
		 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
		 */
		uscmd->uscsi_rqstatus = xp->xb_sense_status;
		uscmd->uscsi_rqresid = xp->xb_sense_resid;
		bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, SENSE_LENGTH);
	}

	/* We are done with the scsi_pkt; free it now */
	ASSERT(SD_GET_PKTP(bp) != NULL);
	scsi_destroy_pkt(SD_GET_PKTP(bp));

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
}


/*
 * Function: sd_bioclone_alloc
 *
 * Description: Allocate a buf(9S) and init it as per the given buf
 *		and the various arguments. The associated sd_xbuf
 *		struct is (nearly) duplicated. The struct buf *bp
 *		argument is saved in new_xp->xb_private.
 *
 * Arguments: bp - ptr to the buf(9S) to be "shadowed"
 *		datalen - size of data area for the shadow bp
 *		blkno - starting LBA
 *		func - function pointer for b_iodone in the shadow buf. (May
 *		be NULL if none.)
 *
 * Return Code: Pointer to the allocated buf(9S) struct
 *
 * Context: Can sleep.
 */

static struct buf *
sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *))
{
	struct sd_lun *un;
	struct sd_xbuf *xp;
	struct sd_xbuf *new_xp;
	struct buf *new_bp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
	    NULL, KM_SLEEP);

	new_bp->b_lblkno = blkno;

	/*
	 * Allocate an xbuf for the shadow bp and copy the contents of the
	 * original xbuf into it.
	 */
	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	bcopy(xp, new_xp, sizeof (struct sd_xbuf));

	/*
	 * The given bp is automatically saved in the xb_private member
	 * of the new xbuf. Callers are allowed to depend on this.
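	 *
	 * A minimal sketch of the retrieval side, as a consumer of a
	 * (hypothetical) shadow_bp would do it on completion:
	 *
	 *	struct sd_xbuf *sxp = SD_GET_XBUF(shadow_bp);
	 *	struct buf *orig_bp = (struct buf *)sxp->xb_private;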
	 */
	new_xp->xb_private = bp;

	new_bp->b_private = new_xp;

	return (new_bp);
}

/*
 * Function: sd_shadow_buf_alloc
 *
 * Description: Allocate a buf(9S) and init it as per the given buf
 *		and the various arguments. The associated sd_xbuf
 *		struct is (nearly) duplicated. The struct buf *bp
 *		argument is saved in new_xp->xb_private.
 *
 * Arguments: bp - ptr to the buf(9S) to be "shadowed"
 *		datalen - size of data area for the shadow bp
 *		bflags - B_READ or B_WRITE (pseudo flag)
 *		blkno - starting LBA
 *		func - function pointer for b_iodone in the shadow buf. (May
 *		be NULL if none.)
 *
 * Return Code: Pointer to the allocated buf(9S) struct
 *
 * Context: Can sleep.
 */

static struct buf *
sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
    daddr_t blkno, int (*func)(struct buf *))
{
	struct sd_lun *un;
	struct sd_xbuf *xp;
	struct sd_xbuf *new_xp;
	struct buf *new_bp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
		bp_mapin(bp);
	}

	bflags &= (B_READ | B_WRITE);
#if defined(__i386) || defined(__amd64)
	new_bp = getrbuf(KM_SLEEP);
	new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
	new_bp->b_bcount = datalen;
	new_bp->b_flags = bp->b_flags | bflags;
#else
	new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
	    datalen, bflags, SLEEP_FUNC, NULL);
#endif
	new_bp->av_forw = NULL;
	new_bp->av_back = NULL;
	new_bp->b_dev = bp->b_dev;
	new_bp->b_blkno = blkno;
	new_bp->b_iodone = func;
	new_bp->b_edev = bp->b_edev;
	new_bp->b_resid = 0;

	/* We need to preserve the B_FAILFAST flag */
	if (bp->b_flags & B_FAILFAST) {
		new_bp->b_flags |= B_FAILFAST;
	}

	/*
	 * Allocate an xbuf for the shadow bp and copy the contents of the
	 * original xbuf into it.
	 */
	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	bcopy(xp, new_xp, sizeof (struct sd_xbuf));

	/* Need later to copy data between the shadow buf & original buf! */
	new_xp->xb_pkt_flags |= PKT_CONSISTENT;

	/*
	 * The given bp is automatically saved in the xb_private member
	 * of the new xbuf. Callers are allowed to depend on this.
	 */
	new_xp->xb_private = bp;

	new_bp->b_private = new_xp;

	return (new_bp);
}

/*
 * Function: sd_bioclone_free
 *
 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
 *		in the larger than partition operation.
 *
 * Context: May be called under interrupt context
 */

static void
sd_bioclone_free(struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/*
	 * Call bp_mapout() before freeing the buf, in case a lower
	 * layer or HBA had done a bp_mapin(). We must do this here
	 * as we are the "originator" of the shadow buf.
	 */
	bp_mapout(bp);

	/*
	 * Null out b_iodone before freeing the bp, to ensure that the driver
	 * never gets confused by a stale value in this field. (Just a little
(Just a little 13707 * extra defensiveness here.) 13708 */ 13709 bp->b_iodone = NULL; 13710 13711 freerbuf(bp); 13712 13713 kmem_free(xp, sizeof (struct sd_xbuf)); 13714 } 13715 13716 /* 13717 * Function: sd_shadow_buf_free 13718 * 13719 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 13720 * 13721 * Context: May be called under interrupt context 13722 */ 13723 13724 static void 13725 sd_shadow_buf_free(struct buf *bp) 13726 { 13727 struct sd_xbuf *xp; 13728 13729 ASSERT(bp != NULL); 13730 xp = SD_GET_XBUF(bp); 13731 ASSERT(xp != NULL); 13732 13733 #if defined(__sparc) 13734 /* 13735 * Call bp_mapout() before freeing the buf, in case a lower 13736 * layer or HBA had done a bp_mapin(). we must do this here 13737 * as we are the "originator" of the shadow buf. 13738 */ 13739 bp_mapout(bp); 13740 #endif 13741 13742 /* 13743 * Null out b_iodone before freeing the bp, to ensure that the driver 13744 * never gets confused by a stale value in this field. (Just a little 13745 * extra defensiveness here.) 13746 */ 13747 bp->b_iodone = NULL; 13748 13749 #if defined(__i386) || defined(__amd64) 13750 kmem_free(bp->b_un.b_addr, bp->b_bcount); 13751 freerbuf(bp); 13752 #else 13753 scsi_free_consistent_buf(bp); 13754 #endif 13755 13756 kmem_free(xp, sizeof (struct sd_xbuf)); 13757 } 13758 13759 13760 /* 13761 * Function: sd_print_transport_rejected_message 13762 * 13763 * Description: This implements the ludicrously complex rules for printing 13764 * a "transport rejected" message. This is to address the 13765 * specific problem of having a flood of this error message 13766 * produced when a failover occurs. 13767 * 13768 * Context: Any. 13769 */ 13770 13771 static void 13772 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 13773 int code) 13774 { 13775 ASSERT(un != NULL); 13776 ASSERT(mutex_owned(SD_MUTEX(un))); 13777 ASSERT(xp != NULL); 13778 13779 /* 13780 * Print the "transport rejected" message under the following 13781 * conditions: 13782 * 13783 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 13784 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 13785 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 13786 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 13787 * scsi_transport(9F) (which indicates that the target might have 13788 * gone off-line). This uses the un->un_tran_fatal_count 13789 * count, which is incremented whenever a TRAN_FATAL_ERROR is 13790 * received, and reset to zero whenver a TRAN_ACCEPT is returned 13791 * from scsi_transport(). 13792 * 13793 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 13794 * the preceeding cases in order for the message to be printed. 13795 */ 13796 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 13797 if ((sd_level_mask & SD_LOGMASK_DIAG) || 13798 (code != TRAN_FATAL_ERROR) || 13799 (un->un_tran_fatal_count == 1)) { 13800 switch (code) { 13801 case TRAN_BADPKT: 13802 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13803 "transport rejected bad packet\n"); 13804 break; 13805 case TRAN_FATAL_ERROR: 13806 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13807 "transport rejected fatal error\n"); 13808 break; 13809 default: 13810 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13811 "transport rejected (%d)\n", code); 13812 break; 13813 } 13814 } 13815 } 13816 } 13817 13818 13819 /* 13820 * Function: sd_add_buf_to_waitq 13821 * 13822 * Description: Add the given buf(9S) struct to the wait queue for the 13823 * instance. 
If sorting is enabled, then the buf is added 13824 * to the queue via an elevator sort algorithm (a la 13825 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key. 13826 * If sorting is not enabled, then the buf is just added 13827 * to the end of the wait queue. 13828 * 13829 * Return Code: void 13830 * 13831 * Context: Does not sleep/block, therefore technically can be called 13832 * from any context. However if sorting is enabled then the 13833 * execution time is indeterminate, and may take long if 13834 * the wait queue grows large. 13835 */ 13836 13837 static void 13838 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp) 13839 { 13840 struct buf *ap; 13841 13842 ASSERT(bp != NULL); 13843 ASSERT(un != NULL); 13844 ASSERT(mutex_owned(SD_MUTEX(un))); 13845 13846 /* If the queue is empty, add the buf as the only entry & return. */ 13847 if (un->un_waitq_headp == NULL) { 13848 ASSERT(un->un_waitq_tailp == NULL); 13849 un->un_waitq_headp = un->un_waitq_tailp = bp; 13850 bp->av_forw = NULL; 13851 return; 13852 } 13853 13854 ASSERT(un->un_waitq_tailp != NULL); 13855 13856 /* 13857 * If sorting is disabled, just add the buf to the tail end of 13858 * the wait queue and return. 13859 */ 13860 if (un->un_f_disksort_disabled) { 13861 un->un_waitq_tailp->av_forw = bp; 13862 un->un_waitq_tailp = bp; 13863 bp->av_forw = NULL; 13864 return; 13865 } 13866 13867 /* 13868 * Sort thru the list of requests currently on the wait queue 13869 * and add the new buf request at the appropriate position. 13870 * 13871 * The un->un_waitq_headp is an activity chain pointer on which 13872 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The 13873 * first queue holds those requests which are positioned after 13874 * the current SD_GET_BLKNO() (in the first request); the second holds 13875 * requests which came in after their SD_GET_BLKNO() number was passed. 13876 * Thus we implement a one way scan, retracting after reaching 13877 * the end of the drive to the first request on the second 13878 * queue, at which time it becomes the first queue. 13879 * A one-way scan is natural because of the way UNIX read-ahead 13880 * blocks are allocated. 13881 * 13882 * If we lie after the first request, then we must locate the 13883 * second request list and add ourselves to it. 13884 */ 13885 ap = un->un_waitq_headp; 13886 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) { 13887 while (ap->av_forw != NULL) { 13888 /* 13889 * Look for an "inversion" in the (normally 13890 * ascending) block numbers. This indicates 13891 * the start of the second request list. 13892 */ 13893 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) { 13894 /* 13895 * Search the second request list for the 13896 * first request at a larger block number. 13897 * We go before that; however if there is 13898 * no such request, we go at the end. 13899 */ 13900 do { 13901 if (SD_GET_BLKNO(bp) < 13902 SD_GET_BLKNO(ap->av_forw)) { 13903 goto insert; 13904 } 13905 ap = ap->av_forw; 13906 } while (ap->av_forw != NULL); 13907 goto insert; /* after last */ 13908 } 13909 ap = ap->av_forw; 13910 } 13911 13912 /* 13913 * No inversions... we will go after the last, and 13914 * be the first request in the second request list. 13915 */ 13916 goto insert; 13917 } 13918 13919 /* 13920 * Request is at/after the current request... 13921 * sort in the first request list. 13922 */ 13923 while (ap->av_forw != NULL) { 13924 /* 13925 * We want to go after the current request (1) if 13926 * there is an inversion after it (i.e. 
		 * there is an inversion after it (i.e. it is the end
		 * of the first request list), or (2) if the next
		 * request is a larger block no. than our request.
		 */
		if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
		    (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
			goto insert;
		}
		ap = ap->av_forw;
	}

	/*
	 * Neither a second list nor a larger request, therefore
	 * we go at the end of the first list (which is the same
	 * as the end of the whole shebang).
	 */
insert:
	bp->av_forw = ap->av_forw;
	ap->av_forw = bp;

	/*
	 * If we inserted onto the tail end of the waitq, make sure the
	 * tail pointer is updated.
	 */
	if (ap == un->un_waitq_tailp) {
		un->un_waitq_tailp = bp;
	}
}


/*
 * Function: sd_start_cmds
 *
 * Description: Remove and transport cmds from the driver queues.
 *
 * Arguments: un - pointer to the unit (soft state) struct for the target.
 *
 *		immed_bp - ptr to a buf to be transported immediately. Only
 *		the immed_bp is transported; bufs on the waitq are not
 *		processed and the un_retry_bp is not checked. If immed_bp is
 *		NULL, then normal queue processing is performed.
 *
 * Context: May be called from kernel thread context, interrupt context,
 *		or runout callback context. This function may not block or
 *		call routines that block.
 */

static void
sd_start_cmds(struct sd_lun *un, struct buf *immed_bp)
{
	struct sd_xbuf *xp;
	struct buf *bp;
	void (*statp)(kstat_io_t *);
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
	void (*saved_statp)(kstat_io_t *);
#endif
	int rval;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_ncmds_in_transport >= 0);
	ASSERT(un->un_throttle >= 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n");

	do {
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		saved_statp = NULL;
#endif

		/*
		 * If we are syncing or dumping, fail the command to
		 * avoid recursively calling back into scsi_transport().
		 * See panic.c for more information about the states
		 * the system can be in during panic.
		 */
		if ((un->un_state == SD_STATE_DUMPING) ||
		    (un->un_in_callback > 1)) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_start_cmds: panicking\n");
			goto exit;
		}

		if ((bp = immed_bp) != NULL) {
			/*
			 * We have a bp that must be transported immediately.
			 * It's OK to transport the immed_bp here without doing
			 * the throttle limit check because the immed_bp is
			 * always used in a retry/recovery case. This means
			 * that we know we are not at the throttle limit by
			 * virtue of the fact that to get here we must have
			 * already gotten a command back via sdintr(). This also
			 * relies on (1) the command on un_retry_bp preventing
			 * further commands from the waitq from being issued;
			 * and (2) the code in sd_retry_command checking the
			 * throttle limit before issuing a delayed or immediate
			 * retry. This holds even if the throttle limit is
			 * currently ratcheted down from its maximum value.
			 */
			statp = kstat_runq_enter;
			if (bp == un->un_retry_bp) {
				ASSERT((un->un_retry_statp == NULL) ||
				    (un->un_retry_statp == kstat_waitq_enter) ||
				    (un->un_retry_statp ==
				    kstat_runq_back_to_waitq));
				/*
				 * If the waitq kstat was incremented when
				 * sd_set_retry_bp() queued this bp for a retry,
				 * then we must set up statp so that the waitq
				 * count will get decremented correctly below.
				 * Also we must clear un->un_retry_statp to
				 * ensure that we do not act on a stale value
				 * in this field.
				 */
				if ((un->un_retry_statp == kstat_waitq_enter) ||
				    (un->un_retry_statp ==
				    kstat_runq_back_to_waitq)) {
					statp = kstat_waitq_to_runq;
				}
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
				saved_statp = un->un_retry_statp;
#endif
				un->un_retry_statp = NULL;

				SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
				    "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
				    "un_throttle:%d un_ncmds_in_transport:%d\n",
				    un, un->un_retry_bp, un->un_throttle,
				    un->un_ncmds_in_transport);
			} else {
				SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
				    "processing priority bp:0x%p\n", bp);
			}

		} else if ((bp = un->un_waitq_headp) != NULL) {
			/*
			 * A command on the waitq is ready to go, but do not
			 * send it if:
			 *
			 * (1) the throttle limit has been reached, or
			 * (2) a retry is pending, or
			 * (3) a START_STOP_UNIT callback is pending, or
			 * (4) a callback for a SD_PATH_DIRECT_PRIORITY
			 *     command is pending.
			 *
			 * For all of these conditions, IO processing will
			 * restart after the condition is cleared.
			 */
			if (un->un_ncmds_in_transport >= un->un_throttle) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "throttle limit reached!\n");
				goto exit;
			}
			if (un->un_retry_bp != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, retry pending!\n");
				goto exit;
			}
			if (un->un_startstop_timeid != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "START_STOP pending!\n");
				goto exit;
			}
			if (un->un_direct_priority_timeid != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
				goto exit;
			}

			/* Dequeue the command */
			un->un_waitq_headp = bp->av_forw;
			if (un->un_waitq_headp == NULL) {
				un->un_waitq_tailp = NULL;
			}
			bp->av_forw = NULL;
			statp = kstat_waitq_to_runq;
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_start_cmds: processing waitq bp:0x%p\n", bp);

		} else {
			/* No work to do so bail out now */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_start_cmds: no more work, exiting!\n");
			goto exit;
		}

		/*
		 * Reset the state to normal. This is the mechanism by which
		 * the state transitions from either SD_STATE_RWAIT or
		 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
		 * If state is SD_STATE_PM_CHANGING then this command is
		 * part of the device power control and the state must
		 * not be put back to normal. Doing so would allow new
		 * commands to proceed when they shouldn't, as the device
		 * may be powering down.
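		 * The same reasoning covers SD_STATE_SUSPENDED; the check
		 * below deliberately leaves both states untouched.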
14124 */ 14125 if ((un->un_state != SD_STATE_SUSPENDED) && 14126 (un->un_state != SD_STATE_PM_CHANGING)) { 14127 New_state(un, SD_STATE_NORMAL); 14128 } 14129 14130 xp = SD_GET_XBUF(bp); 14131 ASSERT(xp != NULL); 14132 14133 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14134 /* 14135 * Allocate the scsi_pkt if we need one, or attach DMA 14136 * resources if we have a scsi_pkt that needs them. The 14137 * latter should only occur for commands that are being 14138 * retried. 14139 */ 14140 if ((xp->xb_pktp == NULL) || 14141 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 14142 #else 14143 if (xp->xb_pktp == NULL) { 14144 #endif 14145 /* 14146 * There is no scsi_pkt allocated for this buf. Call 14147 * the initpkt function to allocate & init one. 14148 * 14149 * The scsi_init_pkt runout callback functionality is 14150 * implemented as follows: 14151 * 14152 * 1) The initpkt function always calls 14153 * scsi_init_pkt(9F) with sdrunout specified as the 14154 * callback routine. 14155 * 2) A successful packet allocation is initialized and 14156 * the I/O is transported. 14157 * 3) The I/O associated with an allocation resource 14158 * failure is left on its queue to be retried via 14159 * runout or the next I/O. 14160 * 4) The I/O associated with a DMA error is removed 14161 * from the queue and failed with EIO. Processing of 14162 * the transport queues is also halted to be 14163 * restarted via runout or the next I/O. 14164 * 5) The I/O associated with a CDB size or packet 14165 * size error is removed from the queue and failed 14166 * with EIO. Processing of the transport queues is 14167 * continued. 14168 * 14169 * Note: there is no interface for canceling a runout 14170 * callback. To prevent the driver from detaching or 14171 * suspending while a runout is pending the driver 14172 * state is set to SD_STATE_RWAIT 14173 * 14174 * Note: using the scsi_init_pkt callback facility can 14175 * result in an I/O request persisting at the head of 14176 * the list which cannot be satisfied even after 14177 * multiple retries. In the future the driver may 14178 * implement some kind of maximum runout count before 14179 * failing an I/O. 14180 * 14181 * Note: the use of funcp below may seem superfluous, 14182 * but it helps warlock figure out the correct 14183 * initpkt function calls (see [s]sd.wlcmd). 14184 */ 14185 struct scsi_pkt *pktp; 14186 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14187 14188 ASSERT(bp != un->un_rqs_bp); 14189 14190 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14191 switch ((*funcp)(bp, &pktp)) { 14192 case SD_PKT_ALLOC_SUCCESS: 14193 xp->xb_pktp = pktp; 14194 SD_TRACE(SD_LOG_IO_CORE, un, 14195 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14196 pktp); 14197 goto got_pkt; 14198 14199 case SD_PKT_ALLOC_FAILURE: 14200 /* 14201 * Temporary (hopefully) resource depletion. 14202 * Since retries and RQS commands always have a 14203 * scsi_pkt allocated, these cases should never 14204 * get here. So the only cases this needs to 14205 * handle is a bp from the waitq (which we put 14206 * back onto the waitq for sdrunout), or a bp 14207 * sent as an immed_bp (which we just fail). 14208 */ 14209 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14210 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14211 14212 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14213 14214 if (bp == immed_bp) { 14215 /* 14216 * If SD_XB_DMA_FREED is clear, then 14217 * this is a failure to allocate a 14218 * scsi_pkt, and we must fail the 14219 * command. 
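					 * (The break below drops out of the
					 * switch into the common failure path
					 * that returns the buf with EIO.)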
14220 */ 14221 if ((xp->xb_pkt_flags & 14222 SD_XB_DMA_FREED) == 0) { 14223 break; 14224 } 14225 14226 /* 14227 * If this immediate command is NOT our 14228 * un_retry_bp, then we must fail it. 14229 */ 14230 if (bp != un->un_retry_bp) { 14231 break; 14232 } 14233 14234 /* 14235 * We get here if this cmd is our 14236 * un_retry_bp that was DMAFREED, but 14237 * scsi_init_pkt() failed to reallocate 14238 * DMA resources when we attempted to 14239 * retry it. This can happen when an 14240 * mpxio failover is in progress, but 14241 * we don't want to just fail the 14242 * command in this case. 14243 * 14244 * Use timeout(9F) to restart it after 14245 * a 100ms delay. We don't want to 14246 * let sdrunout() restart it, because 14247 * sdrunout() is just supposed to start 14248 * commands that are sitting on the 14249 * wait queue. The un_retry_bp stays 14250 * set until the command completes, but 14251 * sdrunout can be called many times 14252 * before that happens. Since sdrunout 14253 * cannot tell if the un_retry_bp is 14254 * already in the transport, it could 14255 * end up calling scsi_transport() for 14256 * the un_retry_bp multiple times. 14257 * 14258 * Also: don't schedule the callback 14259 * if some other callback is already 14260 * pending. 14261 */ 14262 if (un->un_retry_statp == NULL) { 14263 /* 14264 * restore the kstat pointer to 14265 * keep kstat counts coherent 14266 * when we do retry the command. 14267 */ 14268 un->un_retry_statp = 14269 saved_statp; 14270 } 14271 14272 if ((un->un_startstop_timeid == NULL) && 14273 (un->un_retry_timeid == NULL) && 14274 (un->un_direct_priority_timeid == 14275 NULL)) { 14276 14277 un->un_retry_timeid = 14278 timeout( 14279 sd_start_retry_command, 14280 un, SD_RESTART_TIMEOUT); 14281 } 14282 goto exit; 14283 } 14284 14285 #else 14286 if (bp == immed_bp) { 14287 break; /* Just fail the command */ 14288 } 14289 #endif 14290 14291 /* Add the buf back to the head of the waitq */ 14292 bp->av_forw = un->un_waitq_headp; 14293 un->un_waitq_headp = bp; 14294 if (un->un_waitq_tailp == NULL) { 14295 un->un_waitq_tailp = bp; 14296 } 14297 goto exit; 14298 14299 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14300 /* 14301 * HBA DMA resource failure. Fail the command 14302 * and continue processing of the queues. 14303 */ 14304 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14305 "sd_start_cmds: " 14306 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14307 break; 14308 14309 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 14310 /* 14311 * Note:x86: Partial DMA mapping not supported 14312 * for USCSI commands, and all the needed DMA 14313 * resources were not allocated. 14314 */ 14315 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14316 "sd_start_cmds: " 14317 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 14318 break; 14319 14320 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 14321 /* 14322 * Note:x86: Request cannot fit into CDB based 14323 * on lba and len. 14324 */ 14325 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14326 "sd_start_cmds: " 14327 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 14328 break; 14329 14330 default: 14331 /* Should NEVER get here! */ 14332 panic("scsi_initpkt error"); 14333 /*NOTREACHED*/ 14334 } 14335 14336 /* 14337 * Fatal error in allocating a scsi_pkt for this buf. 14338 * Update kstats & return the buf with an error code. 14339 * We must use sd_return_failed_command_no_restart() to 14340 * avoid a recursive call back into sd_start_cmds(). 14341 * However this also means that we must keep processing 14342 * the waitq here in order to avoid stalling. 
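		 * That is what the 'continue' at the end of this block does:
		 * it loops back to pick up the next command on the waitq.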
14343 */ 14344 if (statp == kstat_waitq_to_runq) { 14345 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 14346 } 14347 sd_return_failed_command_no_restart(un, bp, EIO); 14348 if (bp == immed_bp) { 14349 /* immed_bp is gone by now, so clear this */ 14350 immed_bp = NULL; 14351 } 14352 continue; 14353 } 14354 got_pkt: 14355 if (bp == immed_bp) { 14356 /* goto the head of the class.... */ 14357 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14358 } 14359 14360 un->un_ncmds_in_transport++; 14361 SD_UPDATE_KSTATS(un, statp, bp); 14362 14363 /* 14364 * Call scsi_transport() to send the command to the target. 14365 * According to SCSA architecture, we must drop the mutex here 14366 * before calling scsi_transport() in order to avoid deadlock. 14367 * Note that the scsi_pkt's completion routine can be executed 14368 * (from interrupt context) even before the call to 14369 * scsi_transport() returns. 14370 */ 14371 SD_TRACE(SD_LOG_IO_CORE, un, 14372 "sd_start_cmds: calling scsi_transport()\n"); 14373 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 14374 14375 mutex_exit(SD_MUTEX(un)); 14376 rval = scsi_transport(xp->xb_pktp); 14377 mutex_enter(SD_MUTEX(un)); 14378 14379 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14380 "sd_start_cmds: scsi_transport() returned %d\n", rval); 14381 14382 switch (rval) { 14383 case TRAN_ACCEPT: 14384 /* Clear this with every pkt accepted by the HBA */ 14385 un->un_tran_fatal_count = 0; 14386 break; /* Success; try the next cmd (if any) */ 14387 14388 case TRAN_BUSY: 14389 un->un_ncmds_in_transport--; 14390 ASSERT(un->un_ncmds_in_transport >= 0); 14391 14392 /* 14393 * Don't retry request sense, the sense data 14394 * is lost when another request is sent. 14395 * Free up the rqs buf and retry 14396 * the original failed cmd. Update kstat. 14397 */ 14398 if (bp == un->un_rqs_bp) { 14399 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14400 bp = sd_mark_rqs_idle(un, xp); 14401 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 14402 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, 14403 kstat_waitq_enter); 14404 goto exit; 14405 } 14406 14407 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14408 /* 14409 * Free the DMA resources for the scsi_pkt. This will 14410 * allow mpxio to select another path the next time 14411 * we call scsi_transport() with this scsi_pkt. 14412 * See sdintr() for the rationalization behind this. 14413 */ 14414 if ((un->un_f_is_fibre == TRUE) && 14415 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14416 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 14417 scsi_dmafree(xp->xb_pktp); 14418 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14419 } 14420 #endif 14421 14422 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 14423 /* 14424 * Commands that are SD_PATH_DIRECT_PRIORITY 14425 * are for error recovery situations. These do 14426 * not use the normal command waitq, so if they 14427 * get a TRAN_BUSY we cannot put them back onto 14428 * the waitq for later retry. One possible 14429 * problem is that there could already be some 14430 * other command on un_retry_bp that is waiting 14431 * for this one to complete, so we would be 14432 * deadlocked if we put this command back onto 14433 * the waitq for later retry (since un_retry_bp 14434 * must complete before the driver gets back to 14435 * commands on the waitq). 14436 * 14437 * To avoid deadlock we must schedule a callback 14438 * that will restart this command after a set 14439 * interval. 
This should keep retrying for as 14440 * long as the underlying transport keeps 14441 * returning TRAN_BUSY (just like for other 14442 * commands). Use the same timeout interval as 14443 * for the ordinary TRAN_BUSY retry. 14444 */ 14445 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14446 "sd_start_cmds: scsi_transport() returned " 14447 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 14448 14449 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14450 un->un_direct_priority_timeid = 14451 timeout(sd_start_direct_priority_command, 14452 bp, SD_BSY_TIMEOUT / 500); 14453 14454 goto exit; 14455 } 14456 14457 /* 14458 * For TRAN_BUSY, we want to reduce the throttle value, 14459 * unless we are retrying a command. 14460 */ 14461 if (bp != un->un_retry_bp) { 14462 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 14463 } 14464 14465 /* 14466 * Set up the bp to be tried again 10 ms later. 14467 * Note:x86: Is there a timeout value in the sd_lun 14468 * for this condition? 14469 */ 14470 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 14471 kstat_runq_back_to_waitq); 14472 goto exit; 14473 14474 case TRAN_FATAL_ERROR: 14475 un->un_tran_fatal_count++; 14476 /* FALLTHRU */ 14477 14478 case TRAN_BADPKT: 14479 default: 14480 un->un_ncmds_in_transport--; 14481 ASSERT(un->un_ncmds_in_transport >= 0); 14482 14483 /* 14484 * If this is our REQUEST SENSE command with a 14485 * transport error, we must get back the pointers 14486 * to the original buf, and mark the REQUEST 14487 * SENSE command as "available". 14488 */ 14489 if (bp == un->un_rqs_bp) { 14490 bp = sd_mark_rqs_idle(un, xp); 14491 xp = SD_GET_XBUF(bp); 14492 } else { 14493 /* 14494 * Legacy behavior: do not update transport 14495 * error count for request sense commands. 14496 */ 14497 SD_UPDATE_ERRSTATS(un, sd_transerrs); 14498 } 14499 14500 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14501 sd_print_transport_rejected_message(un, xp, rval); 14502 14503 /* 14504 * We must use sd_return_failed_command_no_restart() to 14505 * avoid a recursive call back into sd_start_cmds(). 14506 * However this also means that we must keep processing 14507 * the waitq here in order to avoid stalling. 14508 */ 14509 sd_return_failed_command_no_restart(un, bp, EIO); 14510 14511 /* 14512 * Notify any threads waiting in sd_ddi_suspend() that 14513 * a command completion has occurred. 14514 */ 14515 if (un->un_state == SD_STATE_SUSPENDED) { 14516 cv_broadcast(&un->un_disk_busy_cv); 14517 } 14518 14519 if (bp == immed_bp) { 14520 /* immed_bp is gone by now, so clear this */ 14521 immed_bp = NULL; 14522 } 14523 break; 14524 } 14525 14526 } while (immed_bp == NULL); 14527 14528 exit: 14529 ASSERT(mutex_owned(SD_MUTEX(un))); 14530 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 14531 } 14532 14533 14534 /* 14535 * Function: sd_return_command 14536 * 14537 * Description: Returns a command to its originator (with or without an 14538 * error). Also starts commands waiting to be transported 14539 * to the target. 
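 *
 * Arguments: un - ptr to the sd_lun softstate struct for the target.
 *		bp - ptr to the buf(9S) for the command being returned.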
14540 * 14541 * Context: May be called from interrupt, kernel, or timeout context 14542 */ 14543 14544 static void 14545 sd_return_command(struct sd_lun *un, struct buf *bp) 14546 { 14547 struct sd_xbuf *xp; 14548 #if defined(__i386) || defined(__amd64) 14549 struct scsi_pkt *pktp; 14550 #endif 14551 14552 ASSERT(bp != NULL); 14553 ASSERT(un != NULL); 14554 ASSERT(mutex_owned(SD_MUTEX(un))); 14555 ASSERT(bp != un->un_rqs_bp); 14556 xp = SD_GET_XBUF(bp); 14557 ASSERT(xp != NULL); 14558 14559 #if defined(__i386) || defined(__amd64) 14560 pktp = SD_GET_PKTP(bp); 14561 #endif 14562 14563 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 14564 14565 #if defined(__i386) || defined(__amd64) 14566 /* 14567 * Note:x86: check for the "sdrestart failed" case. 14568 */ 14569 if (((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 14570 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 14571 (xp->xb_pktp->pkt_resid == 0)) { 14572 14573 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 14574 /* 14575 * Successfully set up next portion of cmd 14576 * transfer, try sending it 14577 */ 14578 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14579 NULL, NULL, 0, (clock_t)0, NULL); 14580 sd_start_cmds(un, NULL); 14581 return; /* Note:x86: need a return here? */ 14582 } 14583 } 14584 #endif 14585 14586 /* 14587 * If this is the failfast bp, clear it from un_failfast_bp. This 14588 * can happen if upon being re-tried the failfast bp either 14589 * succeeded or encountered another error (possibly even a different 14590 * error than the one that precipitated the failfast state, but in 14591 * that case it would have had to exhaust retries as well). Regardless, 14592 * this should not occur whenever the instance is in the active 14593 * failfast state. 14594 */ 14595 if (bp == un->un_failfast_bp) { 14596 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 14597 un->un_failfast_bp = NULL; 14598 } 14599 14600 /* 14601 * Clear the failfast state upon successful completion of ANY cmd. 14602 */ 14603 if (bp->b_error == 0) { 14604 un->un_failfast_state = SD_FAILFAST_INACTIVE; 14605 } 14606 14607 /* 14608 * This is used if the command was retried one or more times. Show that 14609 * we are done with it, and allow processing of the waitq to resume. 14610 */ 14611 if (bp == un->un_retry_bp) { 14612 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14613 "sd_return_command: un:0x%p: " 14614 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 14615 un->un_retry_bp = NULL; 14616 un->un_retry_statp = NULL; 14617 } 14618 14619 SD_UPDATE_RDWR_STATS(un, bp); 14620 SD_UPDATE_PARTITION_STATS(un, bp); 14621 14622 switch (un->un_state) { 14623 case SD_STATE_SUSPENDED: 14624 /* 14625 * Notify any threads waiting in sd_ddi_suspend() that 14626 * a command completion has occurred. 14627 */ 14628 cv_broadcast(&un->un_disk_busy_cv); 14629 break; 14630 default: 14631 sd_start_cmds(un, NULL); 14632 break; 14633 } 14634 14635 /* Return this command up the iodone chain to its originator. */ 14636 mutex_exit(SD_MUTEX(un)); 14637 14638 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 14639 xp->xb_pktp = NULL; 14640 14641 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 14642 14643 ASSERT(!mutex_owned(SD_MUTEX(un))); 14644 mutex_enter(SD_MUTEX(un)); 14645 14646 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 14647 } 14648 14649 14650 /* 14651 * Function: sd_return_failed_command 14652 * 14653 * Description: Command completion when an error occurred. 
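 *
 * Arguments: un - ptr to the sd_lun softstate struct for the target.
 *		bp - ptr to the buf(9S) for the failed command.
 *		errcode - errno return code to set in the bp (b_error).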
14654 * 14655 * Context: May be called from interrupt context 14656 */ 14657 14658 static void 14659 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 14660 { 14661 ASSERT(bp != NULL); 14662 ASSERT(un != NULL); 14663 ASSERT(mutex_owned(SD_MUTEX(un))); 14664 14665 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14666 "sd_return_failed_command: entry\n"); 14667 14668 /* 14669 * b_resid could already be nonzero due to a partial data 14670 * transfer, so do not change it here. 14671 */ 14672 SD_BIOERROR(bp, errcode); 14673 14674 sd_return_command(un, bp); 14675 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14676 "sd_return_failed_command: exit\n"); 14677 } 14678 14679 14680 /* 14681 * Function: sd_return_failed_command_no_restart 14682 * 14683 * Description: Same as sd_return_failed_command, but ensures that no 14684 * call back into sd_start_cmds will be issued. 14685 * 14686 * Context: May be called from interrupt context 14687 */ 14688 14689 static void 14690 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 14691 int errcode) 14692 { 14693 struct sd_xbuf *xp; 14694 14695 ASSERT(bp != NULL); 14696 ASSERT(un != NULL); 14697 ASSERT(mutex_owned(SD_MUTEX(un))); 14698 xp = SD_GET_XBUF(bp); 14699 ASSERT(xp != NULL); 14700 ASSERT(errcode != 0); 14701 14702 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14703 "sd_return_failed_command_no_restart: entry\n"); 14704 14705 /* 14706 * b_resid could already be nonzero due to a partial data 14707 * transfer, so do not change it here. 14708 */ 14709 SD_BIOERROR(bp, errcode); 14710 14711 /* 14712 * If this is the failfast bp, clear it. This can happen if the 14713 * failfast bp encountered a fatal error when we attempted to 14714 * re-try it (such as a scsi_transport(9F) failure). However 14715 * we should NOT be in an active failfast state if the failfast 14716 * bp is not NULL. 14717 */ 14718 if (bp == un->un_failfast_bp) { 14719 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 14720 un->un_failfast_bp = NULL; 14721 } 14722 14723 if (bp == un->un_retry_bp) { 14724 /* 14725 * This command was retried one or more times. Show that we are 14726 * done with it, and allow processing of the waitq to resume. 14727 */ 14728 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14729 "sd_return_failed_command_no_restart: " 14730 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 14731 un->un_retry_bp = NULL; 14732 un->un_retry_statp = NULL; 14733 } 14734 14735 SD_UPDATE_RDWR_STATS(un, bp); 14736 SD_UPDATE_PARTITION_STATS(un, bp); 14737 14738 mutex_exit(SD_MUTEX(un)); 14739 14740 if (xp->xb_pktp != NULL) { 14741 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 14742 xp->xb_pktp = NULL; 14743 } 14744 14745 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 14746 14747 mutex_enter(SD_MUTEX(un)); 14748 14749 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14750 "sd_return_failed_command_no_restart: exit\n"); 14751 } 14752 14753 14754 /* 14755 * Function: sd_retry_command 14756 * 14757 * Description: Queue up a command for retry, or (optionally) fail it 14758 * if retry counts are exhausted. 14759 * 14760 * Arguments: un - Pointer to the sd_lun struct for the target. 14761 * 14762 * bp - Pointer to the buf for the command to be retried. 14763 * 14764 * retry_check_flag - Flag to see which (if any) of the retry 14765 * counts should be decremented/checked. If the indicated 14766 * retry count is exhausted, then the command will not be 14767 * retried; it will be failed instead.
This should use a 14768 * value equal to one of the following: 14769 * 14770 * SD_RETRIES_NOCHECK 14771 * SD_RETRIES_STANDARD 14772 * SD_RETRIES_VICTIM 14773 * 14774 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 14775 * if the check should be made to see if FLAG_ISOLATE is set 14776 * in the pkt. If FLAG_ISOLATE is set, then the command is 14777 * not retried, it is simply failed. 14778 * 14779 * user_funcp - Ptr to function to call before dispatching the 14780 * command. May be NULL if no action needs to be performed. 14781 * (Primarily intended for printing messages.) 14782 * 14783 * user_arg - Optional argument to be passed along to 14784 * the user_funcp call. 14785 * 14786 * failure_code - errno return code to set in the bp if the 14787 * command is going to be failed. 14788 * 14789 * retry_delay - Retry delay interval in (clock_t) units. May 14790 * be zero which indicates that the command should be retried 14791 * immediately (ie, without an intervening delay). 14792 * 14793 * statp - Ptr to kstat function to be updated if the command 14794 * is queued for a delayed retry. May be NULL if no kstat 14795 * update is desired. 14796 * 14797 * Context: May be called from interrupt context. 14798 */ 14799 14800 static void 14801 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 14802 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 14803 code), void *user_arg, int failure_code, clock_t retry_delay, 14804 void (*statp)(kstat_io_t *)) 14805 { 14806 struct sd_xbuf *xp; 14807 struct scsi_pkt *pktp; 14808 14809 ASSERT(un != NULL); 14810 ASSERT(mutex_owned(SD_MUTEX(un))); 14811 ASSERT(bp != NULL); 14812 xp = SD_GET_XBUF(bp); 14813 ASSERT(xp != NULL); 14814 pktp = SD_GET_PKTP(bp); 14815 ASSERT(pktp != NULL); 14816 14817 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14818 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 14819 14820 /* 14821 * If we are syncing or dumping, fail the command to avoid 14822 * recursively calling back into scsi_transport(). 14823 */ 14824 if (ddi_in_panic()) { 14825 goto fail_command_no_log; 14826 } 14827 14828 /* 14829 * We should never be retrying a command with FLAG_DIAGNOSE set, so 14830 * log an error and fail the command. 14831 */ 14832 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14833 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 14834 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 14835 sd_dump_memory(un, SD_LOG_IO, "CDB", 14836 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 14837 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 14838 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 14839 goto fail_command; 14840 } 14841 14842 /* 14843 * If we are suspended, then put the command onto the head of the 14844 * wait queue since we don't want to start more commands. 14845 */ 14846 switch (un->un_state) { 14847 case SD_STATE_SUSPENDED: 14848 case SD_STATE_DUMPING: 14849 bp->av_forw = un->un_waitq_headp; 14850 un->un_waitq_headp = bp; 14851 if (un->un_waitq_tailp == NULL) { 14852 un->un_waitq_tailp = bp; 14853 } 14854 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 14855 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 14856 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 14857 return; 14858 default: 14859 break; 14860 } 14861 14862 /* 14863 * If the caller wants us to check FLAG_ISOLATE, then see if that 14864 * is set; if it is then we do not want to retry the command. 14865 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
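 *
 * For example (illustrative only), a caller that wants standard
 * retry processing, but no retry when FLAG_ISOLATE is set in the
 * pkt, could issue:
 *
 *	sd_retry_command(un, bp,
 *	    (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
 *	    NULL, NULL, EIO, (clock_t)0, NULL);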
14866 */ 14867 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 14868 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 14869 goto fail_command; 14870 } 14871 } 14872 14873 14874 /* 14875 * If SD_RETRIES_FAILFAST is set, it indicates that either a 14876 * command timeout or a selection timeout has occurred. This means 14877 * that we were unable to establish any kind of communication with 14878 * the target, and subsequent retries and/or commands are likely 14879 * to encounter similar results and take a long time to complete. 14880 * 14881 * If this is a failfast error condition, we need to update the 14882 * failfast state, even if this bp does not have B_FAILFAST set. 14883 */ 14884 if (retry_check_flag & SD_RETRIES_FAILFAST) { 14885 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 14886 ASSERT(un->un_failfast_bp == NULL); 14887 /* 14888 * If we are already in the active failfast state, and 14889 * another failfast error condition has been detected, 14890 * then fail this command if it has B_FAILFAST set. 14891 * If B_FAILFAST is clear, then maintain the legacy 14892 * behavior of retrying heroically, even though this will 14893 * take a lot more time to fail the command. 14894 */ 14895 if (bp->b_flags & B_FAILFAST) { 14896 goto fail_command; 14897 } 14898 } else { 14899 /* 14900 * We're not in the active failfast state, but we 14901 * have a failfast error condition, so we must begin 14902 * transition to the next state. We do this regardless 14903 * of whether or not this bp has B_FAILFAST set. 14904 */ 14905 if (un->un_failfast_bp == NULL) { 14906 /* 14907 * This is the first bp to meet a failfast 14908 * condition so save it on un_failfast_bp & 14909 * do normal retry processing. Do not enter 14910 * active failfast state yet. This marks 14911 * entry into the "failfast pending" state. 14912 */ 14913 un->un_failfast_bp = bp; 14914 14915 } else if (un->un_failfast_bp == bp) { 14916 /* 14917 * This is the second time *this* bp has 14918 * encountered a failfast error condition, 14919 * so enter active failfast state & flush 14920 * queues as appropriate. 14921 */ 14922 un->un_failfast_state = SD_FAILFAST_ACTIVE; 14923 un->un_failfast_bp = NULL; 14924 sd_failfast_flushq(un); 14925 14926 /* 14927 * Fail this bp now if B_FAILFAST set; 14928 * otherwise continue with retries. (It would 14929 * be pretty ironic if this bp succeeded on a 14930 * subsequent retry after we just flushed all 14931 * the queues). 14932 */ 14933 if (bp->b_flags & B_FAILFAST) { 14934 goto fail_command; 14935 } 14936 14937 #if !defined(lint) && !defined(__lint) 14938 } else { 14939 /* 14940 * If neither of the preceding conditionals 14941 * was true, it means that there is some 14942 * *other* bp that has met an initial failfast 14943 * condition and is currently either being 14944 * retried or is waiting to be retried. In 14945 * that case we should perform normal retry 14946 * processing on *this* bp, since there is a 14947 * chance that the current failfast condition 14948 * is transient and recoverable. If that does 14949 * not turn out to be the case, then retries 14950 * will be cleared when the wait queue is 14951 * flushed anyway.
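 *
 * To summarize the failfast state machine implemented above
 * (an illustrative sketch of the transitions just described):
 *
 *	INACTIVE --(failfast error, un_failfast_bp == NULL)-->
 *	    pending (un_failfast_bp = bp; normal retries continue)
 *	pending --(failfast error on that same bp)-->
 *	    ACTIVE (queues flushed; B_FAILFAST bps failed at once)
 *	any state --(non-failfast retry or successful cmd)-->
 *	    INACTIVE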
14952 */ 14953 #endif 14954 } 14955 } 14956 } else { 14957 /* 14958 * SD_RETRIES_FAILFAST is clear, which indicates that we 14959 * likely were able to at least establish some level of 14960 * communication with the target and subsequent commands 14961 * and/or retries are likely to get through to the target. 14962 * In this case we want to be aggressive about clearing 14963 * the failfast state. Note that this does not affect 14964 * the "failfast pending" condition. 14965 */ 14966 un->un_failfast_state = SD_FAILFAST_INACTIVE; 14967 } 14968 14969 14970 /* 14971 * Check the specified retry count to see if we can still do 14972 * any retries with this pkt before we should fail it. 14973 */ 14974 switch (retry_check_flag & SD_RETRIES_MASK) { 14975 case SD_RETRIES_VICTIM: 14976 /* 14977 * Check the victim retry count. If exhausted, then fall 14978 * thru & check against the standard retry count. 14979 */ 14980 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 14981 /* Increment count & proceed with the retry */ 14982 xp->xb_victim_retry_count++; 14983 break; 14984 } 14985 /* Victim retries exhausted, fall back to std. retries... */ 14986 /* FALLTHRU */ 14987 14988 case SD_RETRIES_STANDARD: 14989 if (xp->xb_retry_count >= un->un_retry_count) { 14990 /* Retries exhausted, fail the command */ 14991 SD_TRACE(SD_LOG_IO_CORE, un, 14992 "sd_retry_command: retries exhausted!\n"); 14993 /* 14994 * update b_resid for failed SCMD_READ & SCMD_WRITE 14995 * commands with nonzero pkt_resid. 14996 */ 14997 if ((pktp->pkt_reason == CMD_CMPLT) && 14998 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 14999 (pktp->pkt_resid != 0)) { 15000 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 15001 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 15002 SD_UPDATE_B_RESID(bp, pktp); 15003 } 15004 } 15005 goto fail_command; 15006 } 15007 xp->xb_retry_count++; 15008 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15009 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15010 break; 15011 15012 case SD_RETRIES_UA: 15013 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 15014 /* Retries exhausted, fail the command */ 15015 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 15016 "Unit Attention retries exhausted. " 15017 "Check the target.\n"); 15018 goto fail_command; 15019 } 15020 xp->xb_ua_retry_count++; 15021 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15022 "sd_retry_command: retry count:%d\n", 15023 xp->xb_ua_retry_count); 15024 break; 15025 15026 case SD_RETRIES_BUSY: 15027 if (xp->xb_retry_count >= un->un_busy_retry_count) { 15028 /* Retries exhausted, fail the command */ 15029 SD_TRACE(SD_LOG_IO_CORE, un, 15030 "sd_retry_command: retries exhausted!\n"); 15031 goto fail_command; 15032 } 15033 xp->xb_retry_count++; 15034 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15035 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 15036 break; 15037 15038 case SD_RETRIES_NOCHECK: 15039 default: 15040 /* No retry count to check. Just proceed with the retry */ 15041 break; 15042 } 15043 15044 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 15045 15046 /* 15047 * If we were given a zero timeout, we must attempt to retry the 15048 * command immediately (ie, without a delay). 15049 */ 15050 if (retry_delay == 0) { 15051 /* 15052 * Check some limiting conditions to see if we can actually 15053 * do the immediate retry. If we cannot, then we must 15054 * fall back to queueing up a delayed retry.
15055 */ 15056 if (un->un_ncmds_in_transport >= un->un_throttle) { 15057 /* 15058 * We are at the throttle limit for the target, 15059 * fall back to delayed retry. 15060 */ 15061 retry_delay = SD_BSY_TIMEOUT; 15062 statp = kstat_waitq_enter; 15063 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15064 "sd_retry_command: immed. retry hit " 15065 "throttle!\n"); 15066 } else { 15067 /* 15068 * We're clear to proceed with the immediate retry. 15069 * First call the user-provided function (if any) 15070 */ 15071 if (user_funcp != NULL) { 15072 (*user_funcp)(un, bp, user_arg, 15073 SD_IMMEDIATE_RETRY_ISSUED); 15074 } 15075 15076 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15077 "sd_retry_command: issuing immediate retry\n"); 15078 15079 /* 15080 * Call sd_start_cmds() to transport the command to 15081 * the target. 15082 */ 15083 sd_start_cmds(un, bp); 15084 15085 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15086 "sd_retry_command exit\n"); 15087 return; 15088 } 15089 } 15090 15091 /* 15092 * Set up to retry the command after a delay. 15093 * First call the user-provided function (if any) 15094 */ 15095 if (user_funcp != NULL) { 15096 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 15097 } 15098 15099 sd_set_retry_bp(un, bp, retry_delay, statp); 15100 15101 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15102 return; 15103 15104 fail_command: 15105 15106 if (user_funcp != NULL) { 15107 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 15108 } 15109 15110 fail_command_no_log: 15111 15112 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15113 "sd_retry_command: returning failed command\n"); 15114 15115 sd_return_failed_command(un, bp, failure_code); 15116 15117 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15118 } 15119 15120 15121 /* 15122 * Function: sd_set_retry_bp 15123 * 15124 * Description: Set up the given bp for retry. 15125 * 15126 * Arguments: un - ptr to associated softstate 15127 * bp - ptr to buf(9S) for the command 15128 * retry_delay - time interval before issuing retry (may be 0) 15129 * statp - optional pointer to kstat function 15130 * 15131 * Context: May be called under interrupt context 15132 */ 15133 15134 static void 15135 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 15136 void (*statp)(kstat_io_t *)) 15137 { 15138 ASSERT(un != NULL); 15139 ASSERT(mutex_owned(SD_MUTEX(un))); 15140 ASSERT(bp != NULL); 15141 15142 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15143 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 15144 15145 /* 15146 * Indicate that the command is being retried. This will not allow any 15147 * other commands on the wait queue to be transported to the target 15148 * until this command has been completed (success or failure). The 15149 * "retry command" is not transported to the target until the given 15150 * time delay expires, unless the user specified a 0 retry_delay. 15151 * 15152 * Note: the timeout(9F) callback routine is what actually calls 15153 * sd_start_cmds() to transport the command, with the exception of a 15154 * zero retry_delay. The only current implementor of a zero retry delay 15155 * is the case where a START_STOP_UNIT is sent to spin-up a device. 15156 */ 15157 if (un->un_retry_bp == NULL) { 15158 ASSERT(un->un_retry_statp == NULL); 15159 un->un_retry_bp = bp; 15160 15161 /* 15162 * If the user has not specified a delay the command should 15163 * be queued and no timeout should be scheduled. 
15164 */ 15165 if (retry_delay == 0) { 15166 /* 15167 * Save the kstat pointer that will be used in the 15168 * call to SD_UPDATE_KSTATS() below, so that 15169 * sd_start_cmds() can correctly decrement the waitq 15170 * count when it is time to transport this command. 15171 */ 15172 un->un_retry_statp = statp; 15173 goto done; 15174 } 15175 } 15176 15177 if (un->un_retry_bp == bp) { 15178 /* 15179 * Save the kstat pointer that will be used in the call to 15180 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 15181 * correctly decrement the waitq count when it is time to 15182 * transport this command. 15183 */ 15184 un->un_retry_statp = statp; 15185 15186 /* 15187 * Schedule a timeout if: 15188 * 1) The user has specified a delay. 15189 * 2) There is not a START_STOP_UNIT callback pending. 15190 * 15191 * If no delay has been specified, then it is up to the caller 15192 * to ensure that IO processing continues without stalling. 15193 * Effectively, this means that the caller will issue the 15194 * required call to sd_start_cmds(). The START_STOP_UNIT 15195 * callback does this after the START STOP UNIT command has 15196 * completed. In either of these cases we should not schedule 15197 * a timeout callback here. Also don't schedule the timeout if 15198 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 15199 */ 15200 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 15201 (un->un_direct_priority_timeid == NULL)) { 15202 un->un_retry_timeid = 15203 timeout(sd_start_retry_command, un, retry_delay); 15204 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15205 "sd_set_retry_bp: setting timeout: un: 0x%p" 15206 " bp:0x%p un_retry_timeid:0x%p\n", 15207 un, bp, un->un_retry_timeid); 15208 } 15209 } else { 15210 /* 15211 * We only get in here if there is already another command 15212 * waiting to be retried. In this case, we just put the 15213 * given command onto the wait queue, so it can be transported 15214 * after the current retry command has completed. 15215 * 15216 * Also we have to make sure that if the command at the head 15217 * of the wait queue is the un_failfast_bp, that we do not 15218 * put ahead of it any other commands that are to be retried. 15219 */ 15220 if ((un->un_failfast_bp != NULL) && 15221 (un->un_failfast_bp == un->un_waitq_headp)) { 15222 /* 15223 * Enqueue this command AFTER the first command on 15224 * the wait queue (which is also un_failfast_bp). 15225 */ 15226 bp->av_forw = un->un_waitq_headp->av_forw; 15227 un->un_waitq_headp->av_forw = bp; 15228 if (un->un_waitq_headp == un->un_waitq_tailp) { 15229 un->un_waitq_tailp = bp; 15230 } 15231 } else { 15232 /* Enqueue this command at the head of the waitq. */ 15233 bp->av_forw = un->un_waitq_headp; 15234 un->un_waitq_headp = bp; 15235 if (un->un_waitq_tailp == NULL) { 15236 un->un_waitq_tailp = bp; 15237 } 15238 } 15239 15240 if (statp == NULL) { 15241 statp = kstat_waitq_enter; 15242 } 15243 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15244 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 15245 } 15246 15247 done: 15248 if (statp != NULL) { 15249 SD_UPDATE_KSTATS(un, statp, bp); 15250 } 15251 15252 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15253 "sd_set_retry_bp: exit un:0x%p\n", un); 15254 } 15255 15256 15257 /* 15258 * Function: sd_start_retry_command 15259 * 15260 * Description: Start the command that has been waiting on the target's 15261 * retry queue. Called from timeout(9F) context after the 15262 * retry delay interval has expired. 
15263 * 15264 * Arguments: arg - pointer to associated softstate for the device. 15265 * 15266 * Context: timeout(9F) thread context. May not sleep. 15267 */ 15268 15269 static void 15270 sd_start_retry_command(void *arg) 15271 { 15272 struct sd_lun *un = arg; 15273 15274 ASSERT(un != NULL); 15275 ASSERT(!mutex_owned(SD_MUTEX(un))); 15276 15277 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15278 "sd_start_retry_command: entry\n"); 15279 15280 mutex_enter(SD_MUTEX(un)); 15281 15282 un->un_retry_timeid = NULL; 15283 15284 if (un->un_retry_bp != NULL) { 15285 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15286 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 15287 un, un->un_retry_bp); 15288 sd_start_cmds(un, un->un_retry_bp); 15289 } 15290 15291 mutex_exit(SD_MUTEX(un)); 15292 15293 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15294 "sd_start_retry_command: exit\n"); 15295 } 15296 15297 15298 /* 15299 * Function: sd_start_direct_priority_command 15300 * 15301 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 15302 * received TRAN_BUSY when we called scsi_transport() to send it 15303 * to the underlying HBA. This function is called from timeout(9F) 15304 * context after the delay interval has expired. 15305 * 15306 * Arguments: arg - pointer to associated buf(9S) to be restarted. 15307 * 15308 * Context: timeout(9F) thread context. May not sleep. 15309 */ 15310 15311 static void 15312 sd_start_direct_priority_command(void *arg) 15313 { 15314 struct buf *priority_bp = arg; 15315 struct sd_lun *un; 15316 15317 ASSERT(priority_bp != NULL); 15318 un = SD_GET_UN(priority_bp); 15319 ASSERT(un != NULL); 15320 ASSERT(!mutex_owned(SD_MUTEX(un))); 15321 15322 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15323 "sd_start_direct_priority_command: entry\n"); 15324 15325 mutex_enter(SD_MUTEX(un)); 15326 un->un_direct_priority_timeid = NULL; 15327 sd_start_cmds(un, priority_bp); 15328 mutex_exit(SD_MUTEX(un)); 15329 15330 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15331 "sd_start_direct_priority_command: exit\n"); 15332 } 15333 15334 15335 /* 15336 * Function: sd_send_request_sense_command 15337 * 15338 * Description: Sends a REQUEST SENSE command to the target 15339 * 15340 * Context: May be called from interrupt context. 15341 */ 15342 15343 static void 15344 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 15345 struct scsi_pkt *pktp) 15346 { 15347 ASSERT(bp != NULL); 15348 ASSERT(un != NULL); 15349 ASSERT(mutex_owned(SD_MUTEX(un))); 15350 15351 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 15352 "entry: buf:0x%p\n", bp); 15353 15354 /* 15355 * If we are syncing or dumping, then fail the command to avoid a 15356 * recursive callback into scsi_transport(). Also fail the command 15357 * if we are suspended (legacy behavior). 15358 */ 15359 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 15360 (un->un_state == SD_STATE_DUMPING)) { 15361 sd_return_failed_command(un, bp, EIO); 15362 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15363 "sd_send_request_sense_command: syncing/dumping, exit\n"); 15364 return; 15365 } 15366 15367 /* 15368 * Retry the failed command and don't issue the request sense if: 15369 * 1) the sense buf is busy 15370 * 2) we have 1 or more outstanding commands on the target 15371 * (the sense data will be cleared or invalidated anyway) 15372 * 15373 * Note: There could be an issue with not checking a retry limit here; 15374 * the problem is determining which retry limit to check.
15375 */ 15376 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 15377 /* Don't retry if the command is flagged as non-retryable */ 15378 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15379 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 15380 NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter); 15381 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15382 "sd_send_request_sense_command: " 15383 "at full throttle, retrying exit\n"); 15384 } else { 15385 sd_return_failed_command(un, bp, EIO); 15386 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15387 "sd_send_request_sense_command: " 15388 "at full throttle, non-retryable exit\n"); 15389 } 15390 return; 15391 } 15392 15393 sd_mark_rqs_busy(un, bp); 15394 sd_start_cmds(un, un->un_rqs_bp); 15395 15396 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15397 "sd_send_request_sense_command: exit\n"); 15398 } 15399 15400 15401 /* 15402 * Function: sd_mark_rqs_busy 15403 * 15404 * Description: Indicate that the request sense bp for this instance is 15405 * in use. 15406 * 15407 * Context: May be called under interrupt context 15408 */ 15409 15410 static void 15411 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 15412 { 15413 struct sd_xbuf *sense_xp; 15414 15415 ASSERT(un != NULL); 15416 ASSERT(bp != NULL); 15417 ASSERT(mutex_owned(SD_MUTEX(un))); 15418 ASSERT(un->un_sense_isbusy == 0); 15419 15420 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 15421 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 15422 15423 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 15424 ASSERT(sense_xp != NULL); 15425 15426 SD_INFO(SD_LOG_IO, un, 15427 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 15428 15429 ASSERT(sense_xp->xb_pktp != NULL); 15430 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 15431 == (FLAG_SENSING | FLAG_HEAD)); 15432 15433 un->un_sense_isbusy = 1; 15434 un->un_rqs_bp->b_resid = 0; 15435 sense_xp->xb_pktp->pkt_resid = 0; 15436 sense_xp->xb_pktp->pkt_reason = 0; 15437 15438 /* So we can get back the bp at interrupt time! */ 15439 sense_xp->xb_sense_bp = bp; 15440 15441 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 15442 15443 /* 15444 * Mark this buf as awaiting sense data. (This is already set in 15445 * the pkt_flags for the RQS packet.) 15446 */ 15447 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 15448 15449 sense_xp->xb_retry_count = 0; 15450 sense_xp->xb_victim_retry_count = 0; 15451 sense_xp->xb_ua_retry_count = 0; 15452 sense_xp->xb_dma_resid = 0; 15453 15454 /* Clean up the fields for auto-request sense */ 15455 sense_xp->xb_sense_status = 0; 15456 sense_xp->xb_sense_state = 0; 15457 sense_xp->xb_sense_resid = 0; 15458 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 15459 15460 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 15461 } 15462 15463 15464 /* 15465 * Function: sd_mark_rqs_idle 15466 * 15467 * Description: SD_MUTEX must be held continuously through this routine 15468 * to prevent reuse of the rqs struct before the caller can 15469 * complete its processing.
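 *
 * Arguments: un - ptr to the sd_lun softstate struct for the target.
 *		sense_xp - ptr to the sd_xbuf for the RQS command.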
15470 * 15471 * Return Code: Pointer to the RQS buf 15472 * 15473 * Context: May be called under interrupt context 15474 */ 15475 15476 static struct buf * 15477 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 15478 { 15479 struct buf *bp; 15480 ASSERT(un != NULL); 15481 ASSERT(sense_xp != NULL); 15482 ASSERT(mutex_owned(SD_MUTEX(un))); 15483 ASSERT(un->un_sense_isbusy != 0); 15484 15485 un->un_sense_isbusy = 0; 15486 bp = sense_xp->xb_sense_bp; 15487 sense_xp->xb_sense_bp = NULL; 15488 15489 /* This pkt is no longer interested in getting sense data */ 15490 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 15491 15492 return (bp); 15493 } 15494 15495 15496 15497 /* 15498 * Function: sd_alloc_rqs 15499 * 15500 * Description: Set up the unit to receive auto request sense data 15501 * 15502 * Return Code: DDI_SUCCESS or DDI_FAILURE 15503 * 15504 * Context: Called under attach(9E) context 15505 */ 15506 15507 static int 15508 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 15509 { 15510 struct sd_xbuf *xp; 15511 15512 ASSERT(un != NULL); 15513 ASSERT(!mutex_owned(SD_MUTEX(un))); 15514 ASSERT(un->un_rqs_bp == NULL); 15515 ASSERT(un->un_rqs_pktp == NULL); 15516 15517 /* 15518 * First allocate the required buf and scsi_pkt structs, then set up 15519 * the CDB in the scsi_pkt for a REQUEST SENSE command. 15520 */ 15521 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 15522 SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 15523 if (un->un_rqs_bp == NULL) { 15524 return (DDI_FAILURE); 15525 } 15526 15527 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 15528 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 15529 15530 if (un->un_rqs_pktp == NULL) { 15531 sd_free_rqs(un); 15532 return (DDI_FAILURE); 15533 } 15534 15535 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 15536 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 15537 SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0); 15538 15539 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 15540 15541 /* Set up the other needed members in the ARQ scsi_pkt. */ 15542 un->un_rqs_pktp->pkt_comp = sdintr; 15543 un->un_rqs_pktp->pkt_time = sd_io_time; 15544 un->un_rqs_pktp->pkt_flags |= 15545 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 15546 15547 /* 15548 * Allocate & init the sd_xbuf struct for the RQS command. Do not 15549 * provide any intpkt, destroypkt routines as we take care of 15550 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 15551 */ 15552 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 15553 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 15554 xp->xb_pktp = un->un_rqs_pktp; 15555 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15556 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 15557 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 15558 15559 /* 15560 * Save the pointer to the request sense private bp so it can 15561 * be retrieved in sdintr. 15562 */ 15563 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 15564 ASSERT(un->un_rqs_bp->b_private == xp); 15565 15566 /* 15567 * See if the HBA supports auto-request sense for the specified 15568 * target/lun. If it does, then try to enable it (if not already 15569 * enabled). 15570 * 15571 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 15572 * failure, while for other HBAs (pln) scsi_ifsetcap will always 15573 * return success. However, in both of these cases ARQ is always 15574 * enabled and scsi_ifgetcap will always return true. 
The best approach 15575 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 15576 * 15577 * The 3rd case is an HBA (adp) that always returns enabled on 15578 * scsi_ifgetcap even when ARQ is not enabled; the best approach 15579 * here is to issue a scsi_ifsetcap first, then a scsi_ifgetcap. 15580 * Note: this case is to circumvent the Adaptec bug. (x86 only) 15581 */ 15582 15583 if (un->un_f_is_fibre == TRUE) { 15584 un->un_f_arq_enabled = TRUE; 15585 } else { 15586 #if defined(__i386) || defined(__amd64) 15587 /* 15588 * Circumvent the Adaptec bug, remove this code when 15589 * the bug is fixed 15590 */ 15591 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 15592 #endif 15593 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 15594 case 0: 15595 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15596 "sd_alloc_rqs: HBA supports ARQ\n"); 15597 /* 15598 * ARQ is supported by this HBA but currently is not 15599 * enabled. Attempt to enable it and if successful then 15600 * mark this instance as ARQ enabled. 15601 */ 15602 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 15603 == 1) { 15604 /* Successfully enabled ARQ in the HBA */ 15605 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15606 "sd_alloc_rqs: ARQ enabled\n"); 15607 un->un_f_arq_enabled = TRUE; 15608 } else { 15609 /* Could not enable ARQ in the HBA */ 15610 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15611 "sd_alloc_rqs: failed ARQ enable\n"); 15612 un->un_f_arq_enabled = FALSE; 15613 } 15614 break; 15615 case 1: 15616 /* 15617 * ARQ is supported by this HBA and is already enabled. 15618 * Just mark ARQ as enabled for this instance. 15619 */ 15620 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15621 "sd_alloc_rqs: ARQ already enabled\n"); 15622 un->un_f_arq_enabled = TRUE; 15623 break; 15624 default: 15625 /* 15626 * ARQ is not supported by this HBA; disable it for this 15627 * instance. 15628 */ 15629 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15630 "sd_alloc_rqs: HBA does not support ARQ\n"); 15631 un->un_f_arq_enabled = FALSE; 15632 break; 15633 } 15634 } 15635 15636 return (DDI_SUCCESS); 15637 } 15638 15639 15640 /* 15641 * Function: sd_free_rqs 15642 * 15643 * Description: Cleanup for the per-instance RQS command. 15644 * 15645 * Context: Kernel thread context 15646 */ 15647 15648 static void 15649 sd_free_rqs(struct sd_lun *un) 15650 { 15651 ASSERT(un != NULL); 15652 15653 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 15654 15655 /* 15656 * If consistent memory is bound to a scsi_pkt, the pkt 15657 * has to be destroyed *before* freeing the consistent memory. 15658 * Don't change the sequence of these operations. 15659 * scsi_destroy_pkt() might access memory, which isn't allowed, 15660 * after it was freed in scsi_free_consistent_buf(). 15661 */ 15662 if (un->un_rqs_pktp != NULL) { 15663 scsi_destroy_pkt(un->un_rqs_pktp); 15664 un->un_rqs_pktp = NULL; 15665 } 15666 15667 if (un->un_rqs_bp != NULL) { 15668 kmem_free(SD_GET_XBUF(un->un_rqs_bp), sizeof (struct sd_xbuf)); 15669 scsi_free_consistent_buf(un->un_rqs_bp); 15670 un->un_rqs_bp = NULL; 15671 } 15672 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 15673 } 15674 15675 15676 15677 /* 15678 * Function: sd_reduce_throttle 15679 * 15680 * Description: Reduces the maximum # of outstanding commands on a 15681 * target to the current number of outstanding commands. 15682 * Queues a timeout(9F) callback to restore the limit 15683 * after a specified interval has elapsed. 15684 * Typically used when we get a TRAN_BUSY return code 15685 * back from scsi_transport().
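 *
 * For example (illustrative): with un_throttle == 8 and three
 * commands in transport, a TRAN_BUSY drops un_throttle to 3,
 * first saving the old value 8 in un_busy_throttle (when the
 * adaptive throttle is enabled) so that sd_restore_throttle()
 * can later revert to it.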
15686 * 15687 * Arguments: un - ptr to the sd_lun softstate struct 15688 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 15689 * 15690 * Context: May be called from interrupt context 15691 */ 15692 15693 static void 15694 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 15695 { 15696 ASSERT(un != NULL); 15697 ASSERT(mutex_owned(SD_MUTEX(un))); 15698 ASSERT(un->un_ncmds_in_transport >= 0); 15699 15700 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 15701 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 15702 un, un->un_throttle, un->un_ncmds_in_transport); 15703 15704 if (un->un_throttle > 1) { 15705 if (un->un_f_use_adaptive_throttle == TRUE) { 15706 switch (throttle_type) { 15707 case SD_THROTTLE_TRAN_BUSY: 15708 if (un->un_busy_throttle == 0) { 15709 un->un_busy_throttle = un->un_throttle; 15710 } 15711 break; 15712 case SD_THROTTLE_QFULL: 15713 un->un_busy_throttle = 0; 15714 break; 15715 default: 15716 ASSERT(FALSE); 15717 } 15718 15719 if (un->un_ncmds_in_transport > 0) { 15720 un->un_throttle = un->un_ncmds_in_transport; 15721 } 15722 15723 } else { 15724 if (un->un_ncmds_in_transport == 0) { 15725 un->un_throttle = 1; 15726 } else { 15727 un->un_throttle = un->un_ncmds_in_transport; 15728 } 15729 } 15730 } 15731 15732 /* Reschedule the timeout if none is currently active */ 15733 if (un->un_reset_throttle_timeid == NULL) { 15734 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 15735 un, SD_THROTTLE_RESET_INTERVAL); 15736 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15737 "sd_reduce_throttle: timeout scheduled!\n"); 15738 } 15739 15740 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 15741 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 15742 } 15743 15744 15745 15746 /* 15747 * Function: sd_restore_throttle 15748 * 15749 * Description: Callback function for timeout(9F). Resets the current 15750 * value of un->un_throttle to its default. 15751 * 15752 * Arguments: arg - pointer to associated softstate for the device. 15753 * 15754 * Context: May be called from interrupt context 15755 */ 15756 15757 static void 15758 sd_restore_throttle(void *arg) 15759 { 15760 struct sd_lun *un = arg; 15761 15762 ASSERT(un != NULL); 15763 ASSERT(!mutex_owned(SD_MUTEX(un))); 15764 15765 mutex_enter(SD_MUTEX(un)); 15766 15767 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 15768 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 15769 15770 un->un_reset_throttle_timeid = NULL; 15771 15772 if (un->un_f_use_adaptive_throttle == TRUE) { 15773 /* 15774 * If un_busy_throttle is nonzero, then it contains the 15775 * value that un_throttle was when we got a TRAN_BUSY back 15776 * from scsi_transport(). We want to revert back to this 15777 * value. 15778 * 15779 * In the QFULL case, the throttle limit will incrementally 15780 * increase until it reaches max throttle. 15781 */ 15782 if (un->un_busy_throttle > 0) { 15783 un->un_throttle = un->un_busy_throttle; 15784 un->un_busy_throttle = 0; 15785 } else { 15786 /* 15787 * Increase the throttle by 10% to open the gate slowly; 15788 * schedule another restore if the saved throttle has 15789 * not been reached. 15790 */ 15791 short throttle; 15792 if (sd_qfull_throttle_enable) { 15793 throttle = un->un_throttle + 15794 max((un->un_throttle / 10), 1); 15795 un->un_throttle = 15796 (throttle < un->un_saved_throttle) ?
throttle : un->un_saved_throttle; 15798 if (un->un_throttle < un->un_saved_throttle) { 15799 un->un_reset_throttle_timeid = 15800 timeout(sd_restore_throttle, 15801 un, SD_QFULL_THROTTLE_RESET_INTERVAL); 15802 } 15803 } 15804 } 15805 15806 /* 15807 * If un_throttle has fallen below the low-water mark, we 15808 * restore the maximum value here (and allow it to ratchet 15809 * down again if necessary). 15810 */ 15811 if (un->un_throttle < un->un_min_throttle) { 15812 un->un_throttle = un->un_saved_throttle; 15813 } 15814 } else { 15815 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 15816 "restoring limit from 0x%x to 0x%x\n", 15817 un->un_throttle, un->un_saved_throttle); 15818 un->un_throttle = un->un_saved_throttle; 15819 } 15820 15821 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15822 "sd_restore_throttle: calling sd_start_cmds!\n"); 15823 15824 sd_start_cmds(un, NULL); 15825 15826 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15827 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 15828 un, un->un_throttle); 15829 15830 mutex_exit(SD_MUTEX(un)); 15831 15832 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 15833 } 15834 15835 /* 15836 * Function: sdrunout 15837 * 15838 * Description: Callback routine for scsi_init_pkt when a resource allocation 15839 * fails. 15840 * 15841 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 15842 * soft state instance. 15843 * 15844 * Return Code: The scsi_init_pkt routine allows for the callback function to 15845 * return a 0 indicating the callback should be rescheduled or a 1 15846 * indicating not to reschedule. This routine always returns 1 15847 * because the driver always provides a callback function to 15848 * scsi_init_pkt. This results in a callback always being scheduled 15849 * (via the scsi_init_pkt callback implementation) if a resource 15850 * failure occurs. 15851 * 15852 * Context: This callback function may not block or call routines that block 15853 * 15854 * Note: Using the scsi_init_pkt callback facility can result in an I/O 15855 * request persisting at the head of the list which cannot be 15856 * satisfied even after multiple retries. In the future the driver 15857 * may implement some type of maximum runout count before failing 15858 * an I/O. 15859 */ 15860 15861 static int 15862 sdrunout(caddr_t arg) 15863 { 15864 struct sd_lun *un = (struct sd_lun *)arg; 15865 15866 ASSERT(un != NULL); 15867 ASSERT(!mutex_owned(SD_MUTEX(un))); 15868 15869 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 15870 15871 mutex_enter(SD_MUTEX(un)); 15872 sd_start_cmds(un, NULL); 15873 mutex_exit(SD_MUTEX(un)); 15874 /* 15875 * This callback routine always returns 1 (i.e. do not reschedule) 15876 * because we always specify sdrunout as the callback handler for 15877 * scsi_init_pkt inside the call to sd_start_cmds. 15878 */ 15879 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 15880 return (1); 15881 } 15882 15883 15884 /* 15885 * Function: sdintr 15886 * 15887 * Description: Completion callback routine for scsi_pkt(9S) structs 15888 * sent to the HBA driver via scsi_transport(9F).
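 *
 * Arguments: pktp - ptr to the completed scsi_pkt(9S); its
 *		pkt_private field holds the buf(9S) for the
 *		original command.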
15889 * 15890 * Context: Interrupt context 15891 */ 15892 15893 static void 15894 sdintr(struct scsi_pkt *pktp) 15895 { 15896 struct buf *bp; 15897 struct sd_xbuf *xp; 15898 struct sd_lun *un; 15899 15900 ASSERT(pktp != NULL); 15901 bp = (struct buf *)pktp->pkt_private; 15902 ASSERT(bp != NULL); 15903 xp = SD_GET_XBUF(bp); 15904 ASSERT(xp != NULL); 15905 ASSERT(xp->xb_pktp != NULL); 15906 un = SD_GET_UN(bp); 15907 ASSERT(un != NULL); 15908 ASSERT(!mutex_owned(SD_MUTEX(un))); 15909 15910 #ifdef SD_FAULT_INJECTION 15911 15912 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 15913 /* SD FaultInjection */ 15914 sd_faultinjection(pktp); 15915 15916 #endif /* SD_FAULT_INJECTION */ 15917 15918 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 15919 " xp:0x%p, un:0x%p\n", bp, xp, un); 15920 15921 mutex_enter(SD_MUTEX(un)); 15922 15923 /* Reduce the count of the #commands currently in transport */ 15924 un->un_ncmds_in_transport--; 15925 ASSERT(un->un_ncmds_in_transport >= 0); 15926 15927 /* Increment counter to indicate that the callback routine is active */ 15928 un->un_in_callback++; 15929 15930 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15931 15932 #ifdef SDDEBUG 15933 if (bp == un->un_retry_bp) { 15934 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 15935 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 15936 un, un->un_retry_bp, un->un_ncmds_in_transport); 15937 } 15938 #endif 15939 15940 /* 15941 * If pkt_reason is CMD_DEV_GONE, just fail the command 15942 */ 15943 if (pktp->pkt_reason == CMD_DEV_GONE) { 15944 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15945 "Device is gone\n"); 15946 sd_return_failed_command(un, bp, EIO); 15947 goto exit; 15948 } 15949 15950 /* 15951 * First see if the pkt has auto-request sense data with it.... 15952 * Look at the packet state first so we don't take a performance 15953 * hit looking at the arq enabled flag unless absolutely necessary. 15954 */ 15955 if ((pktp->pkt_state & STATE_ARQ_DONE) && 15956 (un->un_f_arq_enabled == TRUE)) { 15957 /* 15958 * The HBA did an auto request sense for this command so check 15959 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15960 * driver command that should not be retried. 15961 */ 15962 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15963 /* 15964 * Save the relevant sense info into the xp for the 15965 * original cmd. 15966 */ 15967 struct scsi_arq_status *asp; 15968 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15969 xp->xb_sense_status = 15970 *((uchar_t *)(&(asp->sts_rqpkt_status))); 15971 xp->xb_sense_state = asp->sts_rqpkt_state; 15972 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15973 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15974 min(sizeof (struct scsi_extended_sense), 15975 SENSE_LENGTH)); 15976 15977 /* fail the command */ 15978 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15979 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 15980 sd_return_failed_command(un, bp, EIO); 15981 goto exit; 15982 } 15983 15984 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15985 /* 15986 * We want to either retry or fail this command, so free 15987 * the DMA resources here. If we retry the command then 15988 * the DMA resources will be reallocated in sd_start_cmds(). 15989 * Note that when PKT_DMA_PARTIAL is used, this reallocation 15990 * causes the *entire* transfer to start over again from the 15991 * beginning of the request, even for PARTIAL chunks that 15992 * have already transferred successfully. 
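 *
 * (The same free-for-retry logic is applied below, at the
 * not_successful: label, for commands that fail without
 * auto request sense data.)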
15993 */ 15994 if ((un->un_f_is_fibre == TRUE) && 15995 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15996 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15997 scsi_dmafree(pktp); 15998 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15999 } 16000 #endif 16001 16002 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16003 "sdintr: arq done, sd_handle_auto_request_sense\n"); 16004 16005 sd_handle_auto_request_sense(un, bp, xp, pktp); 16006 goto exit; 16007 } 16008 16009 /* Next see if this is the REQUEST SENSE pkt for the instance */ 16010 if (pktp->pkt_flags & FLAG_SENSING) { 16011 /* This pktp is from the unit's REQUEST_SENSE command */ 16012 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16013 "sdintr: sd_handle_request_sense\n"); 16014 sd_handle_request_sense(un, bp, xp, pktp); 16015 goto exit; 16016 } 16017 16018 /* 16019 * Check to see if the command successfully completed as requested; 16020 * this is the most common case (and also the hot performance path). 16021 * 16022 * Requirements for successful completion are: 16023 * pkt_reason is CMD_CMPLT and packet status is status good. 16024 * In addition: 16025 * - A residual of zero indicates successful completion no matter what 16026 * the command is. 16027 * - If the residual is not zero and the command is not a read or 16028 * write, then it's still defined as successful completion. In other 16029 * words, if the command is a read or write the residual must be 16030 * zero for successful completion. 16031 * - If the residual is not zero and the command is a read or 16032 * write, and it's a USCSICMD, then it's still defined as 16033 * successful completion. 16034 */ 16035 if ((pktp->pkt_reason == CMD_CMPLT) && 16036 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 16037 16038 /* 16039 * Since this command is returned with a good status, we 16040 * can reset the count for Sonoma failover. 16041 */ 16042 un->un_sonoma_failure_count = 0; 16043 16044 /* 16045 * Return all USCSI commands on good status 16046 */ 16047 if (pktp->pkt_resid == 0) { 16048 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16049 "sdintr: returning command for resid == 0\n"); 16050 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 16051 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 16052 SD_UPDATE_B_RESID(bp, pktp); 16053 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16054 "sdintr: returning command for resid != 0\n"); 16055 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 16056 SD_UPDATE_B_RESID(bp, pktp); 16057 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16058 "sdintr: returning uscsi command\n"); 16059 } else { 16060 goto not_successful; 16061 } 16062 sd_return_command(un, bp); 16063 16064 /* 16065 * Decrement counter to indicate that the callback routine 16066 * is done. 16067 */ 16068 un->un_in_callback--; 16069 ASSERT(un->un_in_callback >= 0); 16070 mutex_exit(SD_MUTEX(un)); 16071 16072 return; 16073 } 16074 16075 not_successful: 16076 16077 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 16078 /* 16079 * The following is based upon knowledge of the underlying transport 16080 * and its use of DMA resources. This code should be removed when 16081 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 16082 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 16083 * and sd_start_cmds(). 16084 * 16085 * Free any DMA resources associated with this command if there 16086 * is a chance it could be retried or enqueued for later retry. 
16087 * If we keep the DMA binding then mpxio cannot reissue the 16088 * command on another path whenever a path failure occurs. 16089 * 16090 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 16091 * causes the *entire* transfer to start over again from the 16092 * beginning of the request, even for PARTIAL chunks that 16093 * have already transferred successfully. 16094 * 16095 * This is only done for non-uscsi commands (and also skipped for the 16096 * driver's internal RQS command). Also just do this for Fibre Channel 16097 * devices as these are the only ones that support mpxio. 16098 */ 16099 if ((un->un_f_is_fibre == TRUE) && 16100 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 16101 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 16102 scsi_dmafree(pktp); 16103 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 16104 } 16105 #endif 16106 16107 /* 16108 * The command did not successfully complete as requested so check 16109 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 16110 * driver command that should not be retried so just return. If 16111 * FLAG_DIAGNOSE is not set the error will be processed below. 16112 */ 16113 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 16114 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16115 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 16116 /* 16117 * Issue a request sense if a check condition caused the error 16118 * (we handle the auto request sense case above), otherwise 16119 * just fail the command. 16120 */ 16121 if ((pktp->pkt_reason == CMD_CMPLT) && 16122 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 16123 sd_send_request_sense_command(un, bp, pktp); 16124 } else { 16125 sd_return_failed_command(un, bp, EIO); 16126 } 16127 goto exit; 16128 } 16129 16130 /* 16131 * The command did not successfully complete as requested so process 16132 * the error, retry, and/or attempt recovery. 
16133 */ 16134 switch (pktp->pkt_reason) { 16135 case CMD_CMPLT: 16136 switch (SD_GET_PKT_STATUS(pktp)) { 16137 case STATUS_GOOD: 16138 /* 16139 * The command completed successfully with a non-zero 16140 * residual 16141 */ 16142 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16143 "sdintr: STATUS_GOOD \n"); 16144 sd_pkt_status_good(un, bp, xp, pktp); 16145 break; 16146 16147 case STATUS_CHECK: 16148 case STATUS_TERMINATED: 16149 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16150 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 16151 sd_pkt_status_check_condition(un, bp, xp, pktp); 16152 break; 16153 16154 case STATUS_BUSY: 16155 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16156 "sdintr: STATUS_BUSY\n"); 16157 sd_pkt_status_busy(un, bp, xp, pktp); 16158 break; 16159 16160 case STATUS_RESERVATION_CONFLICT: 16161 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16162 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 16163 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16164 break; 16165 16166 case STATUS_QFULL: 16167 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16168 "sdintr: STATUS_QFULL\n"); 16169 sd_pkt_status_qfull(un, bp, xp, pktp); 16170 break; 16171 16172 case STATUS_MET: 16173 case STATUS_INTERMEDIATE: 16174 case STATUS_SCSI2: 16175 case STATUS_INTERMEDIATE_MET: 16176 case STATUS_ACA_ACTIVE: 16177 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 16178 "Unexpected SCSI status received: 0x%x\n", 16179 SD_GET_PKT_STATUS(pktp)); 16180 sd_return_failed_command(un, bp, EIO); 16181 break; 16182 16183 default: 16184 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 16185 "Invalid SCSI status received: 0x%x\n", 16186 SD_GET_PKT_STATUS(pktp)); 16187 sd_return_failed_command(un, bp, EIO); 16188 break; 16189 16190 } 16191 break; 16192 16193 case CMD_INCOMPLETE: 16194 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16195 "sdintr: CMD_INCOMPLETE\n"); 16196 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 16197 break; 16198 case CMD_TRAN_ERR: 16199 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16200 "sdintr: CMD_TRAN_ERR\n"); 16201 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 16202 break; 16203 case CMD_RESET: 16204 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16205 "sdintr: CMD_RESET \n"); 16206 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 16207 break; 16208 case CMD_ABORTED: 16209 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16210 "sdintr: CMD_ABORTED \n"); 16211 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 16212 break; 16213 case CMD_TIMEOUT: 16214 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16215 "sdintr: CMD_TIMEOUT\n"); 16216 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 16217 break; 16218 case CMD_UNX_BUS_FREE: 16219 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16220 "sdintr: CMD_UNX_BUS_FREE \n"); 16221 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 16222 break; 16223 case CMD_TAG_REJECT: 16224 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16225 "sdintr: CMD_TAG_REJECT\n"); 16226 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 16227 break; 16228 default: 16229 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16230 "sdintr: default\n"); 16231 sd_pkt_reason_default(un, bp, xp, pktp); 16232 break; 16233 } 16234 16235 exit: 16236 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 16237 16238 /* Decrement counter to indicate that the callback routine is done. */ 16239 un->un_in_callback--; 16240 ASSERT(un->un_in_callback >= 0); 16241 16242 /* 16243 * At this point, the pkt has been dispatched, ie, it is either 16244 * being re-tried or has been returned to its caller and should 16245 * not be referenced. 
16246  */
16247 
16248 	mutex_exit(SD_MUTEX(un));
16249 }
16250 
16251 
16252 /*
16253  * Function: sd_print_incomplete_msg
16254  *
16255  * Description: Prints the error message for a CMD_INCOMPLETE error.
16256  *
16257  * Arguments: un - ptr to associated softstate for the device.
16258  *		bp - ptr to the buf(9S) for the command.
16259  *		arg - message string ptr
16260  *		code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
16261  *		or SD_NO_RETRY_ISSUED.
16262  *
16263  * Context: May be called under interrupt context
16264  */
16265 
16266 static void
16267 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
16268 {
16269 	struct scsi_pkt *pktp;
16270 	char *msgp;
16271 	char *cmdp = arg;
16272 
16273 	ASSERT(un != NULL);
16274 	ASSERT(mutex_owned(SD_MUTEX(un)));
16275 	ASSERT(bp != NULL);
16276 	ASSERT(arg != NULL);
16277 	pktp = SD_GET_PKTP(bp);
16278 	ASSERT(pktp != NULL);
16279 
16280 	switch (code) {
16281 	case SD_DELAYED_RETRY_ISSUED:
16282 	case SD_IMMEDIATE_RETRY_ISSUED:
16283 		msgp = "retrying";
16284 		break;
16285 	case SD_NO_RETRY_ISSUED:
16286 	default:
16287 		msgp = "giving up";
16288 		break;
16289 	}
16290 
16291 	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
16292 		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16293 		    "incomplete %s- %s\n", cmdp, msgp);
16294 	}
16295 }
16296 
16297 
16298 
16299 /*
16300  * Function: sd_pkt_status_good
16301  *
16302  * Description: Processing for a STATUS_GOOD code in pkt_status.
16303  *
16304  * Context: May be called under interrupt context
16305  */
16306 
16307 static void
16308 sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
16309 	struct sd_xbuf *xp, struct scsi_pkt *pktp)
16310 {
16311 	char *cmdp;
16312 
16313 	ASSERT(un != NULL);
16314 	ASSERT(mutex_owned(SD_MUTEX(un)));
16315 	ASSERT(bp != NULL);
16316 	ASSERT(xp != NULL);
16317 	ASSERT(pktp != NULL);
16318 	ASSERT(pktp->pkt_reason == CMD_CMPLT);
16319 	ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
16320 	ASSERT(pktp->pkt_resid != 0);
16321 
16322 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");
16323 
16324 	SD_UPDATE_ERRSTATS(un, sd_harderrs);
16325 	switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
16326 	case SCMD_READ:
16327 		cmdp = "read";
16328 		break;
16329 	case SCMD_WRITE:
16330 		cmdp = "write";
16331 		break;
16332 	default:
16333 		SD_UPDATE_B_RESID(bp, pktp);
16334 		sd_return_command(un, bp);
16335 		SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
16336 		return;
16337 	}
16338 
16339 	/*
16340 	 * See if we can retry the read/write, preferably immediately.
16341 	 * If retries are exhausted, then sd_retry_command() will update
16342 	 * the b_resid count.
16343 	 */
16344 	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
16345 	    cmdp, EIO, (clock_t)0, NULL);
16346 
16347 	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
16348 }
16349 
16350 
16351 
16352 
16353 
16354 /*
16355  * Function: sd_handle_request_sense
16356  *
16357  * Description: Processing for non-auto Request Sense command.
16358 * 16359 * Arguments: un - ptr to associated softstate 16360 * sense_bp - ptr to buf(9S) for the RQS command 16361 * sense_xp - ptr to the sd_xbuf for the RQS command 16362 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 16363 * 16364 * Context: May be called under interrupt context 16365 */ 16366 16367 static void 16368 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 16369 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 16370 { 16371 struct buf *cmd_bp; /* buf for the original command */ 16372 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 16373 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 16374 16375 ASSERT(un != NULL); 16376 ASSERT(mutex_owned(SD_MUTEX(un))); 16377 ASSERT(sense_bp != NULL); 16378 ASSERT(sense_xp != NULL); 16379 ASSERT(sense_pktp != NULL); 16380 16381 /* 16382 * Note the sense_bp, sense_xp, and sense_pktp here are for the 16383 * RQS command and not the original command. 16384 */ 16385 ASSERT(sense_pktp == un->un_rqs_pktp); 16386 ASSERT(sense_bp == un->un_rqs_bp); 16387 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 16388 (FLAG_SENSING | FLAG_HEAD)); 16389 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 16390 FLAG_SENSING) == FLAG_SENSING); 16391 16392 /* These are the bp, xp, and pktp for the original command */ 16393 cmd_bp = sense_xp->xb_sense_bp; 16394 cmd_xp = SD_GET_XBUF(cmd_bp); 16395 cmd_pktp = SD_GET_PKTP(cmd_bp); 16396 16397 if (sense_pktp->pkt_reason != CMD_CMPLT) { 16398 /* 16399 * The REQUEST SENSE command failed. Release the REQUEST 16400 * SENSE command for re-use, get back the bp for the original 16401 * command, and attempt to re-try the original command if 16402 * FLAG_DIAGNOSE is not set in the original packet. 16403 */ 16404 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16405 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16406 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 16407 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 16408 NULL, NULL, EIO, (clock_t)0, NULL); 16409 return; 16410 } 16411 } 16412 16413 /* 16414 * Save the relevant sense info into the xp for the original cmd. 16415 * 16416 * Note: if the request sense failed the state info will be zero 16417 * as set in sd_mark_rqs_busy() 16418 */ 16419 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 16420 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 16421 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 16422 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, SENSE_LENGTH); 16423 16424 /* 16425 * Free up the RQS command.... 16426 * NOTE: 16427 * Must do this BEFORE calling sd_validate_sense_data! 16428 * sd_validate_sense_data may return the original command in 16429 * which case the pkt will be freed and the flags can no 16430 * longer be touched. 16431 * SD_MUTEX is held through this process until the command 16432 * is dispatched based upon the sense data, so there are 16433 * no race conditions. 16434 */ 16435 (void) sd_mark_rqs_idle(un, sense_xp); 16436 16437 /* 16438 * For a retryable command see if we have valid sense data, if so then 16439 * turn it over to sd_decode_sense() to figure out the right course of 16440 * action. Just fail a non-retryable command. 
16441 */ 16442 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16443 if (sd_validate_sense_data(un, cmd_bp, cmd_xp) == 16444 SD_SENSE_DATA_IS_VALID) { 16445 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 16446 } 16447 } else { 16448 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 16449 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 16450 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 16451 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 16452 sd_return_failed_command(un, cmd_bp, EIO); 16453 } 16454 } 16455 16456 16457 16458 16459 /* 16460 * Function: sd_handle_auto_request_sense 16461 * 16462 * Description: Processing for auto-request sense information. 16463 * 16464 * Arguments: un - ptr to associated softstate 16465 * bp - ptr to buf(9S) for the command 16466 * xp - ptr to the sd_xbuf for the command 16467 * pktp - ptr to the scsi_pkt(9S) for the command 16468 * 16469 * Context: May be called under interrupt context 16470 */ 16471 16472 static void 16473 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 16474 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16475 { 16476 struct scsi_arq_status *asp; 16477 16478 ASSERT(un != NULL); 16479 ASSERT(mutex_owned(SD_MUTEX(un))); 16480 ASSERT(bp != NULL); 16481 ASSERT(xp != NULL); 16482 ASSERT(pktp != NULL); 16483 ASSERT(pktp != un->un_rqs_pktp); 16484 ASSERT(bp != un->un_rqs_bp); 16485 16486 /* 16487 * For auto-request sense, we get a scsi_arq_status back from 16488 * the HBA, with the sense data in the sts_sensedata member. 16489 * The pkt_scbp of the packet points to this scsi_arq_status. 16490 */ 16491 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16492 16493 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 16494 /* 16495 * The auto REQUEST SENSE failed; see if we can re-try 16496 * the original command. 16497 */ 16498 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16499 "auto request sense failed (reason=%s)\n", 16500 scsi_rname(asp->sts_rqpkt_reason)); 16501 16502 sd_reset_target(un, pktp); 16503 16504 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16505 NULL, NULL, EIO, (clock_t)0, NULL); 16506 return; 16507 } 16508 16509 /* Save the relevant sense info into the xp for the original cmd. */ 16510 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 16511 xp->xb_sense_state = asp->sts_rqpkt_state; 16512 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16513 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16514 min(sizeof (struct scsi_extended_sense), SENSE_LENGTH)); 16515 16516 /* 16517 * See if we have valid sense data, if so then turn it over to 16518 * sd_decode_sense() to figure out the right course of action. 16519 */ 16520 if (sd_validate_sense_data(un, bp, xp) == SD_SENSE_DATA_IS_VALID) { 16521 sd_decode_sense(un, bp, xp, pktp); 16522 } 16523 } 16524 16525 16526 /* 16527 * Function: sd_print_sense_failed_msg 16528 * 16529 * Description: Print log message when RQS has failed. 
16530 * 16531 * Arguments: un - ptr to associated softstate 16532 * bp - ptr to buf(9S) for the command 16533 * arg - generic message string ptr 16534 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16535 * or SD_NO_RETRY_ISSUED 16536 * 16537 * Context: May be called from interrupt context 16538 */ 16539 16540 static void 16541 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 16542 int code) 16543 { 16544 char *msgp = arg; 16545 16546 ASSERT(un != NULL); 16547 ASSERT(mutex_owned(SD_MUTEX(un))); 16548 ASSERT(bp != NULL); 16549 16550 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 16551 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 16552 } 16553 } 16554 16555 16556 /* 16557 * Function: sd_validate_sense_data 16558 * 16559 * Description: Check the given sense data for validity. 16560 * If the sense data is not valid, the command will 16561 * be either failed or retried! 16562 * 16563 * Return Code: SD_SENSE_DATA_IS_INVALID 16564 * SD_SENSE_DATA_IS_VALID 16565 * 16566 * Context: May be called from interrupt context 16567 */ 16568 16569 static int 16570 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp) 16571 { 16572 struct scsi_extended_sense *esp; 16573 struct scsi_pkt *pktp; 16574 size_t actual_len; 16575 char *msgp = NULL; 16576 16577 ASSERT(un != NULL); 16578 ASSERT(mutex_owned(SD_MUTEX(un))); 16579 ASSERT(bp != NULL); 16580 ASSERT(bp != un->un_rqs_bp); 16581 ASSERT(xp != NULL); 16582 16583 pktp = SD_GET_PKTP(bp); 16584 ASSERT(pktp != NULL); 16585 16586 /* 16587 * Check the status of the RQS command (auto or manual). 16588 */ 16589 switch (xp->xb_sense_status & STATUS_MASK) { 16590 case STATUS_GOOD: 16591 break; 16592 16593 case STATUS_RESERVATION_CONFLICT: 16594 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16595 return (SD_SENSE_DATA_IS_INVALID); 16596 16597 case STATUS_BUSY: 16598 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16599 "Busy Status on REQUEST SENSE\n"); 16600 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 16601 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 16602 return (SD_SENSE_DATA_IS_INVALID); 16603 16604 case STATUS_QFULL: 16605 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16606 "QFULL Status on REQUEST SENSE\n"); 16607 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 16608 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 16609 return (SD_SENSE_DATA_IS_INVALID); 16610 16611 case STATUS_CHECK: 16612 case STATUS_TERMINATED: 16613 msgp = "Check Condition on REQUEST SENSE\n"; 16614 goto sense_failed; 16615 16616 default: 16617 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 16618 goto sense_failed; 16619 } 16620 16621 /* 16622 * See if we got the minimum required amount of sense data. 16623 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 16624 * or less. 
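	 *
	 * Illustrative example (hypothetical values): if the target moved
	 * only two bytes of sense data, xb_sense_resid is SENSE_LENGTH - 2
	 * and actual_len below works out to 2, which is less than
	 * SUN_MIN_SENSE_LENGTH, so the command takes the sense_failed path.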
16625 	 */
16626 	actual_len = (int)(SENSE_LENGTH - xp->xb_sense_resid);
16627 	if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
16628 	    (actual_len == 0)) {
16629 		msgp = "Request Sense couldn't get sense data\n";
16630 		goto sense_failed;
16631 	}
16632 
16633 	if (actual_len < SUN_MIN_SENSE_LENGTH) {
16634 		msgp = "Not enough sense information\n";
16635 		goto sense_failed;
16636 	}
16637 
16638 	/*
16639 	 * We require the extended sense data
16640 	 */
16641 	esp = (struct scsi_extended_sense *)xp->xb_sense_data;
16642 	if (esp->es_class != CLASS_EXTENDED_SENSE) {
16643 		if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
16644 			static char tmp[8];
16645 			static char buf[148];
16646 			char *p = (char *)(xp->xb_sense_data);
16647 			int i;
16648 
16649 			mutex_enter(&sd_sense_mutex);
16650 			(void) strcpy(buf, "undecodable sense information:");
16651 			for (i = 0; i < actual_len; i++) {
16652 				(void) sprintf(tmp, " 0x%x", *(p++)&0xff);
16653 				(void) strcpy(&buf[strlen(buf)], tmp);
16654 			}
16655 			i = strlen(buf);
16656 			(void) strcpy(&buf[i], "-(assumed fatal)\n");
16657 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf);
16658 			mutex_exit(&sd_sense_mutex);
16659 		}
16660 		/* Note: Legacy behavior, fail the command with no retry */
16661 		sd_return_failed_command(un, bp, EIO);
16662 		return (SD_SENSE_DATA_IS_INVALID);
16663 	}
16664 
16665 	/*
16666 	 * Check that es_code is valid (es_class concatenated with es_code
16667 	 * makes up the "response code" field). es_class will always be 7, so
16668 	 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code indicates the
16669 	 * format.
16670 	 */
16671 	if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
16672 	    (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
16673 	    (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
16674 	    (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
16675 	    (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
16676 		goto sense_failed;
16677 	}
16678 
16679 	return (SD_SENSE_DATA_IS_VALID);
16680 
16681 sense_failed:
16682 	/*
16683 	 * If the request sense failed (for whatever reason), attempt
16684 	 * to retry the original command.
16685 	 */
16686 #if defined(__i386) || defined(__amd64)
16687 	/*
16688 	 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in
16689 	 * sddef.h for the SPARC platform, while x86 uses one binary
16690 	 * for both SCSI and FC.
16691 	 * The SD_RETRY_DELAY value needs to be adjusted here
16692 	 * whenever SD_RETRY_DELAY changes in sddef.h.
16693 	 */
16694 	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
16695 	    sd_print_sense_failed_msg, msgp, EIO,
16696 	    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL);
16697 #else
16698 	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
16699 	    sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
16700 #endif
16701 
16702 	return (SD_SENSE_DATA_IS_INVALID);
16703 }
16704 
16705 
16706 
16707 /*
16708  * Function: sd_decode_sense
16709  *
16710  * Description: Take recovery action(s) when SCSI Sense Data is received.
16711  *
16712  * Context: Interrupt context.
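 *
 *		Note (editorial sketch): the sense key, ASC and ASCQ are read
 *		from format-specific offsets, ds_key/ds_add_code/ds_qual_code
 *		for descriptor-format data and es_key/es_add_code/es_qual_code
 *		for fixed-format (and anything else), before the routine
 *		dispatches on the sense key.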
16713  */
16714 
16715 static void
16716 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
16717 	struct scsi_pkt *pktp)
16718 {
16719 	struct scsi_extended_sense *esp;
16720 	struct scsi_descr_sense_hdr *sdsp;
16721 	uint8_t asc, ascq, sense_key;
16722 
16723 	ASSERT(un != NULL);
16724 	ASSERT(mutex_owned(SD_MUTEX(un)));
16725 	ASSERT(bp != NULL);
16726 	ASSERT(bp != un->un_rqs_bp);
16727 	ASSERT(xp != NULL);
16728 	ASSERT(pktp != NULL);
16729 
16730 	esp = (struct scsi_extended_sense *)xp->xb_sense_data;
16731 
16732 	switch (esp->es_code) {
16733 	case CODE_FMT_DESCR_CURRENT:
16734 	case CODE_FMT_DESCR_DEFERRED:
16735 		sdsp = (struct scsi_descr_sense_hdr *)xp->xb_sense_data;
16736 		sense_key = sdsp->ds_key;
16737 		asc = sdsp->ds_add_code;
16738 		ascq = sdsp->ds_qual_code;
16739 		break;
16740 	case CODE_FMT_VENDOR_SPECIFIC:
16741 	case CODE_FMT_FIXED_CURRENT:
16742 	case CODE_FMT_FIXED_DEFERRED:
16743 	default:
16744 		sense_key = esp->es_key;
16745 		asc = esp->es_add_code;
16746 		ascq = esp->es_qual_code;
16747 		break;
16748 	}
16749 
16750 	switch (sense_key) {
16751 	case KEY_NO_SENSE:
16752 		sd_sense_key_no_sense(un, bp, xp, pktp);
16753 		break;
16754 	case KEY_RECOVERABLE_ERROR:
16755 		sd_sense_key_recoverable_error(un, asc, bp, xp, pktp);
16756 		break;
16757 	case KEY_NOT_READY:
16758 		sd_sense_key_not_ready(un, asc, ascq, bp, xp, pktp);
16759 		break;
16760 	case KEY_MEDIUM_ERROR:
16761 	case KEY_HARDWARE_ERROR:
16762 		sd_sense_key_medium_or_hardware_error(un,
16763 		    sense_key, asc, bp, xp, pktp);
16764 		break;
16765 	case KEY_ILLEGAL_REQUEST:
16766 		sd_sense_key_illegal_request(un, bp, xp, pktp);
16767 		break;
16768 	case KEY_UNIT_ATTENTION:
16769 		sd_sense_key_unit_attention(un, asc, bp, xp, pktp);
16770 		break;
16771 	case KEY_WRITE_PROTECT:
16772 	case KEY_VOLUME_OVERFLOW:
16773 	case KEY_MISCOMPARE:
16774 		sd_sense_key_fail_command(un, bp, xp, pktp);
16775 		break;
16776 	case KEY_BLANK_CHECK:
16777 		sd_sense_key_blank_check(un, bp, xp, pktp);
16778 		break;
16779 	case KEY_ABORTED_COMMAND:
16780 		sd_sense_key_aborted_command(un, bp, xp, pktp);
16781 		break;
16782 	case KEY_VENDOR_UNIQUE:
16783 	case KEY_COPY_ABORTED:
16784 	case KEY_EQUAL:
16785 	case KEY_RESERVED:
16786 	default:
16787 		sd_sense_key_default(un, sense_key, bp, xp, pktp);
16788 		break;
16789 	}
16790 }
16791 
16792 
16793 /*
16794  * Function: sd_dump_memory
16795  *
16796  * Description: Debug logging routine to print the contents of a user-provided
16797  *		buffer. The output of the buffer is broken up into 256 byte
16798  *		segments due to a size constraint of the scsi_log
16799  *		implementation.
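 *
 *		Hypothetical example of the resulting log output for a
 *		six-byte buffer logged with fmt == SD_LOG_HEX and title
 *		"Failed CDB" (bytes invented for illustration):
 *
 *			Failed CDB: 0x28 0x00 0x00 0x00 0x10 0x00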
16800  *
16801  * Arguments: un - ptr to softstate
16802  *		comp - component mask
16803  *		title - "title" string to precede data when printed
16804  *		data - ptr to data block to be printed
16805  *		len - size of data block to be printed
16806  *		fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
16807  *
16808  * Context: May be called from interrupt context
16809  */
16810 
16811 #define	SD_DUMP_MEMORY_BUF_SIZE	256
16812 
16813 static char *sd_dump_format_string[] = {
16814 		" 0x%02x",
16815 		" %c"
16816 };
16817 
16818 static void
16819 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
16820 	int len, int fmt)
16821 {
16822 	int i, j;
16823 	int avail_count;
16824 	int start_offset;
16825 	int end_offset;
16826 	size_t entry_len;
16827 	char *bufp;
16828 	char *local_buf;
16829 	char *format_string;
16830 
16831 	ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));
16832 
16833 	/*
16834 	 * In the debug version of the driver, this function is called from a
16835 	 * number of places which are NOPs in the release driver.
16836 	 * The debug driver therefore has additional methods of filtering
16837 	 * debug output.
16838 	 */
16839 #ifdef SDDEBUG
16840 	/*
16841 	 * In the debug version of the driver we can reduce the amount of debug
16842 	 * messages by setting sd_error_level to something other than
16843 	 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
16844 	 * sd_component_mask.
16845 	 */
16846 	if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
16847 	    (sd_error_level != SCSI_ERR_ALL)) {
16848 		return;
16849 	}
16850 	if (((sd_component_mask & comp) == 0) ||
16851 	    (sd_error_level != SCSI_ERR_ALL)) {
16852 		return;
16853 	}
16854 #else
16855 	if (sd_error_level != SCSI_ERR_ALL) {
16856 		return;
16857 	}
16858 #endif
16859 
16860 	local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
16861 	bufp = local_buf;
16862 	/*
16863 	 * Available length is the length of local_buf[], minus the
16864 	 * length of the title string, minus one for the ":", minus
16865 	 * one for the newline, minus one for the NULL terminator.
16866 	 * This gives the #bytes available for holding the printed
16867 	 * values from the given data buffer.
16868 	 */
16869 	if (fmt == SD_LOG_HEX) {
16870 		format_string = sd_dump_format_string[0];
16871 	} else /* SD_LOG_CHAR */ {
16872 		format_string = sd_dump_format_string[1];
16873 	}
16874 	/*
16875 	 * Available count is the number of elements from the given
16876 	 * data buffer that we can fit into the available length.
16877 	 * This is based upon the size of the format string used.
16878 	 * Make one entry and find its size.
16879 	 */
16880 	(void) sprintf(bufp, format_string, data[0]);
16881 	entry_len = strlen(bufp);
16882 	avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;
16883 
16884 	j = 0;
16885 	while (j < len) {
16886 		bufp = local_buf;
16887 		bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
16888 		start_offset = j;
16889 
16890 		end_offset = start_offset + avail_count;
16891 
16892 		(void) sprintf(bufp, "%s:", title);
16893 		bufp += strlen(bufp);
16894 		for (i = start_offset; ((i < end_offset) && (j < len));
16895 		    i++, j++) {
16896 			(void) sprintf(bufp, format_string, data[i]);
16897 			bufp += entry_len;
16898 		}
16899 		(void) sprintf(bufp, "\n");
16900 
16901 		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
16902 	}
16903 	kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
16904 }
16905 
16906 /*
16907  * Function: sd_print_sense_msg
16908  *
16909  * Description: Log a message based upon the given sense data.
16910  *
16911  * Arguments: un - ptr to associated softstate
16912  *		bp - ptr to buf(9S) for the command
16913  *		arg - ptr to associated sd_sense_info struct
16914  *		code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
16915  *		or SD_NO_RETRY_ISSUED
16916  *
16917  * Context: May be called from interrupt context
16918  */
16919 
16920 static void
16921 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
16922 {
16923 	struct sd_xbuf *xp;
16924 	struct scsi_pkt *pktp;
16925 	struct scsi_extended_sense *sensep;
16926 	daddr_t request_blkno;
16927 	diskaddr_t err_blkno;
16928 	int severity;
16929 	int pfa_flag;
16930 	int fixed_format = TRUE;
16931 	extern struct scsi_key_strings scsi_cmds[];
16932 
16933 	ASSERT(un != NULL);
16934 	ASSERT(mutex_owned(SD_MUTEX(un)));
16935 	ASSERT(bp != NULL);
16936 	xp = SD_GET_XBUF(bp);
16937 	ASSERT(xp != NULL);
16938 	pktp = SD_GET_PKTP(bp);
16939 	ASSERT(pktp != NULL);
16940 	ASSERT(arg != NULL);
16941 
16942 	severity = ((struct sd_sense_info *)(arg))->ssi_severity;
16943 	pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;
16944 
16945 	if ((code == SD_DELAYED_RETRY_ISSUED) ||
16946 	    (code == SD_IMMEDIATE_RETRY_ISSUED)) {
16947 		severity = SCSI_ERR_RETRYABLE;
16948 	}
16949 
16950 	/* Use absolute block number for the request block number */
16951 	request_blkno = xp->xb_blkno;
16952 
16953 	/*
16954 	 * Now try to get the error block number from the sense data
16955 	 */
16956 	sensep = (struct scsi_extended_sense *)xp->xb_sense_data;
16957 	switch (sensep->es_code) {
16958 	case CODE_FMT_DESCR_CURRENT:
16959 	case CODE_FMT_DESCR_DEFERRED:
16960 		err_blkno =
16961 		    sd_extract_sense_info_descr(
16962 			(struct scsi_descr_sense_hdr *)sensep);
16963 		fixed_format = FALSE;
16964 		break;
16965 	case CODE_FMT_FIXED_CURRENT:
16966 	case CODE_FMT_FIXED_DEFERRED:
16967 	case CODE_FMT_VENDOR_SPECIFIC:
16968 	default:
16969 		/*
16970 		 * With the es_valid bit set, we assume that the error
16971 		 * blkno is in the sense data. Also, if xp->xb_blkno is
16972 		 * greater than 0xffffffff then the target *should* have used
16973 		 * a descriptor sense format (or it shouldn't have set
16974 		 * the es_valid bit), and we may as well ignore the
16975 		 * 32-bit value.
16976 		 */
16977 		if ((sensep->es_valid != 0) && (xp->xb_blkno <= 0xffffffff)) {
16978 			err_blkno = (diskaddr_t)
16979 			    ((sensep->es_info_1 << 24) |
16980 			    (sensep->es_info_2 << 16) |
16981 			    (sensep->es_info_3 << 8) |
16982 			    (sensep->es_info_4));
16983 		} else {
16984 			err_blkno = (diskaddr_t)-1;
16985 		}
16986 		break;
16987 	}
16988 
16989 	if (err_blkno == (diskaddr_t)-1) {
16990 		/*
16991 		 * Without the es_valid bit set (for fixed format) or an
16992 		 * information descriptor (for descriptor format) we cannot
16993 		 * be certain of the error blkno, so just use the
16994 		 * request_blkno.
16995 		 */
16996 		err_blkno = (diskaddr_t)request_blkno;
16997 	} else {
16998 		/*
16999 		 * We retrieved the error block number from the information
17000 		 * portion of the sense data.
17001 		 *
17002 		 * For USCSI commands we are better off using the error
17003 		 * block no. as the requested block no. (This is the best
17004 		 * we can estimate.)
17005 		 */
17006 		if ((SD_IS_BUFIO(xp) == FALSE) &&
17007 		    ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
17008 			request_blkno = err_blkno;
17009 		}
17010 	}
17011 
17012 	/*
17013 	 * The following will log the buffer contents for the release driver
17014 	 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
17015 	 * level is set to verbose.
17016 	 */
17017 	sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
17018 	    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
17019 	sd_dump_memory(un, SD_LOG_IO, "Sense Data",
17020 	    (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);
17021 
17022 	if (pfa_flag == FALSE) {
17023 		/* This is normally only set for USCSI */
17024 		if ((pktp->pkt_flags & FLAG_SILENT) != 0) {
17025 			return;
17026 		}
17027 
17028 		if ((SD_IS_BUFIO(xp) == TRUE) &&
17029 		    (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
17030 		    (severity < sd_error_level))) {
17031 			return;
17032 		}
17033 	}
17034 
17035 	/*
17036 	 * If the data is fixed format then check for Sonoma Failover,
17037 	 * and keep a count of how many failed I/Os. We should not have
17038 	 * to worry about Sonoma returning descriptor format sense data,
17039 	 * and asc/ascq are in a different location in descriptor format.
17040 	 */
17041 	if (fixed_format &&
17042 	    (SD_IS_LSI(un)) && (sensep->es_key == KEY_ILLEGAL_REQUEST) &&
17043 	    (sensep->es_add_code == 0x94) && (sensep->es_qual_code == 0x01)) {
17044 		un->un_sonoma_failure_count++;
17045 		if (un->un_sonoma_failure_count > 1) {
17046 			return;
17047 		}
17048 	}
17049 
17050 	scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity,
17051 	    request_blkno, err_blkno, scsi_cmds, sensep,
17052 	    un->un_additional_codes, NULL);
17053 }
17054 
17055 /*
17056  * Function: sd_extract_sense_info_descr
17057  *
17058  * Description: Retrieve "information" field from descriptor format
17059  *		sense data. Iterates through each sense descriptor
17060  *		looking for the information descriptor and returns
17061  *		the information field from that descriptor.
17062  *
17063  * Context: May be called from interrupt context
17064  */
17065 
17066 static diskaddr_t
17067 sd_extract_sense_info_descr(struct scsi_descr_sense_hdr *sdsp)
17068 {
17069 	diskaddr_t result;
17070 	uint8_t *descr_offset;
17071 	int valid_sense_length;
17072 	struct scsi_information_sense_descr *isd;
17073 
17074 	/*
17075 	 * Initialize result to -1 indicating there is no information
17076 	 * descriptor
17077 	 */
17078 	result = (diskaddr_t)-1;
17079 
17080 	/*
17081 	 * The first descriptor will immediately follow the header
17082 	 */
17083 	descr_offset = (uint8_t *)(sdsp+1); /* Pointer arithmetic */
17084 
17085 	/*
17086 	 * Calculate the amount of valid sense data
17087 	 */
17088 	valid_sense_length =
17089 	    min((sizeof (struct scsi_descr_sense_hdr) +
17090 	    sdsp->ds_addl_sense_length),
17091 	    SENSE_LENGTH);
17092 
17093 	/*
17094 	 * Iterate through the list of descriptors, stopping when we
17095 	 * run out of sense data
17096 	 */
17097 	while ((descr_offset + sizeof (struct scsi_information_sense_descr)) <=
17098 	    (uint8_t *)sdsp + valid_sense_length) {
17099 		/*
17100 		 * Check if this is an information descriptor. We can
17101 		 * use the scsi_information_sense_descr structure as a
17102 		 * template since the first two fields are always the
17103 		 * same.
17104 		 */
17105 		isd = (struct scsi_information_sense_descr *)descr_offset;
17106 		if (isd->isd_descr_type == DESCR_INFORMATION) {
17107 			/*
17108 			 * Found an information descriptor. Copy the
17109 			 * information field. There will only be one
17110 			 * information descriptor so we can stop looking.
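			 *
			 * Worked example (hypothetical bytes): an information
			 * field of 00 00 00 00 00 12 34 56 assembles to the
			 * 64-bit value 0x123456; each byte is shifted into
			 * place from most to least significant, exactly as
			 * in the expression below.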
17111 */ 17112 result = 17113 (((diskaddr_t)isd->isd_information[0] << 56) | 17114 ((diskaddr_t)isd->isd_information[1] << 48) | 17115 ((diskaddr_t)isd->isd_information[2] << 40) | 17116 ((diskaddr_t)isd->isd_information[3] << 32) | 17117 ((diskaddr_t)isd->isd_information[4] << 24) | 17118 ((diskaddr_t)isd->isd_information[5] << 16) | 17119 ((diskaddr_t)isd->isd_information[6] << 8) | 17120 ((diskaddr_t)isd->isd_information[7])); 17121 break; 17122 } 17123 17124 /* 17125 * Get pointer to the next descriptor. The "additional 17126 * length" field holds the length of the descriptor except 17127 * for the "type" and "additional length" fields, so 17128 * we need to add 2 to get the total length. 17129 */ 17130 descr_offset += (isd->isd_addl_length + 2); 17131 } 17132 17133 return (result); 17134 } 17135 17136 /* 17137 * Function: sd_sense_key_no_sense 17138 * 17139 * Description: Recovery action when sense data was not received. 17140 * 17141 * Context: May be called from interrupt context 17142 */ 17143 17144 static void 17145 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 17146 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17147 { 17148 struct sd_sense_info si; 17149 17150 ASSERT(un != NULL); 17151 ASSERT(mutex_owned(SD_MUTEX(un))); 17152 ASSERT(bp != NULL); 17153 ASSERT(xp != NULL); 17154 ASSERT(pktp != NULL); 17155 17156 si.ssi_severity = SCSI_ERR_FATAL; 17157 si.ssi_pfa_flag = FALSE; 17158 17159 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17160 17161 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17162 &si, EIO, (clock_t)0, NULL); 17163 } 17164 17165 17166 /* 17167 * Function: sd_sense_key_recoverable_error 17168 * 17169 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 17170 * 17171 * Context: May be called from interrupt context 17172 */ 17173 17174 static void 17175 sd_sense_key_recoverable_error(struct sd_lun *un, 17176 uint8_t asc, 17177 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17178 { 17179 struct sd_sense_info si; 17180 17181 ASSERT(un != NULL); 17182 ASSERT(mutex_owned(SD_MUTEX(un))); 17183 ASSERT(bp != NULL); 17184 ASSERT(xp != NULL); 17185 ASSERT(pktp != NULL); 17186 17187 /* 17188 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 17189 */ 17190 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 17191 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17192 si.ssi_severity = SCSI_ERR_INFO; 17193 si.ssi_pfa_flag = TRUE; 17194 } else { 17195 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17196 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 17197 si.ssi_severity = SCSI_ERR_RECOVERED; 17198 si.ssi_pfa_flag = FALSE; 17199 } 17200 17201 if (pktp->pkt_resid == 0) { 17202 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17203 sd_return_command(un, bp); 17204 return; 17205 } 17206 17207 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17208 &si, EIO, (clock_t)0, NULL); 17209 } 17210 17211 17212 17213 17214 /* 17215 * Function: sd_sense_key_not_ready 17216 * 17217 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
17218  *
17219  * Context: May be called from interrupt context
17220  */
17221 
17222 static void
17223 sd_sense_key_not_ready(struct sd_lun *un,
17224 	uint8_t asc, uint8_t ascq,
17225 	struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
17226 {
17227 	struct sd_sense_info si;
17228 
17229 	ASSERT(un != NULL);
17230 	ASSERT(mutex_owned(SD_MUTEX(un)));
17231 	ASSERT(bp != NULL);
17232 	ASSERT(xp != NULL);
17233 	ASSERT(pktp != NULL);
17234 
17235 	si.ssi_severity = SCSI_ERR_FATAL;
17236 	si.ssi_pfa_flag = FALSE;
17237 
17238 	/*
17239 	 * Update error stats after first NOT READY error. Disks may have
17240 	 * been powered down and may need to be restarted. For CDROMs,
17241 	 * report NOT READY errors only if media is present.
17242 	 */
17243 	if ((ISCD(un) && (un->un_f_geometry_is_valid == TRUE)) ||
17244 	    (xp->xb_retry_count > 0)) {
17245 		SD_UPDATE_ERRSTATS(un, sd_harderrs);
17246 		SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
17247 	}
17248 
17249 	/*
17250 	 * Just fail if the "not ready" retry limit has been reached.
17251 	 */
17252 	if (xp->xb_retry_count >= un->un_notready_retry_count) {
17253 		/* Special check for error message printing for removables. */
17254 		if ((ISREMOVABLE(un)) && (asc == 0x04) &&
17255 		    (ascq >= 0x04)) {
17256 			si.ssi_severity = SCSI_ERR_ALL;
17257 		}
17258 		goto fail_command;
17259 	}
17260 
17261 	/*
17262 	 * Check the ASC and ASCQ in the sense data as needed, to determine
17263 	 * what to do.
17264 	 */
17265 	switch (asc) {
17266 	case 0x04:	/* LOGICAL UNIT NOT READY */
17267 		/*
17268 		 * disk drives that don't spin up result in a very long delay
17269 		 * in format without warning messages. We will log a message
17270 		 * if the error level is set to verbose.
17271 		 */
17272 		if (sd_error_level < SCSI_ERR_RETRYABLE) {
17273 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17274 			    "logical unit not ready, resetting disk\n");
17275 		}
17276 
17277 		/*
17278 		 * There are different requirements for CDROMs and disks for
17279 		 * the number of retries. If a CD-ROM is giving this, it is
17280 		 * probably reading TOC and is in the process of getting
17281 		 * ready, so we should keep on trying for a long time to make
17282 		 * sure that all types of media are taken into account (for
17283 		 * some media the drive takes a long time to read TOC). For
17284 		 * disks we do not want to retry this too many times as this
17285 		 * can cause a long hang in format when the drive refuses to
17286 		 * spin up (a very common failure).
17287 		 */
17288 		switch (ascq) {
17289 		case 0x00:  /* LUN NOT READY, CAUSE NOT REPORTABLE */
17290 			/*
17291 			 * Disk drives frequently refuse to spin up which
17292 			 * results in a very long hang in format without
17293 			 * warning messages.
17294 			 *
17295 			 * Note: This code preserves the legacy behavior of
17296 			 * comparing xb_retry_count against zero for fibre
17297 			 * channel targets instead of comparing against the
17298 			 * un_reset_retry_count value. The reason for this
17299 			 * discrepancy has been so utterly lost beneath the
17300 			 * Sands of Time that even Indiana Jones could not
17301 			 * find it.
17302 			 */
17303 			if (un->un_f_is_fibre == TRUE) {
17304 				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
17305 				    (xp->xb_retry_count > 0)) &&
17306 				    (un->un_startstop_timeid == NULL)) {
17307 					scsi_log(SD_DEVINFO(un), sd_label,
17308 					    CE_WARN, "logical unit not ready, "
17309 					    "resetting disk\n");
17310 					sd_reset_target(un, pktp);
17311 				}
17312 			} else {
17313 				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
17314 				    (xp->xb_retry_count >
17315 				    un->un_reset_retry_count)) &&
17316 				    (un->un_startstop_timeid == NULL)) {
17317 					scsi_log(SD_DEVINFO(un), sd_label,
17318 					    CE_WARN, "logical unit not ready, "
17319 					    "resetting disk\n");
17320 					sd_reset_target(un, pktp);
17321 				}
17322 			}
17323 			break;
17324 
17325 		case 0x01:  /* LUN IS IN PROCESS OF BECOMING READY */
17326 			/*
17327 			 * If the target is in the process of becoming
17328 			 * ready, just proceed with the retry. This can
17329 			 * happen with CD-ROMs that take a long time to
17330 			 * read TOC after a power cycle or reset.
17331 			 */
17332 			goto do_retry;
17333 
17334 		case 0x02:  /* LUN NOT READY, INITIALIZING CMD REQUIRED */
17335 			break;
17336 
17337 		case 0x03:  /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
17338 			/*
17339 			 * Retries cannot help here so just fail right away.
17340 			 */
17341 			goto fail_command;
17342 
17343 		case 0x88:
17344 			/*
17345 			 * Vendor-unique code for T3/T4: it indicates a
17346 			 * path problem in a multipathed config, but as far as
17347 			 * the target driver is concerned it equates to a fatal
17348 			 * error, so we should just fail the command right away
17349 			 * (without printing anything to the console). If this
17350 			 * is not a T3/T4, fall thru to the default recovery
17351 			 * action.
17352 			 * T3/T4 is FC only, don't need to check is_fibre
17353 			 */
17354 			if (SD_IS_T3(un) || SD_IS_T4(un)) {
17355 				sd_return_failed_command(un, bp, EIO);
17356 				return;
17357 			}
17358 			/* FALLTHRU */
17359 
17360 		case 0x04:  /* LUN NOT READY, FORMAT IN PROGRESS */
17361 		case 0x05:  /* LUN NOT READY, REBUILD IN PROGRESS */
17362 		case 0x06:  /* LUN NOT READY, RECALCULATION IN PROGRESS */
17363 		case 0x07:  /* LUN NOT READY, OPERATION IN PROGRESS */
17364 		case 0x08:  /* LUN NOT READY, LONG WRITE IN PROGRESS */
17365 		default:    /* Possible future codes in SCSI spec? */
17366 			/*
17367 			 * For removable-media devices, do not retry if
17368 			 * ASCQ > 2 as these result mostly from USCSI commands
17369 			 * on MMC devices issued to check status of an
17370 			 * operation initiated in immediate mode. Also for
17371 			 * ASCQ >= 4 do not print console messages as these
17372 			 * mainly represent a user-initiated operation
17373 			 * instead of a system failure.
17374 			 */
17375 			if (ISREMOVABLE(un)) {
17376 				si.ssi_severity = SCSI_ERR_ALL;
17377 				goto fail_command;
17378 			}
17379 			break;
17380 		}
17381 
17382 		/*
17383 		 * As part of our recovery attempt for the NOT READY
17384 		 * condition, we issue a START STOP UNIT command. However
17385 		 * we want to wait for a short delay before attempting this
17386 		 * as there may still be more commands coming back from the
17387 		 * target with the check condition. To do this we use
17388 		 * timeout(9F) to call sd_start_stop_unit_callback() after
17389 		 * the delay interval expires. (sd_start_stop_unit_callback()
17390 		 * dispatches sd_start_stop_unit_task(), which will issue
17391 		 * the actual START STOP UNIT command.) The delay interval
17392 		 * is one-half of the delay that we will use to retry the
17393 		 * command that generated the NOT READY condition.
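		 *
		 * (Illustrative arithmetic: the NOT READY retry delay used
		 * by the do_retry path below is SD_BSY_TIMEOUT ticks, so
		 * the START STOP UNIT callback is scheduled at
		 * SD_BSY_TIMEOUT / 2.)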
17394 		 *
17395 		 * Note that we could just dispatch sd_start_stop_unit_task()
17396 		 * from here and allow it to sleep for the delay interval,
17397 		 * but then we would be tying up the taskq thread
17398 		 * unnecessarily for the duration of the delay.
17399 		 *
17400 		 * Do not issue the START STOP UNIT if the current command
17401 		 * is already a START STOP UNIT.
17402 		 */
17403 		if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
17404 			break;
17405 		}
17406 
17407 		/*
17408 		 * Do not schedule the timeout if one is already pending.
17409 		 */
17410 		if (un->un_startstop_timeid != NULL) {
17411 			SD_INFO(SD_LOG_ERROR, un,
17412 			    "sd_sense_key_not_ready: restart already issued to"
17413 			    " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
17414 			    ddi_get_instance(SD_DEVINFO(un)));
17415 			break;
17416 		}
17417 
17418 		/*
17419 		 * Schedule the START STOP UNIT command, then queue the command
17420 		 * for a retry.
17421 		 *
17422 		 * Note: A timeout is not scheduled for this retry because we
17423 		 * want the retry to be serial with the START_STOP_UNIT. The
17424 		 * retry will be started when the START_STOP_UNIT is completed
17425 		 * in sd_start_stop_unit_task.
17426 		 */
17427 		un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
17428 		    un, SD_BSY_TIMEOUT / 2);
17429 		xp->xb_retry_count++;
17430 		sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
17431 		return;
17432 
17433 	case 0x05:	/* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
17434 		if (sd_error_level < SCSI_ERR_RETRYABLE) {
17435 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17436 			    "unit does not respond to selection\n");
17437 		}
17438 		break;
17439 
17440 	case 0x3A:	/* MEDIUM NOT PRESENT */
17441 		if (sd_error_level >= SCSI_ERR_FATAL) {
17442 			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17443 			    "Caddy not inserted in drive\n");
17444 		}
17445 
17446 		sr_ejected(un);
17447 		un->un_mediastate = DKIO_EJECTED;
17448 		/* The state has changed, inform the media watch routines */
17449 		cv_broadcast(&un->un_state_cv);
17450 		/* Just fail if no media is present in the drive. */
17451 		goto fail_command;
17452 
17453 	default:
17454 		if (sd_error_level < SCSI_ERR_RETRYABLE) {
17455 			scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
17456 			    "Unit not Ready. Additional sense code 0x%x\n",
17457 			    asc);
17458 		}
17459 		break;
17460 	}
17461 
17462 do_retry:
17463 
17464 	/*
17465 	 * Retry the command, as some targets may report NOT READY for
17466 	 * several seconds after being reset.
17467 	 */
17468 	xp->xb_retry_count++;
17469 	si.ssi_severity = SCSI_ERR_RETRYABLE;
17470 	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
17471 	    &si, EIO, SD_BSY_TIMEOUT, NULL);
17472 
17473 	return;
17474 
17475 fail_command:
17476 	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
17477 	sd_return_failed_command(un, bp, EIO);
17478 }
17479 
17480 
17481 
17482 /*
17483  * Function: sd_sense_key_medium_or_hardware_error
17484  *
17485  * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
17486  *		sense key.
17487 * 17488 * Context: May be called from interrupt context 17489 */ 17490 17491 static void 17492 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 17493 int sense_key, uint8_t asc, 17494 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17495 { 17496 struct sd_sense_info si; 17497 17498 ASSERT(un != NULL); 17499 ASSERT(mutex_owned(SD_MUTEX(un))); 17500 ASSERT(bp != NULL); 17501 ASSERT(xp != NULL); 17502 ASSERT(pktp != NULL); 17503 17504 si.ssi_severity = SCSI_ERR_FATAL; 17505 si.ssi_pfa_flag = FALSE; 17506 17507 if (sense_key == KEY_MEDIUM_ERROR) { 17508 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 17509 } 17510 17511 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17512 17513 if ((un->un_reset_retry_count != 0) && 17514 (xp->xb_retry_count == un->un_reset_retry_count)) { 17515 mutex_exit(SD_MUTEX(un)); 17516 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 17517 if (un->un_f_allow_bus_device_reset == TRUE) { 17518 17519 boolean_t try_resetting_target = B_TRUE; 17520 17521 /* 17522 * We need to be able to handle specific ASC when we are 17523 * handling a KEY_HARDWARE_ERROR. In particular 17524 * taking the default action of resetting the target may 17525 * not be the appropriate way to attempt recovery. 17526 * Resetting a target because of a single LUN failure 17527 * victimizes all LUNs on that target. 17528 * 17529 * This is true for the LSI arrays, if an LSI 17530 * array controller returns an ASC of 0x84 (LUN Dead) we 17531 * should trust it. 17532 */ 17533 17534 if (sense_key == KEY_HARDWARE_ERROR) { 17535 switch (asc) { 17536 case 0x84: 17537 if (SD_IS_LSI(un)) { 17538 try_resetting_target = B_FALSE; 17539 } 17540 break; 17541 default: 17542 break; 17543 } 17544 } 17545 17546 if (try_resetting_target == B_TRUE) { 17547 int reset_retval = 0; 17548 if (un->un_f_lun_reset_enabled == TRUE) { 17549 SD_TRACE(SD_LOG_IO_CORE, un, 17550 "sd_sense_key_medium_or_hardware_" 17551 "error: issuing RESET_LUN\n"); 17552 reset_retval = 17553 scsi_reset(SD_ADDRESS(un), 17554 RESET_LUN); 17555 } 17556 if (reset_retval == 0) { 17557 SD_TRACE(SD_LOG_IO_CORE, un, 17558 "sd_sense_key_medium_or_hardware_" 17559 "error: issuing RESET_TARGET\n"); 17560 (void) scsi_reset(SD_ADDRESS(un), 17561 RESET_TARGET); 17562 } 17563 } 17564 } 17565 mutex_enter(SD_MUTEX(un)); 17566 } 17567 17568 /* 17569 * This really ought to be a fatal error, but we will retry anyway 17570 * as some drives report this as a spurious error. 17571 */ 17572 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17573 &si, EIO, (clock_t)0, NULL); 17574 } 17575 17576 17577 17578 /* 17579 * Function: sd_sense_key_illegal_request 17580 * 17581 * Description: Recovery actions for a SCSI "Illegal Request" sense key. 
17582 * 17583 * Context: May be called from interrupt context 17584 */ 17585 17586 static void 17587 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 17588 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17589 { 17590 struct sd_sense_info si; 17591 17592 ASSERT(un != NULL); 17593 ASSERT(mutex_owned(SD_MUTEX(un))); 17594 ASSERT(bp != NULL); 17595 ASSERT(xp != NULL); 17596 ASSERT(pktp != NULL); 17597 17598 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17599 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 17600 17601 si.ssi_severity = SCSI_ERR_INFO; 17602 si.ssi_pfa_flag = FALSE; 17603 17604 /* Pointless to retry if the target thinks it's an illegal request */ 17605 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17606 sd_return_failed_command(un, bp, EIO); 17607 } 17608 17609 17610 17611 17612 /* 17613 * Function: sd_sense_key_unit_attention 17614 * 17615 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 17616 * 17617 * Context: May be called from interrupt context 17618 */ 17619 17620 static void 17621 sd_sense_key_unit_attention(struct sd_lun *un, 17622 uint8_t asc, 17623 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17624 { 17625 /* 17626 * For UNIT ATTENTION we allow retries for one minute. Devices 17627 * like Sonoma can return UNIT ATTENTION close to a minute 17628 * under certain conditions. 17629 */ 17630 int retry_check_flag = SD_RETRIES_UA; 17631 struct sd_sense_info si; 17632 17633 ASSERT(un != NULL); 17634 ASSERT(mutex_owned(SD_MUTEX(un))); 17635 ASSERT(bp != NULL); 17636 ASSERT(xp != NULL); 17637 ASSERT(pktp != NULL); 17638 17639 si.ssi_severity = SCSI_ERR_INFO; 17640 si.ssi_pfa_flag = FALSE; 17641 17642 17643 switch (asc) { 17644 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 17645 if (sd_report_pfa != 0) { 17646 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17647 si.ssi_pfa_flag = TRUE; 17648 retry_check_flag = SD_RETRIES_STANDARD; 17649 goto do_retry; 17650 } 17651 break; 17652 17653 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 17654 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 17655 un->un_resvd_status |= 17656 (SD_LOST_RESERVE | SD_WANT_RESERVE); 17657 } 17658 /* FALLTHRU */ 17659 17660 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 17661 if (!ISREMOVABLE(un)) { 17662 break; 17663 } 17664 17665 /* 17666 * When we get a unit attention from a removable-media device, 17667 * it may be in a state that will take a long time to recover 17668 * (e.g., from a reset). Since we are executing in interrupt 17669 * context here, we cannot wait around for the device to come 17670 * back. So hand this command off to sd_media_change_task() 17671 * for deferred processing under taskq thread context. (Note 17672 * that the command still may be failed if a problem is 17673 * encountered at a later time.) 17674 */ 17675 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 17676 KM_NOSLEEP) == 0) { 17677 /* 17678 * Cannot dispatch the request so fail the command. 17679 */ 17680 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17681 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17682 si.ssi_severity = SCSI_ERR_FATAL; 17683 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17684 sd_return_failed_command(un, bp, EIO); 17685 } 17686 /* 17687 * Either the command has been successfully dispatched to a 17688 * task Q for retrying, or the dispatch failed. In either case 17689 * do NOT retry again by calling sd_retry_command. 
This sets up 17690 * two retries of the same command and when one completes and 17691 * frees the resources the other will access freed memory, 17692 * a bad thing. 17693 */ 17694 return; 17695 17696 default: 17697 break; 17698 } 17699 17700 if (!ISREMOVABLE(un)) { 17701 /* 17702 * Do not update these here for removables. For removables 17703 * these stats are updated (1) above if we failed to dispatch 17704 * sd_media_change_task(), or (2) sd_media_change_task() may 17705 * update these later if it encounters an error. 17706 */ 17707 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17708 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17709 } 17710 17711 do_retry: 17712 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 17713 EIO, SD_UA_RETRY_DELAY, NULL); 17714 } 17715 17716 17717 17718 /* 17719 * Function: sd_sense_key_fail_command 17720 * 17721 * Description: Use to fail a command when we don't like the sense key that 17722 * was returned. 17723 * 17724 * Context: May be called from interrupt context 17725 */ 17726 17727 static void 17728 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 17729 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17730 { 17731 struct sd_sense_info si; 17732 17733 ASSERT(un != NULL); 17734 ASSERT(mutex_owned(SD_MUTEX(un))); 17735 ASSERT(bp != NULL); 17736 ASSERT(xp != NULL); 17737 ASSERT(pktp != NULL); 17738 17739 si.ssi_severity = SCSI_ERR_FATAL; 17740 si.ssi_pfa_flag = FALSE; 17741 17742 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17743 sd_return_failed_command(un, bp, EIO); 17744 } 17745 17746 17747 17748 /* 17749 * Function: sd_sense_key_blank_check 17750 * 17751 * Description: Recovery actions for a SCSI "Blank Check" sense key. 17752 * Has no monetary connotation. 17753 * 17754 * Context: May be called from interrupt context 17755 */ 17756 17757 static void 17758 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 17759 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17760 { 17761 struct sd_sense_info si; 17762 17763 ASSERT(un != NULL); 17764 ASSERT(mutex_owned(SD_MUTEX(un))); 17765 ASSERT(bp != NULL); 17766 ASSERT(xp != NULL); 17767 ASSERT(pktp != NULL); 17768 17769 /* 17770 * Blank check is not fatal for removable devices, therefore 17771 * it does not require a console message. 17772 */ 17773 si.ssi_severity = (ISREMOVABLE(un)) ? SCSI_ERR_ALL : SCSI_ERR_FATAL; 17774 si.ssi_pfa_flag = FALSE; 17775 17776 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17777 sd_return_failed_command(un, bp, EIO); 17778 } 17779 17780 17781 17782 17783 /* 17784 * Function: sd_sense_key_aborted_command 17785 * 17786 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 17787 * 17788 * Context: May be called from interrupt context 17789 */ 17790 17791 static void 17792 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 17793 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17794 { 17795 struct sd_sense_info si; 17796 17797 ASSERT(un != NULL); 17798 ASSERT(mutex_owned(SD_MUTEX(un))); 17799 ASSERT(bp != NULL); 17800 ASSERT(xp != NULL); 17801 ASSERT(pktp != NULL); 17802 17803 si.ssi_severity = SCSI_ERR_FATAL; 17804 si.ssi_pfa_flag = FALSE; 17805 17806 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17807 17808 /* 17809 * This really ought to be a fatal error, but we will retry anyway 17810 * as some drives report this as a spurious error. 
17811 */ 17812 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17813 &si, EIO, (clock_t)0, NULL); 17814 } 17815 17816 17817 17818 /* 17819 * Function: sd_sense_key_default 17820 * 17821 * Description: Default recovery action for several SCSI sense keys (basically 17822 * attempts a retry). 17823 * 17824 * Context: May be called from interrupt context 17825 */ 17826 17827 static void 17828 sd_sense_key_default(struct sd_lun *un, 17829 int sense_key, 17830 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17831 { 17832 struct sd_sense_info si; 17833 17834 ASSERT(un != NULL); 17835 ASSERT(mutex_owned(SD_MUTEX(un))); 17836 ASSERT(bp != NULL); 17837 ASSERT(xp != NULL); 17838 ASSERT(pktp != NULL); 17839 17840 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17841 17842 /* 17843 * Undecoded sense key. Attempt retries and hope that will fix 17844 * the problem. Otherwise, we're dead. 17845 */ 17846 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17847 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17848 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 17849 } 17850 17851 si.ssi_severity = SCSI_ERR_FATAL; 17852 si.ssi_pfa_flag = FALSE; 17853 17854 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17855 &si, EIO, (clock_t)0, NULL); 17856 } 17857 17858 17859 17860 /* 17861 * Function: sd_print_retry_msg 17862 * 17863 * Description: Print a message indicating the retry action being taken. 17864 * 17865 * Arguments: un - ptr to associated softstate 17866 * bp - ptr to buf(9S) for the command 17867 * arg - not used. 17868 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17869 * or SD_NO_RETRY_ISSUED 17870 * 17871 * Context: May be called from interrupt context 17872 */ 17873 /* ARGSUSED */ 17874 static void 17875 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 17876 { 17877 struct sd_xbuf *xp; 17878 struct scsi_pkt *pktp; 17879 char *reasonp; 17880 char *msgp; 17881 17882 ASSERT(un != NULL); 17883 ASSERT(mutex_owned(SD_MUTEX(un))); 17884 ASSERT(bp != NULL); 17885 pktp = SD_GET_PKTP(bp); 17886 ASSERT(pktp != NULL); 17887 xp = SD_GET_XBUF(bp); 17888 ASSERT(xp != NULL); 17889 17890 ASSERT(!mutex_owned(&un->un_pm_mutex)); 17891 mutex_enter(&un->un_pm_mutex); 17892 if ((un->un_state == SD_STATE_SUSPENDED) || 17893 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 17894 (pktp->pkt_flags & FLAG_SILENT)) { 17895 mutex_exit(&un->un_pm_mutex); 17896 goto update_pkt_reason; 17897 } 17898 mutex_exit(&un->un_pm_mutex); 17899 17900 /* 17901 * Suppress messages if they are all the same pkt_reason; with 17902 * TQ, many (up to 256) are returned with the same pkt_reason. 17903 * If we are in panic, then suppress the retry messages. 17904 */ 17905 switch (flag) { 17906 case SD_NO_RETRY_ISSUED: 17907 msgp = "giving up"; 17908 break; 17909 case SD_IMMEDIATE_RETRY_ISSUED: 17910 case SD_DELAYED_RETRY_ISSUED: 17911 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 17912 ((pktp->pkt_reason == un->un_last_pkt_reason) && 17913 (sd_error_level != SCSI_ERR_ALL))) { 17914 return; 17915 } 17916 msgp = "retrying command"; 17917 break; 17918 default: 17919 goto update_pkt_reason; 17920 } 17921 17922 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 17923 scsi_rname(pktp->pkt_reason)); 17924 17925 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17926 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 17927 17928 update_pkt_reason: 17929 /* 17930 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 
17931 * This is to prevent multiple console messages for the same failure 17932 * condition. Note that un->un_last_pkt_reason is NOT restored if & 17933 * when the command is retried successfully because there still may be 17934 * more commands coming back with the same value of pktp->pkt_reason. 17935 */ 17936 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 17937 un->un_last_pkt_reason = pktp->pkt_reason; 17938 } 17939 } 17940 17941 17942 /* 17943 * Function: sd_print_cmd_incomplete_msg 17944 * 17945 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 17946 * 17947 * Arguments: un - ptr to associated softstate 17948 * bp - ptr to buf(9S) for the command 17949 * arg - passed to sd_print_retry_msg() 17950 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17951 * or SD_NO_RETRY_ISSUED 17952 * 17953 * Context: May be called from interrupt context 17954 */ 17955 17956 static void 17957 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 17958 int code) 17959 { 17960 dev_info_t *dip; 17961 17962 ASSERT(un != NULL); 17963 ASSERT(mutex_owned(SD_MUTEX(un))); 17964 ASSERT(bp != NULL); 17965 17966 switch (code) { 17967 case SD_NO_RETRY_ISSUED: 17968 /* Command was failed. Someone turned off this target? */ 17969 if (un->un_state != SD_STATE_OFFLINE) { 17970 /* 17971 * Suppress message if we are detaching and 17972 * device has been disconnected 17973 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 17974 * private interface and not part of the DDI 17975 */ 17976 dip = un->un_sd->sd_dev; 17977 if (!(DEVI_IS_DETACHING(dip) && 17978 DEVI_IS_DEVICE_REMOVED(dip))) { 17979 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17980 "disk not responding to selection\n"); 17981 } 17982 New_state(un, SD_STATE_OFFLINE); 17983 } 17984 break; 17985 17986 case SD_DELAYED_RETRY_ISSUED: 17987 case SD_IMMEDIATE_RETRY_ISSUED: 17988 default: 17989 /* Command was successfully queued for retry */ 17990 sd_print_retry_msg(un, bp, arg, code); 17991 break; 17992 } 17993 } 17994 17995 17996 /* 17997 * Function: sd_pkt_reason_cmd_incomplete 17998 * 17999 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 18000 * 18001 * Context: May be called from interrupt context 18002 */ 18003 18004 static void 18005 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 18006 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18007 { 18008 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 18009 18010 ASSERT(un != NULL); 18011 ASSERT(mutex_owned(SD_MUTEX(un))); 18012 ASSERT(bp != NULL); 18013 ASSERT(xp != NULL); 18014 ASSERT(pktp != NULL); 18015 18016 /* Do not do a reset if selection did not complete */ 18017 /* Note: Should this not just check the bit? */ 18018 if (pktp->pkt_state != STATE_GOT_BUS) { 18019 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18020 sd_reset_target(un, pktp); 18021 } 18022 18023 /* 18024 * If the target was not successfully selected, then set 18025 * SD_RETRIES_FAILFAST to indicate that we lost communication 18026 * with the target, and further retries and/or commands are 18027 * likely to take a long time. 
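	 *
	 * (Editorial note: pkt_state is a bit mask, so the bitwise test
	 * below catches every outcome in which STATE_GOT_TARGET was never
	 * set, unlike the equality test against STATE_GOT_BUS earlier in
	 * this routine.)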
18028 */ 18029 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 18030 flag |= SD_RETRIES_FAILFAST; 18031 } 18032 18033 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18034 18035 sd_retry_command(un, bp, flag, 18036 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18037 } 18038 18039 18040 18041 /* 18042 * Function: sd_pkt_reason_cmd_tran_err 18043 * 18044 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 18045 * 18046 * Context: May be called from interrupt context 18047 */ 18048 18049 static void 18050 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 18051 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18052 { 18053 ASSERT(un != NULL); 18054 ASSERT(mutex_owned(SD_MUTEX(un))); 18055 ASSERT(bp != NULL); 18056 ASSERT(xp != NULL); 18057 ASSERT(pktp != NULL); 18058 18059 /* 18060 * Do not reset if we got a parity error, or if 18061 * selection did not complete. 18062 */ 18063 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18064 /* Note: Should this not just check the bit for pkt_state? */ 18065 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 18066 (pktp->pkt_state != STATE_GOT_BUS)) { 18067 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18068 sd_reset_target(un, pktp); 18069 } 18070 18071 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18072 18073 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18074 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18075 } 18076 18077 18078 18079 /* 18080 * Function: sd_pkt_reason_cmd_reset 18081 * 18082 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 18083 * 18084 * Context: May be called from interrupt context 18085 */ 18086 18087 static void 18088 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 18089 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18090 { 18091 ASSERT(un != NULL); 18092 ASSERT(mutex_owned(SD_MUTEX(un))); 18093 ASSERT(bp != NULL); 18094 ASSERT(xp != NULL); 18095 ASSERT(pktp != NULL); 18096 18097 /* The target may still be running the command, so try to reset. */ 18098 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18099 sd_reset_target(un, pktp); 18100 18101 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18102 18103 /* 18104 * If pkt_reason is CMD_RESET chances are that this pkt got 18105 * reset because another target on this bus caused it. The target 18106 * that caused it should get CMD_TIMEOUT with pkt_statistics 18107 * of STAT_TIMEOUT/STAT_DEV_RESET. 18108 */ 18109 18110 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18111 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18112 } 18113 18114 18115 18116 18117 /* 18118 * Function: sd_pkt_reason_cmd_aborted 18119 * 18120 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 18121 * 18122 * Context: May be called from interrupt context 18123 */ 18124 18125 static void 18126 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 18127 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18128 { 18129 ASSERT(un != NULL); 18130 ASSERT(mutex_owned(SD_MUTEX(un))); 18131 ASSERT(bp != NULL); 18132 ASSERT(xp != NULL); 18133 ASSERT(pktp != NULL); 18134 18135 /* The target may still be running the command, so try to reset. */ 18136 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18137 sd_reset_target(un, pktp); 18138 18139 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18140 18141 /* 18142 * If pkt_reason is CMD_ABORTED chances are that this pkt got 18143 * aborted because another target on this bus caused it. The target 18144 * that caused it should get CMD_TIMEOUT with pkt_statistics 18145 * of STAT_TIMEOUT/STAT_DEV_RESET. 
18146 */ 18147 18148 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18149 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18150 } 18151 18152 18153 18154 /* 18155 * Function: sd_pkt_reason_cmd_timeout 18156 * 18157 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 18158 * 18159 * Context: May be called from interrupt context 18160 */ 18161 18162 static void 18163 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 18164 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18165 { 18166 ASSERT(un != NULL); 18167 ASSERT(mutex_owned(SD_MUTEX(un))); 18168 ASSERT(bp != NULL); 18169 ASSERT(xp != NULL); 18170 ASSERT(pktp != NULL); 18171 18172 18173 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18174 sd_reset_target(un, pktp); 18175 18176 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18177 18178 /* 18179 * A command timeout indicates that we could not establish 18180 * communication with the target, so set SD_RETRIES_FAILFAST 18181 * as further retries/commands are likely to take a long time. 18182 */ 18183 sd_retry_command(un, bp, 18184 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 18185 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18186 } 18187 18188 18189 18190 /* 18191 * Function: sd_pkt_reason_cmd_unx_bus_free 18192 * 18193 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 18194 * 18195 * Context: May be called from interrupt context 18196 */ 18197 18198 static void 18199 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 18200 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18201 { 18202 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 18203 18204 ASSERT(un != NULL); 18205 ASSERT(mutex_owned(SD_MUTEX(un))); 18206 ASSERT(bp != NULL); 18207 ASSERT(xp != NULL); 18208 ASSERT(pktp != NULL); 18209 18210 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18211 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18212 18213 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 18214 sd_print_retry_msg : NULL; 18215 18216 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18217 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18218 } 18219 18220 18221 /* 18222 * Function: sd_pkt_reason_cmd_tag_reject 18223 * 18224 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 18225 * 18226 * Context: May be called from interrupt context 18227 */ 18228 18229 static void 18230 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 18231 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18232 { 18233 ASSERT(un != NULL); 18234 ASSERT(mutex_owned(SD_MUTEX(un))); 18235 ASSERT(bp != NULL); 18236 ASSERT(xp != NULL); 18237 ASSERT(pktp != NULL); 18238 18239 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18240 pktp->pkt_flags = 0; 18241 un->un_tagflags = 0; 18242 if (un->un_f_opt_queueing == TRUE) { 18243 un->un_throttle = min(un->un_throttle, 3); 18244 } else { 18245 un->un_throttle = 1; 18246 } 18247 mutex_exit(SD_MUTEX(un)); 18248 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 18249 mutex_enter(SD_MUTEX(un)); 18250 18251 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18252 18253 /* Legacy behavior not to check retry counts here. */ 18254 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 18255 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18256 } 18257 18258 18259 /* 18260 * Function: sd_pkt_reason_default 18261 * 18262 * Description: Default recovery actions for SCSA pkt_reason values that 18263 * do not have more explicit recovery actions. 
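 */

/*
 * Illustrative sketch (not part of the driver): the throttle clamp that
 * sd_pkt_reason_cmd_tag_reject applies above, written as a pure helper.
 * Once tagged queueing is abandoned, at most 3 commands stay in flight
 * if the target still supports queueing, otherwise 1.  The helper name
 * and parameters are hypothetical.
 */
static int
example_tag_reject_throttle(int cur_throttle, int opt_queueing)
{
	if (opt_queueing) {
		/* mirrors the min(un->un_throttle, 3) clamp above */
		return ((cur_throttle < 3) ? cur_throttle : 3);
	}
	return (1);
}

/* (sd_pkt_reason_default header, continued)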
18264 * 18265 * Context: May be called from interrupt context 18266 */ 18267 18268 static void 18269 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 18270 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18271 { 18272 ASSERT(un != NULL); 18273 ASSERT(mutex_owned(SD_MUTEX(un))); 18274 ASSERT(bp != NULL); 18275 ASSERT(xp != NULL); 18276 ASSERT(pktp != NULL); 18277 18278 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18279 sd_reset_target(un, pktp); 18280 18281 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18282 18283 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18284 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18285 } 18286 18287 18288 18289 /* 18290 * Function: sd_pkt_status_check_condition 18291 * 18292 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 18293 * 18294 * Context: May be called from interrupt context 18295 */ 18296 18297 static void 18298 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 18299 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18300 { 18301 ASSERT(un != NULL); 18302 ASSERT(mutex_owned(SD_MUTEX(un))); 18303 ASSERT(bp != NULL); 18304 ASSERT(xp != NULL); 18305 ASSERT(pktp != NULL); 18306 18307 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 18308 "entry: buf:0x%p xp:0x%p\n", bp, xp); 18309 18310 /* 18311 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 18312 * command will be retried after the request sense). Otherwise, retry 18313 * the command. Note: we are issuing the request sense even though the 18314 * retry limit may have been reached for the failed command. 18315 */ 18316 if (un->un_f_arq_enabled == FALSE) { 18317 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 18318 "no ARQ, sending request sense command\n"); 18319 sd_send_request_sense_command(un, bp, pktp); 18320 } else { 18321 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 18322 "ARQ, retrying request sense command\n"); 18323 #if defined(__i386) || defined(__amd64) 18324 /* 18325 * The SD_RETRY_DELAY value needs to be adjusted here 18326 * whenever SD_RETRY_DELAY changes in sddef.h 18327 */ 18328 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 0, 18329 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, 18330 NULL); 18331 #else 18332 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 18333 0, SD_RETRY_DELAY, NULL); 18334 #endif 18335 } 18336 18337 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 18338 } 18339 18340 18341 /* 18342 * Function: sd_pkt_status_busy 18343 * 18344 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 18345 * 18346 * Context: May be called from interrupt context 18347 */ 18348 18349 static void 18350 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 18351 struct scsi_pkt *pktp) 18352 { 18353 ASSERT(un != NULL); 18354 ASSERT(mutex_owned(SD_MUTEX(un))); 18355 ASSERT(bp != NULL); 18356 ASSERT(xp != NULL); 18357 ASSERT(pktp != NULL); 18358 18359 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18360 "sd_pkt_status_busy: entry\n"); 18361 18362 /* If retries are exhausted, just fail the command. */ 18363 if (xp->xb_retry_count >= un->un_busy_retry_count) { 18364 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18365 "device busy too long\n"); 18366 sd_return_failed_command(un, bp, EIO); 18367 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18368 "sd_pkt_status_busy: exit\n"); 18369 return; 18370 } 18371 xp->xb_retry_count++; 18372 18373 /* 18374 * Try to reset the target.
However, we do not want to perform 18375 * more than one reset if the device continues to fail. The reset 18376 * will be performed when the retry count reaches the reset 18377 * threshold. This threshold should be set such that at least 18378 * one retry is issued before the reset is performed. 18379 */ 18380 if (xp->xb_retry_count == 18381 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 18382 int rval = 0; 18383 mutex_exit(SD_MUTEX(un)); 18384 if (un->un_f_allow_bus_device_reset == TRUE) { 18385 /* 18386 * First try to reset the LUN; if we cannot then 18387 * try to reset the target. 18388 */ 18389 if (un->un_f_lun_reset_enabled == TRUE) { 18390 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18391 "sd_pkt_status_busy: RESET_LUN\n"); 18392 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18393 } 18394 if (rval == 0) { 18395 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18396 "sd_pkt_status_busy: RESET_TARGET\n"); 18397 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18398 } 18399 } 18400 if (rval == 0) { 18401 /* 18402 * If the RESET_LUN and/or RESET_TARGET failed, 18403 * try RESET_ALL 18404 */ 18405 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18406 "sd_pkt_status_busy: RESET_ALL\n"); 18407 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 18408 } 18409 mutex_enter(SD_MUTEX(un)); 18410 if (rval == 0) { 18411 /* 18412 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 18413 * At this point we give up & fail the command. 18414 */ 18415 sd_return_failed_command(un, bp, EIO); 18416 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18417 "sd_pkt_status_busy: exit (failed cmd)\n"); 18418 return; 18419 } 18420 } 18421 18422 /* 18423 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 18424 * we have already checked the retry counts above. 18425 */ 18426 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 18427 EIO, SD_BSY_TIMEOUT, NULL); 18428 18429 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18430 "sd_pkt_status_busy: exit\n"); 18431 } 18432 18433 18434 /* 18435 * Function: sd_pkt_status_reservation_conflict 18436 * 18437 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 18438 * command status. 18439 * 18440 * Context: May be called from interrupt context 18441 */ 18442 18443 static void 18444 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 18445 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18446 { 18447 ASSERT(un != NULL); 18448 ASSERT(mutex_owned(SD_MUTEX(un))); 18449 ASSERT(bp != NULL); 18450 ASSERT(xp != NULL); 18451 ASSERT(pktp != NULL); 18452 18453 /* 18454 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 18455 * conflict could be due to various reasons like incorrect keys, not 18456 * registered or not reserved etc. So, we return EACCES to the caller. 18457 */ 18458 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 18459 int cmd = SD_GET_PKT_OPCODE(pktp); 18460 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 18461 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 18462 sd_return_failed_command(un, bp, EACCES); 18463 return; 18464 } 18465 } 18466 18467 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 18468 18469 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 18470 if (sd_failfast_enable != 0) { 18471 /* By definition, we must panic here.... 
*/ 18472 panic("Reservation Conflict"); 18473 /*NOTREACHED*/ 18474 } 18475 SD_ERROR(SD_LOG_IO, un, 18476 "sd_handle_resv_conflict: Disk Reserved\n"); 18477 sd_return_failed_command(un, bp, EACCES); 18478 return; 18479 } 18480 18481 /* 18482 * 1147670: retry only if sd_retry_on_reservation_conflict 18483 * property is set (default is 1). Retries will not succeed 18484 * on a disk reserved by another initiator. HA systems 18485 * may reset this via sd.conf to avoid these retries. 18486 * 18487 * Note: The legacy return code for this failure is EIO, however EACCES 18488 * seems more appropriate for a reservation conflict. 18489 */ 18490 if (sd_retry_on_reservation_conflict == 0) { 18491 SD_ERROR(SD_LOG_IO, un, 18492 "sd_handle_resv_conflict: Device Reserved\n"); 18493 sd_return_failed_command(un, bp, EIO); 18494 return; 18495 } 18496 18497 /* 18498 * Retry the command if we can. 18499 * 18500 * Note: The legacy return code for this failure is EIO, however EACCES 18501 * seems more appropriate for a reservation conflict. 18502 */ 18503 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 18504 (clock_t)2, NULL); 18505 } 18506 18507 18508 18509 /* 18510 * Function: sd_pkt_status_qfull 18511 * 18512 * Description: Handle a QUEUE FULL condition from the target. This can 18513 * occur if the HBA does not handle the queue full condition. 18514 * (Basically this means third-party HBAs as Sun HBAs will 18515 * handle the queue full condition.) Note that if there are 18516 * some commands already in the transport, then the queue full 18517 * has occurred because the queue for this nexus is actually 18518 * full. If there are no commands in the transport, then the 18519 * queue full is resulting from some other initiator or lun 18520 * consuming all the resources at the target. 18521 * 18522 * Context: May be called from interrupt context 18523 */ 18524 18525 static void 18526 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 18527 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18528 { 18529 ASSERT(un != NULL); 18530 ASSERT(mutex_owned(SD_MUTEX(un))); 18531 ASSERT(bp != NULL); 18532 ASSERT(xp != NULL); 18533 ASSERT(pktp != NULL); 18534 18535 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18536 "sd_pkt_status_qfull: entry\n"); 18537 18538 /* 18539 * Just lower the QFULL throttle and retry the command. Note that 18540 * we do not limit the number of retries here. 18541 */ 18542 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 18543 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 18544 SD_RESTART_TIMEOUT, NULL); 18545 18546 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18547 "sd_pkt_status_qfull: exit\n"); 18548 } 18549 18550 18551 /* 18552 * Function: sd_reset_target 18553 * 18554 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 18555 * RESET_TARGET, or RESET_ALL. 18556 * 18557 * Context: May be called under interrupt context. 18558 */ 18559 18560 static void 18561 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 18562 { 18563 int rval = 0; 18564 18565 ASSERT(un != NULL); 18566 ASSERT(mutex_owned(SD_MUTEX(un))); 18567 ASSERT(pktp != NULL); 18568 18569 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 18570 18571 /* 18572 * No need to reset if the transport layer has already done so. 
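 * A packet whose pkt_statistics already carry STAT_BUS_RESET,
 * STAT_DEV_RESET or STAT_ABORTED (checked just below) was cleaned up
 * by the transport, so issuing another reset from here would only add
 * needless bus disruption.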
18573 */ 18574 if ((pktp->pkt_statistics & 18575 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) { 18576 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18577 "sd_reset_target: no reset\n"); 18578 return; 18579 } 18580 18581 mutex_exit(SD_MUTEX(un)); 18582 18583 if (un->un_f_allow_bus_device_reset == TRUE) { 18584 if (un->un_f_lun_reset_enabled == TRUE) { 18585 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18586 "sd_reset_target: RESET_LUN\n"); 18587 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18588 } 18589 if (rval == 0) { 18590 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18591 "sd_reset_target: RESET_TARGET\n"); 18592 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18593 } 18594 } 18595 18596 if (rval == 0) { 18597 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18598 "sd_reset_target: RESET_ALL\n"); 18599 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 18600 } 18601 18602 mutex_enter(SD_MUTEX(un)); 18603 18604 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n"); 18605 } 18606 18607 18608 /* 18609 * Function: sd_media_change_task 18610 * 18611 * Description: Recovery action for a CDROM to become available. 18612 * 18613 * Context: Executes in a taskq() thread context 18614 */ 18615 18616 static void 18617 sd_media_change_task(void *arg) 18618 { 18619 struct scsi_pkt *pktp = arg; 18620 struct sd_lun *un; 18621 struct buf *bp; 18622 struct sd_xbuf *xp; 18623 int err = 0; 18624 int retry_count = 0; 18625 int retry_limit = SD_UNIT_ATTENTION_RETRY/10; 18626 struct sd_sense_info si; 18627 18628 ASSERT(pktp != NULL); 18629 bp = (struct buf *)pktp->pkt_private; 18630 ASSERT(bp != NULL); 18631 xp = SD_GET_XBUF(bp); 18632 ASSERT(xp != NULL); 18633 un = SD_GET_UN(bp); 18634 ASSERT(un != NULL); 18635 ASSERT(!mutex_owned(SD_MUTEX(un))); 18636 ASSERT(ISREMOVABLE(un)); 18637 18638 si.ssi_severity = SCSI_ERR_INFO; 18639 si.ssi_pfa_flag = FALSE; 18640 18641 /* 18642 * When a reset is issued on a CDROM, it takes a long time to 18643 * recover. The first few attempts to read capacity and other things 18644 * related to handling unit attention fail (with an ASC of 0x4 and 18645 * an ASCQ of 0x1). In that case we want to allow enough retries, while 18646 * still limiting the retries in other cases of genuine failures like 18647 * no media in the drive. 18648 */ 18649 while (retry_count++ < retry_limit) { 18650 if ((err = sd_handle_mchange(un)) == 0) { 18651 break; 18652 } 18653 if (err == EAGAIN) { 18654 retry_limit = SD_UNIT_ATTENTION_RETRY; 18655 } 18656 /* Sleep for 0.5 sec. & try again */ 18657 delay(drv_usectohz(500000)); 18658 } 18659 18660 /* 18661 * Dispatch (retry or fail) the original command here, 18662 * along with appropriate console messages.... 18663 * 18664 * Must grab the mutex before calling sd_retry_command, 18665 * sd_print_sense_msg and sd_return_failed_command. 18666 */ 18667 mutex_enter(SD_MUTEX(un)); 18668 if (err != SD_CMD_SUCCESS) { 18669 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18670 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 18671 si.ssi_severity = SCSI_ERR_FATAL; 18672 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 18673 sd_return_failed_command(un, bp, EIO); 18674 } else { 18675 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 18676 &si, EIO, (clock_t)0, NULL); 18677 } 18678 mutex_exit(SD_MUTEX(un)); 18679 } 18680 18681 18682 18683 /* 18684 * Function: sd_handle_mchange 18685 * 18686 * Description: Perform geometry validation & other recovery when a CDROM 18687 * has been removed from the drive.
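 */

/*
 * Illustrative sketch (not part of the driver): the escalating retry
 * limit used by sd_media_change_task above.  The limit starts at
 * SD_UNIT_ATTENTION_RETRY/10 and is widened to the full value only once
 * the failure is known to be EAGAIN (unit becoming ready), so genuine
 * failures such as "no media" give up early.  The helper name and the
 * attempt() callback are hypothetical.
 */
static int
example_mchange_retry(int (*attempt)(void *), void *arg,
    int short_limit, int long_limit)
{
	int err = EIO;
	int limit = short_limit;
	int count = 0;

	while (count++ < limit) {
		if ((err = attempt(arg)) == 0) {
			break;			/* recovered */
		}
		if (err == EAGAIN) {
			limit = long_limit;	/* unit is coming ready */
		}
		/* the driver sleeps ~0.5 sec here via delay() */
	}
	return (err);
}

/* (sd_handle_mchange header, continued)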
18688 * 18689 * Return Code: 0 for success 18690 * errno-type return code of either sd_send_scsi_DOORLOCK() or 18691 * sd_send_scsi_READ_CAPACITY() 18692 * 18693 * Context: Executes in a taskq() thread context 18694 */ 18695 18696 static int 18697 sd_handle_mchange(struct sd_lun *un) 18698 { 18699 uint64_t capacity; 18700 uint32_t lbasize; 18701 int rval; 18702 18703 ASSERT(!mutex_owned(SD_MUTEX(un))); 18704 ASSERT(ISREMOVABLE(un)); 18705 18706 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 18707 SD_PATH_DIRECT_PRIORITY)) != 0) { 18708 return (rval); 18709 } 18710 18711 mutex_enter(SD_MUTEX(un)); 18712 sd_update_block_info(un, lbasize, capacity); 18713 18714 if (un->un_errstats != NULL) { 18715 struct sd_errstats *stp = 18716 (struct sd_errstats *)un->un_errstats->ks_data; 18717 stp->sd_capacity.value.ui64 = (uint64_t) 18718 ((uint64_t)un->un_blockcount * 18719 (uint64_t)un->un_tgt_blocksize); 18720 } 18721 18722 /* 18723 * Note: Maybe let the strategy/partitioning chain worry about getting 18724 * valid geometry. 18725 */ 18726 un->un_f_geometry_is_valid = FALSE; 18727 (void) sd_validate_geometry(un, SD_PATH_DIRECT_PRIORITY); 18728 if (un->un_f_geometry_is_valid == FALSE) { 18729 mutex_exit(SD_MUTEX(un)); 18730 return (EIO); 18731 } 18732 18733 mutex_exit(SD_MUTEX(un)); 18734 18735 /* 18736 * Try to lock the door 18737 */ 18738 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 18739 SD_PATH_DIRECT_PRIORITY)); 18740 } 18741 18742 18743 /* 18744 * Function: sd_send_scsi_DOORLOCK 18745 * 18746 * Description: Issue the scsi DOOR LOCK command 18747 * 18748 * Arguments: un - pointer to driver soft state (unit) structure for 18749 * this target. 18750 * flag - SD_REMOVAL_ALLOW 18751 * SD_REMOVAL_PREVENT 18752 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18753 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18754 * to use the USCSI "direct" chain and bypass the normal 18755 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18756 * command is issued as part of an error recovery action. 18757 * 18758 * Return Code: 0 - Success 18759 * errno return code from sd_send_scsi_cmd() 18760 * 18761 * Context: Can sleep. 
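 */

/*
 * Illustrative sketch (not part of the driver): roughly how a userland
 * program could issue the same PREVENT/ALLOW MEDIUM REMOVAL command via
 * the uscsi(7I) pass-through ioctl that this routine rides on
 * internally.  The device path and helper name are examples only, and
 * error handling is minimal.
 */
#if 0	/* userland example; never compiled into the driver */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <stropts.h>
#include <sys/scsi/impl/uscsi.h>

static int
example_doorlock(const char *rdsk_path, int prevent)
{
	struct uscsi_cmd ucmd;
	char cdb[6] = { 0x1E, 0, 0, 0, 0, 0 };	/* PREVENT/ALLOW (SCMD_DOORLOCK) */
	int fd, rv;

	cdb[4] = prevent ? 1 : 0;	/* byte 4: 1 = prevent, 0 = allow */
	(void) memset(&ucmd, 0, sizeof (ucmd));
	ucmd.uscsi_cdb = cdb;
	ucmd.uscsi_cdblen = 6;		/* CDB_GROUP0 */
	ucmd.uscsi_flags = USCSI_SILENT;
	ucmd.uscsi_timeout = 15;

	if ((fd = open(rdsk_path, O_RDONLY)) < 0)
		return (-1);
	rv = ioctl(fd, USCSICMD, &ucmd);
	(void) close(fd);
	return (rv);
}
#endif

/* (sd_send_scsi_DOORLOCK header, continued)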
18762 */ 18763 18764 static int 18765 sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag) 18766 { 18767 union scsi_cdb cdb; 18768 struct uscsi_cmd ucmd_buf; 18769 struct scsi_extended_sense sense_buf; 18770 int status; 18771 18772 ASSERT(un != NULL); 18773 ASSERT(!mutex_owned(SD_MUTEX(un))); 18774 18775 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un); 18776 18777 /* already determined doorlock is not supported, fake success */ 18778 if (un->un_f_doorlock_supported == FALSE) { 18779 return (0); 18780 } 18781 18782 bzero(&cdb, sizeof (cdb)); 18783 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18784 18785 cdb.scc_cmd = SCMD_DOORLOCK; 18786 cdb.cdb_opaque[4] = (uchar_t)flag; 18787 18788 ucmd_buf.uscsi_cdb = (char *)&cdb; 18789 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 18790 ucmd_buf.uscsi_bufaddr = NULL; 18791 ucmd_buf.uscsi_buflen = 0; 18792 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18793 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 18794 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 18795 ucmd_buf.uscsi_timeout = 15; 18796 18797 SD_TRACE(SD_LOG_IO, un, 18798 "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n"); 18799 18800 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 18801 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 18802 18803 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) && 18804 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18805 (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) { 18806 /* fake success and skip subsequent doorlock commands */ 18807 un->un_f_doorlock_supported = FALSE; 18808 return (0); 18809 } 18810 18811 return (status); 18812 } 18813 18814 18815 /* 18816 * Function: sd_send_scsi_READ_CAPACITY 18817 * 18818 * Description: This routine uses the scsi READ CAPACITY command to determine 18819 * the device capacity in number of blocks and the device native 18820 * block size. If this function returns a failure, then the 18821 * values in *capp and *lbap are undefined. If the capacity 18822 * returned is 0xffffffff then the lun is too large for a 18823 * normal READ CAPACITY command and the results of a 18824 * READ CAPACITY 16 will be used instead. 18825 * 18826 * Arguments: un - ptr to soft state struct for the target 18827 * capp - ptr to unsigned 64-bit variable to receive the 18828 * capacity value from the command. 18829 * lbap - ptr to unsigned 32-bit variable to receive the 18830 * block size value from the command 18831 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18832 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18833 * to use the USCSI "direct" chain and bypass the normal 18834 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18835 * command is issued as part of an error recovery action. 18836 * 18837 * Return Code: 0 - Success 18838 * EIO - IO error 18839 * EACCES - Reservation conflict detected 18840 * EAGAIN - Device is becoming ready 18841 * errno return code from sd_send_scsi_cmd() 18842 * 18843 * Context: Can sleep. Blocks until command completes.
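 */

/*
 * Illustrative sketch (not part of the driver): decoding the 8-byte
 * READ CAPACITY(10) response described above.  Both fields are
 * big-endian, and the returned address is that of the LAST logical
 * block, so the block count is that value plus one.  An address of
 * 0xFFFFFFFF means the caller must fall back to READ CAPACITY(16).
 * The helper name is hypothetical.
 */
static int
example_decode_read_cap10(const uchar_t d[8], uint64_t *nblks,
    uint32_t *blksize)
{
	uint32_t lastlba = ((uint32_t)d[0] << 24) | ((uint32_t)d[1] << 16) |
	    ((uint32_t)d[2] << 8) | d[3];

	*blksize = ((uint32_t)d[4] << 24) | ((uint32_t)d[5] << 16) |
	    ((uint32_t)d[6] << 8) | d[7];
	if (lastlba == 0xFFFFFFFF) {
		return (-1);	/* too large; use READ CAPACITY(16) */
	}
	*nblks = (uint64_t)lastlba + 1;
	return (0);
}

/* (sd_send_scsi_READ_CAPACITY header, continued)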
18844 */ 18845 18846 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 18847 18848 static int 18849 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap, 18850 int path_flag) 18851 { 18852 struct scsi_extended_sense sense_buf; 18853 struct uscsi_cmd ucmd_buf; 18854 union scsi_cdb cdb; 18855 uint32_t *capacity_buf; 18856 uint64_t capacity; 18857 uint32_t lbasize; 18858 int status; 18859 18860 ASSERT(un != NULL); 18861 ASSERT(!mutex_owned(SD_MUTEX(un))); 18862 ASSERT(capp != NULL); 18863 ASSERT(lbap != NULL); 18864 18865 SD_TRACE(SD_LOG_IO, un, 18866 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 18867 18868 /* 18869 * First send a READ_CAPACITY command to the target. 18870 * (This command is mandatory under SCSI-2.) 18871 * 18872 * Set up the CDB for the READ_CAPACITY command. The Partial 18873 * Medium Indicator bit is cleared. The address field must be 18874 * zero if the PMI bit is zero. 18875 */ 18876 bzero(&cdb, sizeof (cdb)); 18877 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18878 18879 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 18880 18881 cdb.scc_cmd = SCMD_READ_CAPACITY; 18882 18883 ucmd_buf.uscsi_cdb = (char *)&cdb; 18884 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18885 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 18886 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 18887 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18888 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 18889 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18890 ucmd_buf.uscsi_timeout = 60; 18891 18892 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 18893 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 18894 18895 switch (status) { 18896 case 0: 18897 /* Return failure if we did not get valid capacity data. */ 18898 if (ucmd_buf.uscsi_resid != 0) { 18899 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18900 return (EIO); 18901 } 18902 18903 /* 18904 * Read capacity and block size from the READ CAPACITY 10 data. 18905 * This data may be adjusted later due to device specific 18906 * issues. 18907 * 18908 * According to the SCSI spec, the READ CAPACITY 10 18909 * command returns the following: 18910 * 18911 * bytes 0-3: Maximum logical block address available. 18912 * (MSB in byte:0 & LSB in byte:3) 18913 * 18914 * bytes 4-7: Block length in bytes 18915 * (MSB in byte:4 & LSB in byte:7) 18916 * 18917 */ 18918 capacity = BE_32(capacity_buf[0]); 18919 lbasize = BE_32(capacity_buf[1]); 18920 18921 /* 18922 * Done with capacity_buf 18923 */ 18924 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18925 18926 /* 18927 * if the reported capacity is set to all 0xf's, then 18928 * this disk is too large and requires SBC-2 commands. 18929 * Reissue the request using READ CAPACITY 16. 18930 */ 18931 if (capacity == 0xffffffff) { 18932 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 18933 &lbasize, path_flag); 18934 if (status != 0) { 18935 return (status); 18936 } 18937 } 18938 break; /* Success! 
*/ 18939 case EIO: 18940 switch (ucmd_buf.uscsi_status) { 18941 case STATUS_RESERVATION_CONFLICT: 18942 status = EACCES; 18943 break; 18944 case STATUS_CHECK: 18945 /* 18946 * Check condition; look for ASC/ASCQ of 0x04/0x01 18947 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 18948 */ 18949 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 18950 (sense_buf.es_add_code == 0x04) && 18951 (sense_buf.es_qual_code == 0x01)) { 18952 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18953 return (EAGAIN); 18954 } 18955 break; 18956 default: 18957 break; 18958 } 18959 /* FALLTHRU */ 18960 default: 18961 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18962 return (status); 18963 } 18964 18965 /* 18966 * Some ATAPI CD-ROM drives report inaccurate LBA size values 18967 * (2352 and 0 are common) so for these devices always force the value 18968 * to 2048 as required by the ATAPI specs. 18969 */ 18970 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) { 18971 lbasize = 2048; 18972 } 18973 18974 /* 18975 * Get the maximum LBA value from the READ CAPACITY data. 18976 * Here we assume that the Partial Medium Indicator (PMI) bit 18977 * was cleared when issuing the command. This means that the LBA 18978 * returned from the device is the LBA of the last logical block 18979 * on the logical unit. The actual logical block count will be 18980 * this value plus one. 18981 * 18982 * Currently the capacity is saved in terms of un->un_sys_blocksize, 18983 * so scale the capacity value to reflect this. 18984 */ 18985 capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize); 18986 18987 #if defined(__i386) || defined(__amd64) 18988 /* 18989 * On x86, compensate for off-by-1 error (number of sectors on 18990 * media) (1175930) 18991 */ 18992 if (!ISREMOVABLE(un) && (lbasize == un->un_sys_blocksize)) { 18993 capacity -= 1; 18994 } 18995 #endif 18996 18997 /* 18998 * Copy the values from the READ CAPACITY command into the space 18999 * provided by the caller. 19000 */ 19001 *capp = capacity; 19002 *lbap = lbasize; 19003 19004 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: " 19005 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 19006 19007 /* 19008 * Both the lbasize and capacity from the device must be nonzero, 19009 * otherwise we assume that the values are not valid and return 19010 * failure to the caller. (4203735) 19011 */ 19012 if ((capacity == 0) || (lbasize == 0)) { 19013 return (EIO); 19014 } 19015 19016 return (0); 19017 } 19018 19019 /* 19020 * Function: sd_send_scsi_READ_CAPACITY_16 19021 * 19022 * Description: This routine uses the scsi READ CAPACITY 16 command to 19023 * determine the device capacity in number of blocks and the 19024 * device native block size. If this function returns a failure, 19025 * then the values in *capp and *lbap are undefined. 19026 * This routine should always be called by 19027 * sd_send_scsi_READ_CAPACITY which will apply any device 19028 * specific adjustments to capacity and lbasize. 19029 * 19030 * Arguments: un - ptr to soft state struct for the target 19031 * capp - ptr to unsigned 64-bit variable to receive the 19032 * capacity value from the command. 19033 * lbap - ptr to unsigned 32-bit variable to receive the 19034 * block size value from the command 19035 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19036 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19037 * to use the USCSI "direct" chain and bypass the normal 19038 * command waitq.
SD_PATH_DIRECT_PRIORITY is used when 19039 * this command is issued as part of an error recovery 19040 * action. 19041 * 19042 * Return Code: 0 - Success 19043 * EIO - IO error 19044 * EACCES - Reservation conflict detected 19045 * EAGAIN - Device is becoming ready 19046 * errno return code from sd_send_scsi_cmd() 19047 * 19048 * Context: Can sleep. Blocks until command completes. 19049 */ 19050 19051 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16) 19052 19053 static int 19054 sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp, 19055 uint32_t *lbap, int path_flag) 19056 { 19057 struct scsi_extended_sense sense_buf; 19058 struct uscsi_cmd ucmd_buf; 19059 union scsi_cdb cdb; 19060 uint64_t *capacity16_buf; 19061 uint64_t capacity; 19062 uint32_t lbasize; 19063 int status; 19064 19065 ASSERT(un != NULL); 19066 ASSERT(!mutex_owned(SD_MUTEX(un))); 19067 ASSERT(capp != NULL); 19068 ASSERT(lbap != NULL); 19069 19070 SD_TRACE(SD_LOG_IO, un, 19071 "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un); 19072 19073 /* 19074 * First send a READ_CAPACITY_16 command to the target. 19075 * 19076 * Set up the CDB for the READ_CAPACITY_16 command. The Partial 19077 * Medium Indicator bit is cleared. The address field must be 19078 * zero if the PMI bit is zero. 19079 */ 19080 bzero(&cdb, sizeof (cdb)); 19081 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19082 19083 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP); 19084 19085 ucmd_buf.uscsi_cdb = (char *)&cdb; 19086 ucmd_buf.uscsi_cdblen = CDB_GROUP4; 19087 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf; 19088 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE; 19089 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19090 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 19091 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19092 ucmd_buf.uscsi_timeout = 60; 19093 19094 /* 19095 * Read Capacity (16) is a Service Action In command. One 19096 * command byte (0x9E) is overloaded for multiple operations, 19097 * with the second CDB byte specifying the desired operation. 19098 */ 19099 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4; 19100 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4; 19101 19102 /* 19103 * Fill in allocation length field 19104 */ 19105 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen); 19106 19107 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19108 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 19109 19110 switch (status) { 19111 case 0: 19112 /* Return failure if we did not get valid capacity data. */ 19113 if (ucmd_buf.uscsi_resid > 20) { 19114 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19115 return (EIO); 19116 } 19117 19118 /* 19119 * Read capacity and block size from the READ CAPACITY 16 data. 19120 * This data may be adjusted later due to device specific 19121 * issues. 19122 * 19123 * According to the SCSI spec, the READ CAPACITY 16 19124 * command returns the following: 19125 * 19126 * bytes 0-7: Maximum logical block address available. 19127 * (MSB in byte:0 & LSB in byte:7) 19128 * 19129 * bytes 8-11: Block length in bytes 19130 * (MSB in byte:8 & LSB in byte:11) 19131 * 19132 */ 19133 capacity = BE_64(capacity16_buf[0]); 19134 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]); 19135 19136 /* 19137 * Done with capacity16_buf 19138 */ 19139 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19140 19141 /* 19142 * if the reported capacity is set to all 0xf's, then 19143 * this disk is too large.
This could only happen with 19144 * a device that supports LBAs larger than 64 bits which 19145 * are not defined by any current T10 standards. 19146 */ 19147 if (capacity == 0xffffffffffffffff) { 19148 return (EIO); 19149 } 19150 break; /* Success! */ 19151 case EIO: 19152 switch (ucmd_buf.uscsi_status) { 19153 case STATUS_RESERVATION_CONFLICT: 19154 status = EACCES; 19155 break; 19156 case STATUS_CHECK: 19157 /* 19158 * Check condition; look for ASC/ASCQ of 0x04/0x01 19159 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 19160 */ 19161 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19162 (sense_buf.es_add_code == 0x04) && 19163 (sense_buf.es_qual_code == 0x01)) { 19164 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19165 return (EAGAIN); 19166 } 19167 break; 19168 default: 19169 break; 19170 } 19171 /* FALLTHRU */ 19172 default: 19173 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19174 return (status); 19175 } 19176 19177 *capp = capacity; 19178 *lbap = lbasize; 19179 19180 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 19181 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 19182 19183 return (0); 19184 } 19185 19186 19187 /* 19188 * Function: sd_send_scsi_START_STOP_UNIT 19189 * 19190 * Description: Issue a scsi START STOP UNIT command to the target. 19191 * 19192 * Arguments: un - pointer to driver soft state (unit) structure for 19193 * this target. 19194 * flag - SD_TARGET_START 19195 * SD_TARGET_STOP 19196 * SD_TARGET_EJECT 19197 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19198 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19199 * to use the USCSI "direct" chain and bypass the normal 19200 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19201 * command is issued as part of an error recovery action. 19202 * 19203 * Return Code: 0 - Success 19204 * EIO - IO error 19205 * EACCES - Reservation conflict detected 19206 * ENXIO - Not Ready, medium not present 19207 * errno return code from sd_send_scsi_cmd() 19208 * 19209 * Context: Can sleep. 19210 */ 19211 19212 static int 19213 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 19214 { 19215 struct scsi_extended_sense sense_buf; 19216 union scsi_cdb cdb; 19217 struct uscsi_cmd ucmd_buf; 19218 int status; 19219 19220 ASSERT(un != NULL); 19221 ASSERT(!mutex_owned(SD_MUTEX(un))); 19222 19223 SD_TRACE(SD_LOG_IO, un, 19224 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 19225 19226 if (ISREMOVABLE(un) && 19227 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 19228 (un->un_f_start_stop_supported != TRUE)) { 19229 return (0); 19230 } 19231 19232 bzero(&cdb, sizeof (cdb)); 19233 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19234 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19235 19236 cdb.scc_cmd = SCMD_START_STOP; 19237 cdb.cdb_opaque[4] = (uchar_t)flag; 19238 19239 ucmd_buf.uscsi_cdb = (char *)&cdb; 19240 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19241 ucmd_buf.uscsi_bufaddr = NULL; 19242 ucmd_buf.uscsi_buflen = 0; 19243 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19244 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19245 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19246 ucmd_buf.uscsi_timeout = 200; 19247 19248 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19249 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 19250 19251 switch (status) { 19252 case 0: 19253 break; /* Success! 
*/ 19254 case EIO: 19255 switch (ucmd_buf.uscsi_status) { 19256 case STATUS_RESERVATION_CONFLICT: 19257 status = EACCES; 19258 break; 19259 case STATUS_CHECK: 19260 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 19261 switch (sense_buf.es_key) { 19262 case KEY_ILLEGAL_REQUEST: 19263 status = ENOTSUP; 19264 break; 19265 case KEY_NOT_READY: 19266 if (sense_buf.es_add_code == 0x3A) { 19267 status = ENXIO; 19268 } 19269 break; 19270 default: 19271 break; 19272 } 19273 } 19274 break; 19275 default: 19276 break; 19277 } 19278 break; 19279 default: 19280 break; 19281 } 19282 19283 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 19284 19285 return (status); 19286 } 19287 19288 19289 /* 19290 * Function: sd_start_stop_unit_callback 19291 * 19292 * Description: timeout(9F) callback to begin recovery process for a 19293 * device that has spun down. 19294 * 19295 * Arguments: arg - pointer to associated softstate struct. 19296 * 19297 * Context: Executes in a timeout(9F) thread context 19298 */ 19299 19300 static void 19301 sd_start_stop_unit_callback(void *arg) 19302 { 19303 struct sd_lun *un = arg; 19304 ASSERT(un != NULL); 19305 ASSERT(!mutex_owned(SD_MUTEX(un))); 19306 19307 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 19308 19309 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 19310 } 19311 19312 19313 /* 19314 * Function: sd_start_stop_unit_task 19315 * 19316 * Description: Recovery procedure when a drive is spun down. 19317 * 19318 * Arguments: arg - pointer to associated softstate struct. 19319 * 19320 * Context: Executes in a taskq() thread context 19321 */ 19322 19323 static void 19324 sd_start_stop_unit_task(void *arg) 19325 { 19326 struct sd_lun *un = arg; 19327 19328 ASSERT(un != NULL); 19329 ASSERT(!mutex_owned(SD_MUTEX(un))); 19330 19331 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 19332 19333 /* 19334 * Some unformatted drives report a not ready error; there is no need 19335 * to restart if a format has been initiated. 19336 */ 19337 mutex_enter(SD_MUTEX(un)); 19338 if (un->un_f_format_in_progress == TRUE) { 19339 mutex_exit(SD_MUTEX(un)); 19340 return; 19341 } 19342 mutex_exit(SD_MUTEX(un)); 19343 19344 /* 19345 * When a START STOP command is issued from here, it is part of a 19346 * failure recovery operation and must be issued before any other 19347 * commands, including any pending retries. Thus it must be sent 19348 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter whether the spin up 19349 * succeeds or not; we will start I/O after the attempt. 19350 */ 19351 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 19352 SD_PATH_DIRECT_PRIORITY); 19353 19354 /* 19355 * The above call blocks until the START_STOP_UNIT command completes. 19356 * Now that it has completed, we must re-try the original IO that 19357 * received the NOT READY condition in the first place. There are 19358 * three possible conditions here: 19359 * 19360 * (1) The original IO is on un_retry_bp. 19361 * (2) The original IO is on the regular wait queue, and un_retry_bp 19362 * is NULL. 19363 * (3) The original IO is on the regular wait queue, and un_retry_bp 19364 * points to some other, unrelated bp. 19365 * 19366 * For each case, we must call sd_start_cmds() with un_retry_bp 19367 * as the argument. If un_retry_bp is NULL, this will initiate 19368 * processing of the regular wait queue. If un_retry_bp is not NULL, 19369 * then this will process the bp on un_retry_bp.
That may or may not 19370 * be the original IO, but that does not matter: the important thing 19371 * is to keep the IO processing going at this point. 19372 * 19373 * Note: This is a very specific error recovery sequence associated 19374 * with a drive that is not spun up. We attempt a START_STOP_UNIT and 19375 * serialize the I/O with completion of the spin-up. 19376 */ 19377 mutex_enter(SD_MUTEX(un)); 19378 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 19379 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n", 19380 un, un->un_retry_bp); 19381 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */ 19382 sd_start_cmds(un, un->un_retry_bp); 19383 mutex_exit(SD_MUTEX(un)); 19384 19385 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n"); 19386 } 19387 19388 19389 /* 19390 * Function: sd_send_scsi_INQUIRY 19391 * 19392 * Description: Issue the scsi INQUIRY command. 19393 * 19394 * Arguments: un 19395 * bufaddr 19396 * buflen 19397 * evpd 19398 * page_code 19399 * residp 19400 * 19401 * Return Code: 0 - Success 19402 * errno return code from sd_send_scsi_cmd() 19403 * 19404 * Context: Can sleep. Does not return until command is completed. 19405 */ 19406 19407 static int 19408 sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen, 19409 uchar_t evpd, uchar_t page_code, size_t *residp) 19410 { 19411 union scsi_cdb cdb; 19412 struct uscsi_cmd ucmd_buf; 19413 int status; 19414 19415 ASSERT(un != NULL); 19416 ASSERT(!mutex_owned(SD_MUTEX(un))); 19417 ASSERT(bufaddr != NULL); 19418 19419 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un); 19420 19421 bzero(&cdb, sizeof (cdb)); 19422 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19423 bzero(bufaddr, buflen); 19424 19425 cdb.scc_cmd = SCMD_INQUIRY; 19426 cdb.cdb_opaque[1] = evpd; 19427 cdb.cdb_opaque[2] = page_code; 19428 FORMG0COUNT(&cdb, buflen); 19429 19430 ucmd_buf.uscsi_cdb = (char *)&cdb; 19431 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19432 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 19433 ucmd_buf.uscsi_buflen = buflen; 19434 ucmd_buf.uscsi_rqbuf = NULL; 19435 ucmd_buf.uscsi_rqlen = 0; 19436 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT; 19437 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */ 19438 19439 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19440 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_DIRECT); 19441 19442 if ((status == 0) && (residp != NULL)) { 19443 *residp = ucmd_buf.uscsi_resid; 19444 } 19445 19446 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n"); 19447 19448 return (status); 19449 } 19450 19451 19452 /* 19453 * Function: sd_send_scsi_TEST_UNIT_READY 19454 * 19455 * Description: Issue the scsi TEST UNIT READY command. 19456 * This routine can be told to set the flag USCSI_DIAGNOSE to 19457 * prevent retrying failed commands. Use this when the intent 19458 * is either to check for device readiness, to clear a Unit 19459 * Attention, or to clear any outstanding sense data. 19460 * However under specific conditions the expected behavior 19461 * is for retries to bring a device ready, so use the flag 19462 * with caution. 19463 * 19464 * Arguments: un 19465 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present 19466 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE. 19467 * 0: don't check for media present; do retries on cmd. (see the illustrative sketch below)
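 */

/*
 * Illustrative sketch (not part of the driver): how the
 * SD_CHECK_FOR_MEDIA path below classifies a failed TUR.  A CHECK
 * CONDITION whose sense data shows KEY_NOT_READY with an ASC of 0x3A
 * ("medium not present") maps to ENXIO; everything else stays EIO.
 * The helper name is hypothetical.
 */
static int
example_classify_tur_failure(uchar_t scsi_status, uchar_t sense_key,
    uchar_t asc)
{
	if ((scsi_status == STATUS_CHECK) &&
	    (sense_key == KEY_NOT_READY) && (asc == 0x3A)) {
		return (ENXIO);		/* medium not present */
	}
	return (EIO);
}

/* (sd_send_scsi_TEST_UNIT_READY header, continued)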
19468 * 19469 * Return Code: 0 - Success 19470 * EIO - IO error 19471 * EACCES - Reservation conflict detected 19472 * ENXIO - Not Ready, medium not present 19473 * errno return code from sd_send_scsi_cmd() 19474 * 19475 * Context: Can sleep. Does not return until command is completed. 19476 */ 19477 19478 static int 19479 sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag) 19480 { 19481 struct scsi_extended_sense sense_buf; 19482 union scsi_cdb cdb; 19483 struct uscsi_cmd ucmd_buf; 19484 int status; 19485 19486 ASSERT(un != NULL); 19487 ASSERT(!mutex_owned(SD_MUTEX(un))); 19488 19489 SD_TRACE(SD_LOG_IO, un, 19490 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un); 19491 19492 /* 19493 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect 19494 * timeouts when they receive a TUR and the queue is not empty. Check 19495 * the configuration flag set during attach (indicating the drive has 19496 * this firmware bug) and un_ncmds_in_transport before issuing the 19497 * TUR. If there are 19498 * pending commands, return success; this is somewhat arbitrary, but it 19499 * is acceptable for non-removables (i.e. the elite1 disks) and 19500 * non-clustering configurations. 19501 */ 19502 if (un->un_f_cfg_tur_check == TRUE) { 19503 mutex_enter(SD_MUTEX(un)); 19504 if (un->un_ncmds_in_transport != 0) { 19505 mutex_exit(SD_MUTEX(un)); 19506 return (0); 19507 } 19508 mutex_exit(SD_MUTEX(un)); 19509 } 19510 19511 bzero(&cdb, sizeof (cdb)); 19512 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19513 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19514 19515 cdb.scc_cmd = SCMD_TEST_UNIT_READY; 19516 19517 ucmd_buf.uscsi_cdb = (char *)&cdb; 19518 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19519 ucmd_buf.uscsi_bufaddr = NULL; 19520 ucmd_buf.uscsi_buflen = 0; 19521 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19522 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19523 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19524 19525 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */ 19526 if ((flag & SD_DONT_RETRY_TUR) != 0) { 19527 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE; 19528 } 19529 ucmd_buf.uscsi_timeout = 60; 19530 19531 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19532 UIO_SYSSPACE, UIO_SYSSPACE, 19533 ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : SD_PATH_STANDARD)); 19534 19535 switch (status) { 19536 case 0: 19537 break; /* Success! */ 19538 case EIO: 19539 switch (ucmd_buf.uscsi_status) { 19540 case STATUS_RESERVATION_CONFLICT: 19541 status = EACCES; 19542 break; 19543 case STATUS_CHECK: 19544 if ((flag & SD_CHECK_FOR_MEDIA) == 0) { 19545 break; 19546 } 19547 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19548 (sense_buf.es_key == KEY_NOT_READY) && 19549 (sense_buf.es_add_code == 0x3A)) { 19550 status = ENXIO; 19551 } 19552 break; 19553 default: 19554 break; 19555 } 19556 break; 19557 default: 19558 break; 19559 } 19560 19561 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n"); 19562 19563 return (status); 19564 } 19565 19566 19567 /* 19568 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN 19569 * 19570 * Description: Issue the scsi PERSISTENT RESERVE IN command. 19571 * 19572 * Arguments: un 19573 * 19574 * Return Code: 0 - Success 19575 * EACCES 19576 * ENOTSUP 19577 * errno return code from sd_send_scsi_cmd() 19578 * 19579 * Context: Can sleep. Does not return until command is completed.
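 */

/*
 * Illustrative sketch (not part of the driver): the layout of the
 * READ KEYS response this routine fetches.  Per SPC-3, bytes 0-3 hold
 * the PR generation, bytes 4-7 the additional length in bytes, and the
 * remainder is a list of 8-byte reservation keys.  The helper name is
 * hypothetical.
 */
static int
example_prin_key_count(const uchar_t *buf, size_t buflen)
{
	uint32_t addlen;

	if (buflen < 8) {
		return (-1);		/* truncated header */
	}
	addlen = ((uint32_t)buf[4] << 24) | ((uint32_t)buf[5] << 16) |
	    ((uint32_t)buf[6] << 8) | buf[7];
	return ((int)(addlen / MHIOC_RESV_KEY_SIZE));	/* 8-byte keys */
}

/* (sd_send_scsi_PERSISTENT_RESERVE_IN header, continued)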
19580 */ 19581 19582 static int 19583 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd, 19584 uint16_t data_len, uchar_t *data_bufp) 19585 { 19586 struct scsi_extended_sense sense_buf; 19587 union scsi_cdb cdb; 19588 struct uscsi_cmd ucmd_buf; 19589 int status; 19590 int no_caller_buf = FALSE; 19591 19592 ASSERT(un != NULL); 19593 ASSERT(!mutex_owned(SD_MUTEX(un))); 19594 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 19595 19596 SD_TRACE(SD_LOG_IO, un, 19597 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 19598 19599 bzero(&cdb, sizeof (cdb)); 19600 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19601 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19602 if (data_bufp == NULL) { 19603 /* Allocate a default buf if the caller did not give one */ 19604 ASSERT(data_len == 0); 19605 data_len = MHIOC_RESV_KEY_SIZE; 19606 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 19607 no_caller_buf = TRUE; 19608 } 19609 19610 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 19611 cdb.cdb_opaque[1] = usr_cmd; 19612 FORMG1COUNT(&cdb, data_len); 19613 19614 ucmd_buf.uscsi_cdb = (char *)&cdb; 19615 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19616 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 19617 ucmd_buf.uscsi_buflen = data_len; 19618 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19619 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19620 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19621 ucmd_buf.uscsi_timeout = 60; 19622 19623 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19624 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD); 19625 19626 switch (status) { 19627 case 0: 19628 break; /* Success! */ 19629 case EIO: 19630 switch (ucmd_buf.uscsi_status) { 19631 case STATUS_RESERVATION_CONFLICT: 19632 status = EACCES; 19633 break; 19634 case STATUS_CHECK: 19635 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19636 (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) { 19637 status = ENOTSUP; 19638 } 19639 break; 19640 default: 19641 break; 19642 } 19643 break; 19644 default: 19645 break; 19646 } 19647 19648 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 19649 19650 if (no_caller_buf == TRUE) { 19651 kmem_free(data_bufp, data_len); 19652 } 19653 19654 return (status); 19655 } 19656 19657 19658 /* 19659 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 19660 * 19661 * Description: This routine is the driver entry point for handling CD-ROM 19662 * multi-host persistent reservation requests (MHIOCGRP_INKEYS, 19663 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the 19664 * device. 19665 * 19666 * Arguments: un - Pointer to soft state struct for the target. 19667 * usr_cmd SCSI-3 reservation facility command (one of 19668 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 19669 * SD_SCSI3_PREEMPTANDABORT) 19670 * usr_bufp - user provided pointer register, reserve descriptor or 19671 * preempt and abort structure (mhioc_register_t, 19672 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 19673 * 19674 * Return Code: 0 - Success 19675 * EACCES 19676 * ENOTSUP 19677 * errno return code from sd_send_scsi_cmd() 19678 * 19679 * Context: Can sleep. Does not return until command is completed. 
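 */

/*
 * Illustrative sketch (not part of the driver): the 24-byte parameter
 * list that sd_prout_t models and that the switch below fills in, laid
 * out per SPC-3 (field names here are approximate, not the driver's
 * own).
 */
struct example_prout_plist {
	uchar_t	res_key[8];	/* reservation key */
	uchar_t	service_key[8];	/* service action reservation key */
	uchar_t	scope_addr[4];	/* scope-specific address (obsolete) */
	uchar_t	aptpl;		/* bit 0: activate persist thru power loss */
	uchar_t	reserved;
	uchar_t	obsolete[2];	/* was: extent length */
};

/* (sd_send_scsi_PERSISTENT_RESERVE_OUT header, continued)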
19680 */ 19681 19682 static int 19683 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 19684 uchar_t *usr_bufp) 19685 { 19686 struct scsi_extended_sense sense_buf; 19687 union scsi_cdb cdb; 19688 struct uscsi_cmd ucmd_buf; 19689 int status; 19690 uchar_t data_len = sizeof (sd_prout_t); 19691 sd_prout_t *prp; 19692 19693 ASSERT(un != NULL); 19694 ASSERT(!mutex_owned(SD_MUTEX(un))); 19695 ASSERT(data_len == 24); /* required by scsi spec */ 19696 19697 SD_TRACE(SD_LOG_IO, un, 19698 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 19699 19700 if (usr_bufp == NULL) { 19701 return (EINVAL); 19702 } 19703 19704 bzero(&cdb, sizeof (cdb)); 19705 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19706 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19707 prp = kmem_zalloc(data_len, KM_SLEEP); 19708 19709 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 19710 cdb.cdb_opaque[1] = usr_cmd; 19711 FORMG1COUNT(&cdb, data_len); 19712 19713 ucmd_buf.uscsi_cdb = (char *)&cdb; 19714 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19715 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 19716 ucmd_buf.uscsi_buflen = data_len; 19717 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19718 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19719 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 19720 ucmd_buf.uscsi_timeout = 60; 19721 19722 switch (usr_cmd) { 19723 case SD_SCSI3_REGISTER: { 19724 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 19725 19726 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19727 bcopy(ptr->newkey.key, prp->service_key, 19728 MHIOC_RESV_KEY_SIZE); 19729 prp->aptpl = ptr->aptpl; 19730 break; 19731 } 19732 case SD_SCSI3_RESERVE: 19733 case SD_SCSI3_RELEASE: { 19734 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 19735 19736 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19737 prp->scope_address = BE_32(ptr->scope_specific_addr); 19738 cdb.cdb_opaque[2] = ptr->type; 19739 break; 19740 } 19741 case SD_SCSI3_PREEMPTANDABORT: { 19742 mhioc_preemptandabort_t *ptr = 19743 (mhioc_preemptandabort_t *)usr_bufp; 19744 19745 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19746 bcopy(ptr->victim_key.key, prp->service_key, 19747 MHIOC_RESV_KEY_SIZE); 19748 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 19749 cdb.cdb_opaque[2] = ptr->resvdesc.type; 19750 ucmd_buf.uscsi_flags |= USCSI_HEAD; 19751 break; 19752 } 19753 case SD_SCSI3_REGISTERANDIGNOREKEY: 19754 { 19755 mhioc_registerandignorekey_t *ptr; 19756 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 19757 bcopy(ptr->newkey.key, 19758 prp->service_key, MHIOC_RESV_KEY_SIZE); 19759 prp->aptpl = ptr->aptpl; 19760 break; 19761 } 19762 default: 19763 ASSERT(FALSE); 19764 break; 19765 } 19766 19767 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19768 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD); 19769 19770 switch (status) { 19771 case 0: 19772 break; /* Success! 
*/ 19773 case EIO: 19774 switch (ucmd_buf.uscsi_status) { 19775 case STATUS_RESERVATION_CONFLICT: 19776 status = EACCES; 19777 break; 19778 case STATUS_CHECK: 19779 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19780 (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) { 19781 status = ENOTSUP; 19782 } 19783 break; 19784 default: 19785 break; 19786 } 19787 break; 19788 default: 19789 break; 19790 } 19791 19792 kmem_free(prp, data_len); 19793 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 19794 return (status); 19795 } 19796 19797 19798 /* 19799 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 19800 * 19801 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 19802 * 19803 * Arguments: un - pointer to the target's soft state struct 19804 * 19805 * Return Code: 0 - success 19806 * errno-type error code 19807 * 19808 * Context: kernel thread context only. 19809 */ 19810 19811 static int 19812 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un) 19813 { 19814 struct scsi_extended_sense sense_buf; 19815 union scsi_cdb cdb; 19816 struct uscsi_cmd ucmd_buf; 19817 int status; 19818 19819 ASSERT(un != NULL); 19820 ASSERT(!mutex_owned(SD_MUTEX(un))); 19821 19822 SD_TRACE(SD_LOG_IO, un, 19823 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 19824 19825 bzero(&cdb, sizeof (cdb)); 19826 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19827 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19828 19829 cdb.scc_cmd = SCMD_SYNCHRONIZE_CACHE; 19830 19831 ucmd_buf.uscsi_cdb = (char *)&cdb; 19832 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19833 ucmd_buf.uscsi_bufaddr = NULL; 19834 ucmd_buf.uscsi_buflen = 0; 19835 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19836 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19837 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19838 ucmd_buf.uscsi_timeout = 240; 19839 19840 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19841 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_DIRECT); 19842 19843 switch (status) { 19844 case 0: 19845 break; /* Success! */ 19846 case EIO: 19847 switch (ucmd_buf.uscsi_status) { 19848 case STATUS_RESERVATION_CONFLICT: 19849 /* Ignore reservation conflict */ 19850 status = 0; 19851 goto done; 19852 19853 case STATUS_CHECK: 19854 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19855 (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) { 19856 /* Ignore Illegal Request error */ 19857 status = 0; 19858 goto done; 19859 } 19860 break; 19861 default: 19862 break; 19863 } 19864 /* FALLTHRU */ 19865 default: 19866 /* Ignore error if the media is not present. */ 19867 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 19868 status = 0; 19869 goto done; 19870 } 19871 /* If we reach this, we had an error */ 19872 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 19873 "SYNCHRONIZE CACHE command failed (%d)\n", status); 19874 break; 19875 } 19876 19877 done: 19878 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: exit\n"); 19879 19880 return (status); 19881 } 19882 19883 19884 /* 19885 * Function: sd_send_scsi_GET_CONFIGURATION 19886 * 19887 * Description: Issues the get configuration command to the device. 19888 * Called from sd_check_for_writable_cd & sd_get_media_info; 19889 * the caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN. 19890 * Arguments: un 19891 * ucmdbuf 19892 * rqbuf 19893 * rqbuflen 19894 * bufaddr 19895 * buflen 19896 * 19897 * Return Code: 0 - Success 19898 * errno return code from sd_send_scsi_cmd() 19899 * 19900 * Context: Can sleep. Does not return until command is completed.
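 */

/*
 * Illustrative sketch (not part of the driver): pulling the current
 * profile out of the feature header returned by GET CONFIGURATION.
 * Per MMC, bytes 0-3 of the header hold the data length and bytes 6-7
 * the current profile (e.g. 0x0008 CD-ROM, 0x0010 DVD-ROM).  The
 * helper name is hypothetical.
 */
static uint16_t
example_get_conf_profile(const uchar_t *hdr, size_t len)
{
	if (len < SD_PROFILE_HEADER_LEN) {
		return (0);	/* header incomplete */
	}
	return (((uint16_t)hdr[6] << 8) | hdr[7]);
}

/* (sd_send_scsi_GET_CONFIGURATION header, continued)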
19901 * 19902 */ 19903 19904 static int 19905 sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf, 19906 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen) 19907 { 19908 char cdb[CDB_GROUP1]; 19909 int status; 19910 19911 ASSERT(un != NULL); 19912 ASSERT(!mutex_owned(SD_MUTEX(un))); 19913 ASSERT(bufaddr != NULL); 19914 ASSERT(ucmdbuf != NULL); 19915 ASSERT(rqbuf != NULL); 19916 19917 SD_TRACE(SD_LOG_IO, un, 19918 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un); 19919 19920 bzero(cdb, sizeof (cdb)); 19921 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 19922 bzero(rqbuf, rqbuflen); 19923 bzero(bufaddr, buflen); 19924 19925 /* 19926 * Set up cdb field for the get configuration command. 19927 */ 19928 cdb[0] = SCMD_GET_CONFIGURATION; 19929 cdb[1] = 0x02; /* Requested Type */ 19930 cdb[8] = SD_PROFILE_HEADER_LEN; 19931 ucmdbuf->uscsi_cdb = cdb; 19932 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19933 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19934 ucmdbuf->uscsi_buflen = buflen; 19935 ucmdbuf->uscsi_timeout = sd_io_time; 19936 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19937 ucmdbuf->uscsi_rqlen = rqbuflen; 19938 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19939 19940 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, UIO_SYSSPACE, 19941 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD); 19942 19943 switch (status) { 19944 case 0: 19945 break; /* Success! */ 19946 case EIO: 19947 switch (ucmdbuf->uscsi_status) { 19948 case STATUS_RESERVATION_CONFLICT: 19949 status = EACCES; 19950 break; 19951 default: 19952 break; 19953 } 19954 break; 19955 default: 19956 break; 19957 } 19958 19959 if (status == 0) { 19960 SD_DUMP_MEMORY(un, SD_LOG_IO, 19961 "sd_send_scsi_GET_CONFIGURATION: data", 19962 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19963 } 19964 19965 SD_TRACE(SD_LOG_IO, un, 19966 "sd_send_scsi_GET_CONFIGURATION: exit\n"); 19967 19968 return (status); 19969 } 19970 19971 /* 19972 * Function: sd_send_scsi_feature_GET_CONFIGURATION 19973 * 19974 * Description: Issues the get configuration command to the device to 19975 * retrieve a specific feature. Called from 19976 * sd_check_for_writable_cd & sd_set_mmc_caps. 19977 * Arguments: un 19978 * ucmdbuf 19979 * rqbuf 19980 * rqbuflen 19981 * bufaddr 19982 * buflen 19983 * feature 19984 * 19985 * Return Code: 0 - Success 19986 * errno return code from sd_send_scsi_cmd() 19987 * 19988 * Context: Can sleep. Does not return until command is completed. 19989 * 19990 */ 19991 static int 19992 sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un, 19993 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen, 19994 uchar_t *bufaddr, uint_t buflen, char feature) 19995 { 19996 char cdb[CDB_GROUP1]; 19997 int status; 19998 19999 ASSERT(un != NULL); 20000 ASSERT(!mutex_owned(SD_MUTEX(un))); 20001 ASSERT(bufaddr != NULL); 20002 ASSERT(ucmdbuf != NULL); 20003 ASSERT(rqbuf != NULL); 20004 20005 SD_TRACE(SD_LOG_IO, un, 20006 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un); 20007 20008 bzero(cdb, sizeof (cdb)); 20009 bzero(ucmdbuf, sizeof (struct uscsi_cmd)); 20010 bzero(rqbuf, rqbuflen); 20011 bzero(bufaddr, buflen); 20012 20013 /* 20014 * Set up cdb field for the get configuration command.
20015 */ 20016 cdb[0] = SCMD_GET_CONFIGURATION; 20017 cdb[1] = 0x02; /* Requested Type */ 20018 cdb[3] = feature; 20019 cdb[8] = buflen; 20020 ucmdbuf->uscsi_cdb = cdb; 20021 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 20022 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 20023 ucmdbuf->uscsi_buflen = buflen; 20024 ucmdbuf->uscsi_timeout = sd_io_time; 20025 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 20026 ucmdbuf->uscsi_rqlen = rqbuflen; 20027 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 20028 20029 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, UIO_SYSSPACE, 20030 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD); 20031 20032 switch (status) { 20033 case 0: 20034 break; /* Success! */ 20035 case EIO: 20036 switch (ucmdbuf->uscsi_status) { 20037 case STATUS_RESERVATION_CONFLICT: 20038 status = EACCES; 20039 break; 20040 default: 20041 break; 20042 } 20043 break; 20044 default: 20045 break; 20046 } 20047 20048 if (status == 0) { 20049 SD_DUMP_MEMORY(un, SD_LOG_IO, 20050 "sd_send_scsi_feature_GET_CONFIGURATION: data", 20051 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 20052 } 20053 20054 SD_TRACE(SD_LOG_IO, un, 20055 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 20056 20057 return (status); 20058 } 20059 20060 20061 /* 20062 * Function: sd_send_scsi_MODE_SENSE 20063 * 20064 * Description: Utility function for issuing a scsi MODE SENSE command. 20065 * Note: This routine uses a consistent implementation for Group0, 20066 * Group1, and Group2 commands across all platforms. ATAPI devices 20067 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 20068 * 20069 * Arguments: un - pointer to the softstate struct for the target. 20070 * cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte) or 20071 * CDB_GROUP[1|2] (10 byte)). 20072 * bufaddr - buffer for page data retrieved from the target. 20073 * buflen - size of page to be retrieved. 20074 * page_code - page code of data to be retrieved from the target. 20075 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20076 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20077 * to use the USCSI "direct" chain and bypass the normal 20078 * command waitq. 20079 * 20080 * Return Code: 0 - Success 20081 * errno return code from sd_send_scsi_cmd() 20082 * 20083 * Context: Can sleep. Does not return until command is completed.
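 *
 * Example (an illustrative sketch; the caching page code 0x08 is
 * cited here only as a plausible illustration, not taken from this
 * file): retrieving a mode page with a Group1 CDB:
 *
 *	uchar_t pagebuf[0xFF];
 *
 *	if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, pagebuf,
 *	    sizeof (pagebuf), 0x08, SD_PATH_DIRECT) == 0) {
 *		mode header, block descriptor and page data
 *		are now in pagebuf
 *	}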
20084 */ 20085 20086 static int 20087 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 20088 size_t buflen, uchar_t page_code, int path_flag) 20089 { 20090 struct scsi_extended_sense sense_buf; 20091 union scsi_cdb cdb; 20092 struct uscsi_cmd ucmd_buf; 20093 int status; 20094 20095 ASSERT(un != NULL); 20096 ASSERT(!mutex_owned(SD_MUTEX(un))); 20097 ASSERT(bufaddr != NULL); 20098 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 20099 (cdbsize == CDB_GROUP2)); 20100 20101 SD_TRACE(SD_LOG_IO, un, 20102 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 20103 20104 bzero(&cdb, sizeof (cdb)); 20105 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20106 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20107 bzero(bufaddr, buflen); 20108 20109 if (cdbsize == CDB_GROUP0) { 20110 cdb.scc_cmd = SCMD_MODE_SENSE; 20111 cdb.cdb_opaque[2] = page_code; 20112 FORMG0COUNT(&cdb, buflen); 20113 } else { 20114 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 20115 cdb.cdb_opaque[2] = page_code; 20116 FORMG1COUNT(&cdb, buflen); 20117 } 20118 20119 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 20120 20121 ucmd_buf.uscsi_cdb = (char *)&cdb; 20122 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 20123 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20124 ucmd_buf.uscsi_buflen = buflen; 20125 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20126 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20127 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20128 ucmd_buf.uscsi_timeout = 60; 20129 20130 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 20131 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 20132 20133 switch (status) { 20134 case 0: 20135 break; /* Success! */ 20136 case EIO: 20137 switch (ucmd_buf.uscsi_status) { 20138 case STATUS_RESERVATION_CONFLICT: 20139 status = EACCES; 20140 break; 20141 default: 20142 break; 20143 } 20144 break; 20145 default: 20146 break; 20147 } 20148 20149 if (status == 0) { 20150 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data", 20151 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20152 } 20153 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n"); 20154 20155 return (status); 20156 } 20157 20158 20159 /* 20160 * Function: sd_send_scsi_MODE_SELECT 20161 * 20162 * Description: Utility function for issuing a scsi MODE SELECT command. 20163 * Note: This routine uses a consistent implementation for Group0, 20164 * Group1, and Group2 commands across all platforms. ATAPI devices 20165 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 20166 * 20167 * Arguments: un - pointer to the softstate struct for the target. 20168 * cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte) or 20169 * CDB_GROUP[1|2] (10 byte)). 20170 * bufaddr - buffer containing the mode page data to be sent 20171 * to the target. 20172 * buflen - size of the page data to be transferred. 20172 * save_page - boolean to determine if SP bit should be set. 20173 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20174 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20175 * to use the USCSI "direct" chain and bypass the normal 20176 * command waitq. 20177 * 20178 * Return Code: 0 - Success 20179 * errno return code from sd_send_scsi_cmd() 20180 * 20181 * Context: Can sleep. Does not return until command is completed.
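 *
 * Example (an illustrative sketch, continuing the MODE SENSE example
 * above): writing a modified page back and asking the target to
 * save it:
 *
 *	status = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, pagebuf,
 *	    sizeof (pagebuf), SD_SAVE_PAGE, SD_PATH_DIRECT);
 *
 * Passing SD_SAVE_PAGE sets the SP bit (CDB byte 1, bit 0); the PF
 * bit (CDB byte 1, bit 4) is always set by this routine, as the
 * code below shows.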
20182 */ 20183 20184 static int 20185 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 20186 size_t buflen, uchar_t save_page, int path_flag) 20187 { 20188 struct scsi_extended_sense sense_buf; 20189 union scsi_cdb cdb; 20190 struct uscsi_cmd ucmd_buf; 20191 int status; 20192 20193 ASSERT(un != NULL); 20194 ASSERT(!mutex_owned(SD_MUTEX(un))); 20195 ASSERT(bufaddr != NULL); 20196 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 20197 (cdbsize == CDB_GROUP2)); 20198 20199 SD_TRACE(SD_LOG_IO, un, 20200 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 20201 20202 bzero(&cdb, sizeof (cdb)); 20203 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20204 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20205 20206 /* Set the PF bit for many third party drives */ 20207 cdb.cdb_opaque[1] = 0x10; 20208 20209 /* Set the savepage(SP) bit if given */ 20210 if (save_page == SD_SAVE_PAGE) { 20211 cdb.cdb_opaque[1] |= 0x01; 20212 } 20213 20214 if (cdbsize == CDB_GROUP0) { 20215 cdb.scc_cmd = SCMD_MODE_SELECT; 20216 FORMG0COUNT(&cdb, buflen); 20217 } else { 20218 cdb.scc_cmd = SCMD_MODE_SELECT_G1; 20219 FORMG1COUNT(&cdb, buflen); 20220 } 20221 20222 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 20223 20224 ucmd_buf.uscsi_cdb = (char *)&cdb; 20225 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 20226 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20227 ucmd_buf.uscsi_buflen = buflen; 20228 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20229 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20230 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 20231 ucmd_buf.uscsi_timeout = 60; 20232 20233 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 20234 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 20235 20236 switch (status) { 20237 case 0: 20238 break; /* Success! */ 20239 case EIO: 20240 switch (ucmd_buf.uscsi_status) { 20241 case STATUS_RESERVATION_CONFLICT: 20242 status = EACCES; 20243 break; 20244 default: 20245 break; 20246 } 20247 break; 20248 default: 20249 break; 20250 } 20251 20252 if (status == 0) { 20253 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data", 20254 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20255 } 20256 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n"); 20257 20258 return (status); 20259 } 20260 20261 20262 /* 20263 * Function: sd_send_scsi_RDWR 20264 * 20265 * Description: Issue a scsi READ or WRITE command with the given parameters. 20266 * 20267 * Arguments: un: Pointer to the sd_lun struct for the target. 20268 * cmd: SCMD_READ or SCMD_WRITE 20269 * bufaddr: Address of caller's buffer to receive the RDWR data 20270 * buflen: Length of caller's buffer to receive the RDWR data. 20271 * start_block: Block number for the start of the RDWR operation. 20272 * (Assumes target-native block size.) 20273 * residp: Pointer to variable to receive the residual of the 20274 * RDWR operation (may be NULL if no residual requested). 20275 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20276 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20277 * to use the USCSI "direct" chain and bypass the normal 20278 * command waitq. 20279 * 20280 * Return Code: 0 - Success 20281 * errno return code from sd_send_scsi_cmd() 20282 * 20283 * Context: Can sleep. Does not return until command is completed.
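 *
 * Note (a worked example of the CDB sizing logic in the code below,
 * added for clarity): a 6-byte Group0 CDB can address only block
 * numbers below 0x200000, so start_block = 0x1FFFFF still fits
 * CDB_GROUP0, start_block = 0x200000 forces a 10-byte CDB_GROUP1,
 * and a start_block above 0xFFFFFFFF requires a 16-byte CDB_GROUP4.
 * ATAPI targets are always given at least CDB_GROUP1, per the
 * un_f_cfg_is_atapi test in the code.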
20284 */ 20285 20286 static int 20287 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 20288 size_t buflen, daddr_t start_block, int path_flag) 20289 { 20290 struct scsi_extended_sense sense_buf; 20291 union scsi_cdb cdb; 20292 struct uscsi_cmd ucmd_buf; 20293 uint32_t block_count; 20294 int status; 20295 int cdbsize; 20296 uchar_t flag; 20297 20298 ASSERT(un != NULL); 20299 ASSERT(!mutex_owned(SD_MUTEX(un))); 20300 ASSERT(bufaddr != NULL); 20301 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 20302 20303 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 20304 20305 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 20306 return (EINVAL); 20307 } 20308 20309 mutex_enter(SD_MUTEX(un)); 20310 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 20311 mutex_exit(SD_MUTEX(un)); 20312 20313 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 20314 20315 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 20316 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 20317 bufaddr, buflen, start_block, block_count); 20318 20319 bzero(&cdb, sizeof (cdb)); 20320 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20321 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20322 20323 /* Compute CDB size to use */ 20324 if (start_block > 0xffffffff) 20325 cdbsize = CDB_GROUP4; 20326 else if ((start_block & 0xFFE00000) || 20327 (un->un_f_cfg_is_atapi == TRUE)) 20328 cdbsize = CDB_GROUP1; 20329 else 20330 cdbsize = CDB_GROUP0; 20331 20332 switch (cdbsize) { 20333 case CDB_GROUP0: /* 6-byte CDBs */ 20334 cdb.scc_cmd = cmd; 20335 FORMG0ADDR(&cdb, start_block); 20336 FORMG0COUNT(&cdb, block_count); 20337 break; 20338 case CDB_GROUP1: /* 10-byte CDBs */ 20339 cdb.scc_cmd = cmd | SCMD_GROUP1; 20340 FORMG1ADDR(&cdb, start_block); 20341 FORMG1COUNT(&cdb, block_count); 20342 break; 20343 case CDB_GROUP4: /* 16-byte CDBs */ 20344 cdb.scc_cmd = cmd | SCMD_GROUP4; 20345 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 20346 FORMG4COUNT(&cdb, block_count); 20347 break; 20348 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 20349 default: 20350 /* All others reserved */ 20351 return (EINVAL); 20352 } 20353 20354 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 20355 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 20356 20357 ucmd_buf.uscsi_cdb = (char *)&cdb; 20358 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 20359 ucmd_buf.uscsi_bufaddr = bufaddr; 20360 ucmd_buf.uscsi_buflen = buflen; 20361 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20362 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20363 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 20364 ucmd_buf.uscsi_timeout = 60; 20365 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 20366 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 20367 switch (status) { 20368 case 0: 20369 break; /* Success! */ 20370 case EIO: 20371 switch (ucmd_buf.uscsi_status) { 20372 case STATUS_RESERVATION_CONFLICT: 20373 status = EACCES; 20374 break; 20375 default: 20376 break; 20377 } 20378 break; 20379 default: 20380 break; 20381 } 20382 20383 if (status == 0) { 20384 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 20385 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20386 } 20387 20388 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 20389 20390 return (status); 20391 } 20392 20393 20394 /* 20395 * Function: sd_send_scsi_LOG_SENSE 20396 * 20397 * Description: Issue a scsi LOG_SENSE command with the given parameters. 20398 * 20399 * Arguments: un: Pointer to the sd_lun struct for the target. 
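 *	bufaddr: Buffer to receive the log page data.
 *	buflen: Length of the caller's buffer.
 *	page_code: Log page to be retrieved.
 *	page_control: Page control bits placed in CDB byte 2.
 *	param_ptr: Parameter pointer placed in CDB bytes 5 and 6.
 *	path_flag: SD_PATH_DIRECT or SD_PATH_DIRECT_PRIORITY, as for
 *	the other utility routines above.
 *	(This argument summary and the example below were added
 *	editorially from the function signature.)
 *
 * Example (illustrative only; the cumulative page control value
 * 0x01 is an assumption of this sketch): fetching the temperature
 * log page:
 *
 *	status = sd_send_scsi_LOG_SENSE(un, buf, sizeof (buf),
 *	    TEMPERATURE_PAGE, 0x01, 0, SD_PATH_DIRECT);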
20400 * 20401 * Return Code: 0 - Success 20402 * errno return code from sd_send_scsi_cmd() 20403 * 20404 * Context: Can sleep. Does not return until command is completed. 20405 */ 20406 20407 static int 20408 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 20409 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 20410 int path_flag) 20411 20412 { 20413 struct scsi_extended_sense sense_buf; 20414 union scsi_cdb cdb; 20415 struct uscsi_cmd ucmd_buf; 20416 int status; 20417 20418 ASSERT(un != NULL); 20419 ASSERT(!mutex_owned(SD_MUTEX(un))); 20420 20421 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 20422 20423 bzero(&cdb, sizeof (cdb)); 20424 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20425 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20426 20427 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 20428 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 20429 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 20430 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 20431 FORMG1COUNT(&cdb, buflen); 20432 20433 ucmd_buf.uscsi_cdb = (char *)&cdb; 20434 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20435 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20436 ucmd_buf.uscsi_buflen = buflen; 20437 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20438 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20439 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20440 ucmd_buf.uscsi_timeout = 60; 20441 20442 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 20443 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 20444 20445 switch (status) { 20446 case 0: 20447 break; 20448 case EIO: 20449 switch (ucmd_buf.uscsi_status) { 20450 case STATUS_RESERVATION_CONFLICT: 20451 status = EACCES; 20452 break; 20453 case STATUS_CHECK: 20454 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20455 (sense_buf.es_key == KEY_ILLEGAL_REQUEST) && 20456 (sense_buf.es_add_code == 0x24)) { 20457 /* 20458 * ASC 0x24: INVALID FIELD IN CDB 20459 */ 20460 switch (page_code) { 20461 case START_STOP_CYCLE_PAGE: 20462 /* 20463 * The start stop cycle counter is 20464 * implemented as page 0x31 in earlier 20465 * generation disks. In new generation 20466 * disks the start stop cycle counter is 20467 * implemented as page 0xE. To properly 20468 * handle this case if an attempt for 20469 * log page 0xE is made and fails we 20470 * will try again using page 0x31. 20471 * 20472 * Network storage BU committed to 20473 * maintain the page 0x31 for this 20474 * purpose and will not have any other 20475 * page implemented with page code 0x31 20476 * until all disks transition to the 20477 * standard page. 
20478 */ 20479 mutex_enter(SD_MUTEX(un)); 20480 un->un_start_stop_cycle_page = 20481 START_STOP_CYCLE_VU_PAGE; 20482 cdb.cdb_opaque[2] = 20483 (char)(page_control << 6) | 20484 un->un_start_stop_cycle_page; 20485 mutex_exit(SD_MUTEX(un)); 20486 status = sd_send_scsi_cmd( 20487 SD_GET_DEV(un), &ucmd_buf, 20488 UIO_SYSSPACE, UIO_SYSSPACE, 20489 UIO_SYSSPACE, path_flag); 20490 20491 break; 20492 case TEMPERATURE_PAGE: 20493 status = ENOTTY; 20494 break; 20495 default: 20496 break; 20497 } 20498 } 20499 break; 20500 default: 20501 break; 20502 } 20503 break; 20504 default: 20505 break; 20506 } 20507 20508 if (status == 0) { 20509 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 20510 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20511 } 20512 20513 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 20514 20515 return (status); 20516 } 20517 20518 20519 /* 20520 * Function: sdioctl 20521 * 20522 * Description: Driver's ioctl(9e) entry point function. 20523 * 20524 * Arguments: dev - device number 20525 * cmd - ioctl operation to be performed 20526 * arg - user argument, contains data to be set or reference 20527 * parameter for get 20528 * flag - bit flag, indicating open settings, 32/64 bit type 20529 * cred_p - user credential pointer 20530 * rval_p - calling process return value (OPT) 20531 * 20532 * Return Code: EINVAL 20533 * ENOTTY 20534 * ENXIO 20535 * EIO 20536 * EFAULT 20537 * ENOTSUP 20538 * EPERM 20539 * 20540 * Context: Called from the device switch at normal priority. 20541 */ 20542 20543 static int 20544 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 20545 { 20546 struct sd_lun *un = NULL; 20547 int geom_validated = FALSE; 20548 int err = 0; 20549 int i = 0; 20550 cred_t *cr; 20551 20552 /* 20553 * All device accesses go thru sdstrategy where we check on suspend 20554 * status 20555 */ 20556 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20557 return (ENXIO); 20558 } 20559 20560 ASSERT(!mutex_owned(SD_MUTEX(un))); 20561 20562 /* 20563 * Moved this wait from sd_uscsi_strategy to here for 20564 * reasons of deadlock prevention. Internal driver commands, 20565 * specifically those to change a devices power level, result 20566 * in a call to sd_uscsi_strategy. 20567 */ 20568 mutex_enter(SD_MUTEX(un)); 20569 while ((un->un_state == SD_STATE_SUSPENDED) || 20570 (un->un_state == SD_STATE_PM_CHANGING)) { 20571 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 20572 } 20573 /* 20574 * Twiddling the counter here protects commands from now 20575 * through to the top of sd_uscsi_strategy. Without the 20576 * counter inc. a power down, for example, could get in 20577 * after the above check for state is made and before 20578 * execution gets to the top of sd_uscsi_strategy. 20579 * That would cause problems. 
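 * For example (an illustrative interleaving, added for clarity):
 * thread A passes the state check above, a power-management thread
 * then begins moving the device toward SD_STATE_PM_CHANGING, and
 * thread A's command would reach sd_uscsi_strategy mid-transition;
 * with the counter already incremented, the power-management path
 * is expected to see the command in flight and wait instead.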
20580 */ 20581 un->un_ncmds_in_driver++; 20582 20583 if ((un->un_f_geometry_is_valid == FALSE) && 20584 (flag & (FNDELAY | FNONBLOCK))) { 20585 switch (cmd) { 20586 case CDROMPAUSE: 20587 case CDROMRESUME: 20588 case CDROMPLAYMSF: 20589 case CDROMPLAYTRKIND: 20590 case CDROMREADTOCHDR: 20591 case CDROMREADTOCENTRY: 20592 case CDROMSTOP: 20593 case CDROMSTART: 20594 case CDROMVOLCTRL: 20595 case CDROMSUBCHNL: 20596 case CDROMREADMODE2: 20597 case CDROMREADMODE1: 20598 case CDROMREADOFFSET: 20599 case CDROMSBLKMODE: 20600 case CDROMGBLKMODE: 20601 case CDROMGDRVSPEED: 20602 case CDROMSDRVSPEED: 20603 case CDROMCDDA: 20604 case CDROMCDXA: 20605 case CDROMSUBCODE: 20606 if (!ISCD(un)) { 20607 un->un_ncmds_in_driver--; 20608 ASSERT(un->un_ncmds_in_driver >= 0); 20609 mutex_exit(SD_MUTEX(un)); 20610 return (ENOTTY); 20611 } 20612 break; 20613 case FDEJECT: 20614 case DKIOCEJECT: 20615 case CDROMEJECT: 20616 if (!ISREMOVABLE(un)) { 20617 un->un_ncmds_in_driver--; 20618 ASSERT(un->un_ncmds_in_driver >= 0); 20619 mutex_exit(SD_MUTEX(un)); 20620 return (ENOTTY); 20621 } 20622 break; 20623 case DKIOCSVTOC: 20624 case DKIOCSETEFI: 20625 case DKIOCSMBOOT: 20626 mutex_exit(SD_MUTEX(un)); 20627 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 20628 if (err != 0) { 20629 mutex_enter(SD_MUTEX(un)); 20630 un->un_ncmds_in_driver--; 20631 ASSERT(un->un_ncmds_in_driver >= 0); 20632 mutex_exit(SD_MUTEX(un)); 20633 return (EIO); 20634 } 20635 mutex_enter(SD_MUTEX(un)); 20636 /* FALLTHROUGH */ 20637 case DKIOCREMOVABLE: 20638 case DKIOCINFO: 20639 case DKIOCGMEDIAINFO: 20640 case MHIOCENFAILFAST: 20641 case MHIOCSTATUS: 20642 case MHIOCTKOWN: 20643 case MHIOCRELEASE: 20644 case MHIOCGRP_INKEYS: 20645 case MHIOCGRP_INRESV: 20646 case MHIOCGRP_REGISTER: 20647 case MHIOCGRP_RESERVE: 20648 case MHIOCGRP_PREEMPTANDABORT: 20649 case MHIOCGRP_REGISTERANDIGNOREKEY: 20650 case CDROMCLOSETRAY: 20651 case USCSICMD: 20652 goto skip_ready_valid; 20653 default: 20654 break; 20655 } 20656 20657 mutex_exit(SD_MUTEX(un)); 20658 err = sd_ready_and_valid(un); 20659 mutex_enter(SD_MUTEX(un)); 20660 if (err == SD_READY_NOT_VALID) { 20661 switch (cmd) { 20662 case DKIOCGAPART: 20663 case DKIOCGGEOM: 20664 case DKIOCSGEOM: 20665 case DKIOCGVTOC: 20666 case DKIOCSVTOC: 20667 case DKIOCSAPART: 20668 case DKIOCG_PHYGEOM: 20669 case DKIOCG_VIRTGEOM: 20670 err = ENOTSUP; 20671 un->un_ncmds_in_driver--; 20672 ASSERT(un->un_ncmds_in_driver >= 0); 20673 mutex_exit(SD_MUTEX(un)); 20674 return (err); 20675 } 20676 } 20677 if (err != SD_READY_VALID) { 20678 switch (cmd) { 20679 case DKIOCSTATE: 20680 case CDROMGDRVSPEED: 20681 case CDROMSDRVSPEED: 20682 case FDEJECT: /* for eject command */ 20683 case DKIOCEJECT: 20684 case CDROMEJECT: 20685 case DKIOCGETEFI: 20686 case DKIOCSGEOM: 20687 case DKIOCREMOVABLE: 20688 case DKIOCSAPART: 20689 case DKIOCSETEFI: 20690 break; 20691 default: 20692 if (ISREMOVABLE(un)) { 20693 err = ENXIO; 20694 } else { 20695 /* Do not map EACCES to EIO */ 20696 if (err != EACCES) 20697 err = EIO; 20698 } 20699 un->un_ncmds_in_driver--; 20700 ASSERT(un->un_ncmds_in_driver >= 0); 20701 mutex_exit(SD_MUTEX(un)); 20702 return (err); 20703 } 20704 } 20705 geom_validated = TRUE; 20706 } 20707 if ((un->un_f_geometry_is_valid == TRUE) && 20708 (un->un_solaris_size > 0)) { 20709 /* 20710 * the "geometry_is_valid" flag could be true if we 20711 * have an fdisk table but no Solaris partition 20712 */ 20713 if (un->un_vtoc.v_sanity != VTOC_SANE) { 20714 /* it is EFI, so return ENOTSUP for these */ 20715 switch (cmd) { 20716 case 
DKIOCGAPART: 20717 case DKIOCGGEOM: 20718 case DKIOCGVTOC: 20719 case DKIOCSVTOC: 20720 case DKIOCSAPART: 20721 err = ENOTSUP; 20722 un->un_ncmds_in_driver--; 20723 ASSERT(un->un_ncmds_in_driver >= 0); 20724 mutex_exit(SD_MUTEX(un)); 20725 return (err); 20726 } 20727 } 20728 } 20729 20730 skip_ready_valid: 20731 mutex_exit(SD_MUTEX(un)); 20732 20733 switch (cmd) { 20734 case DKIOCINFO: 20735 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 20736 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 20737 break; 20738 20739 case DKIOCGMEDIAINFO: 20740 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 20741 err = sd_get_media_info(dev, (caddr_t)arg, flag); 20742 break; 20743 20744 case DKIOCGGEOM: 20745 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGGEOM\n"); 20746 err = sd_dkio_get_geometry(dev, (caddr_t)arg, flag, 20747 geom_validated); 20748 break; 20749 20750 case DKIOCSGEOM: 20751 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSGEOM\n"); 20752 err = sd_dkio_set_geometry(dev, (caddr_t)arg, flag); 20753 break; 20754 20755 case DKIOCGAPART: 20756 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGAPART\n"); 20757 err = sd_dkio_get_partition(dev, (caddr_t)arg, flag, 20758 geom_validated); 20759 break; 20760 20761 case DKIOCSAPART: 20762 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSAPART\n"); 20763 err = sd_dkio_set_partition(dev, (caddr_t)arg, flag); 20764 break; 20765 20766 case DKIOCGVTOC: 20767 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGVTOC\n"); 20768 err = sd_dkio_get_vtoc(dev, (caddr_t)arg, flag, 20769 geom_validated); 20770 break; 20771 20772 case DKIOCGETEFI: 20773 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGETEFI\n"); 20774 err = sd_dkio_get_efi(dev, (caddr_t)arg, flag); 20775 break; 20776 20777 case DKIOCPARTITION: 20778 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCPARTITION\n"); 20779 err = sd_dkio_partition(dev, (caddr_t)arg, flag); 20780 break; 20781 20782 case DKIOCSVTOC: 20783 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSVTOC\n"); 20784 err = sd_dkio_set_vtoc(dev, (caddr_t)arg, flag); 20785 break; 20786 20787 case DKIOCSETEFI: 20788 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSETEFI\n"); 20789 err = sd_dkio_set_efi(dev, (caddr_t)arg, flag); 20790 break; 20791 20792 case DKIOCGMBOOT: 20793 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMBOOT\n"); 20794 err = sd_dkio_get_mboot(dev, (caddr_t)arg, flag); 20795 break; 20796 20797 case DKIOCSMBOOT: 20798 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSMBOOT\n"); 20799 err = sd_dkio_set_mboot(dev, (caddr_t)arg, flag); 20800 break; 20801 20802 case DKIOCLOCK: 20803 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 20804 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 20805 SD_PATH_STANDARD); 20806 break; 20807 20808 case DKIOCUNLOCK: 20809 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 20810 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 20811 SD_PATH_STANDARD); 20812 break; 20813 20814 case DKIOCSTATE: { 20815 enum dkio_state state; 20816 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 20817 20818 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 20819 err = EFAULT; 20820 } else { 20821 err = sd_check_media(dev, state); 20822 if (err == 0) { 20823 if (ddi_copyout(&un->un_mediastate, (void *)arg, 20824 sizeof (int), flag) != 0) 20825 err = EFAULT; 20826 } 20827 } 20828 break; 20829 } 20830 20831 case DKIOCREMOVABLE: 20832 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 20833 if (ISREMOVABLE(un)) { 20834 i = 1; 20835 } else { 20836 i = 0; 20837 } 20838 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 20839 err = EFAULT; 20840 } else { 20841 err = 0; 20842 } 20843 break; 20844 20845 case DKIOCGTEMPERATURE: 20846 SD_TRACE(SD_LOG_IOCTL, un, 
"DKIOCGTEMPERATURE\n"); 20847 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 20848 break; 20849 20850 case MHIOCENFAILFAST: 20851 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 20852 if ((err = drv_priv(cred_p)) == 0) { 20853 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 20854 } 20855 break; 20856 20857 case MHIOCTKOWN: 20858 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 20859 if ((err = drv_priv(cred_p)) == 0) { 20860 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 20861 } 20862 break; 20863 20864 case MHIOCRELEASE: 20865 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 20866 if ((err = drv_priv(cred_p)) == 0) { 20867 err = sd_mhdioc_release(dev); 20868 } 20869 break; 20870 20871 case MHIOCSTATUS: 20872 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 20873 if ((err = drv_priv(cred_p)) == 0) { 20874 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 20875 case 0: 20876 err = 0; 20877 break; 20878 case EACCES: 20879 *rval_p = 1; 20880 err = 0; 20881 break; 20882 default: 20883 err = EIO; 20884 break; 20885 } 20886 } 20887 break; 20888 20889 case MHIOCQRESERVE: 20890 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 20891 if ((err = drv_priv(cred_p)) == 0) { 20892 err = sd_reserve_release(dev, SD_RESERVE); 20893 } 20894 break; 20895 20896 case MHIOCREREGISTERDEVID: 20897 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 20898 if (drv_priv(cred_p) == EPERM) { 20899 err = EPERM; 20900 } else if (ISREMOVABLE(un) || ISCD(un)) { 20901 err = ENOTTY; 20902 } else { 20903 err = sd_mhdioc_register_devid(dev); 20904 } 20905 break; 20906 20907 case MHIOCGRP_INKEYS: 20908 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 20909 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20910 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20911 err = ENOTSUP; 20912 } else { 20913 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 20914 flag); 20915 } 20916 } 20917 break; 20918 20919 case MHIOCGRP_INRESV: 20920 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 20921 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20922 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20923 err = ENOTSUP; 20924 } else { 20925 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 20926 } 20927 } 20928 break; 20929 20930 case MHIOCGRP_REGISTER: 20931 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 20932 if ((err = drv_priv(cred_p)) != EPERM) { 20933 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20934 err = ENOTSUP; 20935 } else if (arg != NULL) { 20936 mhioc_register_t reg; 20937 if (ddi_copyin((void *)arg, ®, 20938 sizeof (mhioc_register_t), flag) != 0) { 20939 err = EFAULT; 20940 } else { 20941 err = 20942 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20943 un, SD_SCSI3_REGISTER, 20944 (uchar_t *)®); 20945 } 20946 } 20947 } 20948 break; 20949 20950 case MHIOCGRP_RESERVE: 20951 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 20952 if ((err = drv_priv(cred_p)) != EPERM) { 20953 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20954 err = ENOTSUP; 20955 } else if (arg != NULL) { 20956 mhioc_resv_desc_t resv_desc; 20957 if (ddi_copyin((void *)arg, &resv_desc, 20958 sizeof (mhioc_resv_desc_t), flag) != 0) { 20959 err = EFAULT; 20960 } else { 20961 err = 20962 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20963 un, SD_SCSI3_RESERVE, 20964 (uchar_t *)&resv_desc); 20965 } 20966 } 20967 } 20968 break; 20969 20970 case MHIOCGRP_PREEMPTANDABORT: 20971 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 20972 if ((err = drv_priv(cred_p)) != EPERM) { 20973 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20974 err = 
ENOTSUP; 20975 } else if (arg != NULL) { 20976 mhioc_preemptandabort_t preempt_abort; 20977 if (ddi_copyin((void *)arg, &preempt_abort, 20978 sizeof (mhioc_preemptandabort_t), 20979 flag) != 0) { 20980 err = EFAULT; 20981 } else { 20982 err = 20983 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20984 un, SD_SCSI3_PREEMPTANDABORT, 20985 (uchar_t *)&preempt_abort); 20986 } 20987 } 20988 } 20989 break; 20990 20991 case MHIOCGRP_REGISTERANDIGNOREKEY: 20992 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n"); 20993 if ((err = drv_priv(cred_p)) != EPERM) { 20994 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20995 err = ENOTSUP; 20996 } else if (arg != NULL) { 20997 mhioc_registerandignorekey_t r_and_i; 20998 if (ddi_copyin((void *)arg, (void *)&r_and_i, 20999 sizeof (mhioc_registerandignorekey_t), 21000 flag) != 0) { 21001 err = EFAULT; 21002 } else { 21003 err = 21004 sd_send_scsi_PERSISTENT_RESERVE_OUT( 21005 un, SD_SCSI3_REGISTERANDIGNOREKEY, 21006 (uchar_t *)&r_and_i); 21007 } 21008 } 21009 } 21010 break; 21011 21012 case USCSICMD: 21013 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 21014 cr = ddi_get_cred(); 21015 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 21016 err = EPERM; 21017 } else { 21018 err = sd_uscsi_ioctl(dev, (caddr_t)arg, flag); 21019 } 21020 break; 21021 21022 case CDROMPAUSE: 21023 case CDROMRESUME: 21024 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 21025 if (!ISCD(un)) { 21026 err = ENOTTY; 21027 } else { 21028 err = sr_pause_resume(dev, cmd); 21029 } 21030 break; 21031 21032 case CDROMPLAYMSF: 21033 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 21034 if (!ISCD(un)) { 21035 err = ENOTTY; 21036 } else { 21037 err = sr_play_msf(dev, (caddr_t)arg, flag); 21038 } 21039 break; 21040 21041 case CDROMPLAYTRKIND: 21042 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 21043 #if defined(__i386) || defined(__amd64) 21044 /* 21045 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 21046 */ 21047 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 21048 #else 21049 if (!ISCD(un)) { 21050 #endif 21051 err = ENOTTY; 21052 } else { 21053 err = sr_play_trkind(dev, (caddr_t)arg, flag); 21054 } 21055 break; 21056 21057 case CDROMREADTOCHDR: 21058 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 21059 if (!ISCD(un)) { 21060 err = ENOTTY; 21061 } else { 21062 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 21063 } 21064 break; 21065 21066 case CDROMREADTOCENTRY: 21067 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 21068 if (!ISCD(un)) { 21069 err = ENOTTY; 21070 } else { 21071 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 21072 } 21073 break; 21074 21075 case CDROMSTOP: 21076 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 21077 if (!ISCD(un)) { 21078 err = ENOTTY; 21079 } else { 21080 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP, 21081 SD_PATH_STANDARD); 21082 } 21083 break; 21084 21085 case CDROMSTART: 21086 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 21087 if (!ISCD(un)) { 21088 err = ENOTTY; 21089 } else { 21090 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 21091 SD_PATH_STANDARD); 21092 } 21093 break; 21094 21095 case CDROMCLOSETRAY: 21096 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 21097 if (!ISCD(un)) { 21098 err = ENOTTY; 21099 } else { 21100 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE, 21101 SD_PATH_STANDARD); 21102 } 21103 break; 21104 21105 case FDEJECT: /* for eject command */ 21106 case DKIOCEJECT: 21107 case CDROMEJECT: 21108 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 21109 if (!ISREMOVABLE(un)) { 21110 err = ENOTTY; 21111 } else
{ 21112 err = sr_eject(dev); 21113 } 21114 break; 21115 21116 case CDROMVOLCTRL: 21117 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 21118 if (!ISCD(un)) { 21119 err = ENOTTY; 21120 } else { 21121 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 21122 } 21123 break; 21124 21125 case CDROMSUBCHNL: 21126 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 21127 if (!ISCD(un)) { 21128 err = ENOTTY; 21129 } else { 21130 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 21131 } 21132 break; 21133 21134 case CDROMREADMODE2: 21135 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 21136 if (!ISCD(un)) { 21137 err = ENOTTY; 21138 } else if (un->un_f_cfg_is_atapi == TRUE) { 21139 /* 21140 * If the drive supports READ CD, use that instead of 21141 * switching the LBA size via a MODE SELECT 21142 * Block Descriptor 21143 */ 21144 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 21145 } else { 21146 err = sr_read_mode2(dev, (caddr_t)arg, flag); 21147 } 21148 break; 21149 21150 case CDROMREADMODE1: 21151 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 21152 if (!ISCD(un)) { 21153 err = ENOTTY; 21154 } else { 21155 err = sr_read_mode1(dev, (caddr_t)arg, flag); 21156 } 21157 break; 21158 21159 case CDROMREADOFFSET: 21160 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 21161 if (!ISCD(un)) { 21162 err = ENOTTY; 21163 } else { 21164 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 21165 flag); 21166 } 21167 break; 21168 21169 case CDROMSBLKMODE: 21170 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 21171 /* 21172 * There is no means of changing block size in case of atapi 21173 * drives, thus return ENOTTY if drive type is atapi 21174 */ 21175 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 21176 err = ENOTTY; 21177 } else if (un->un_f_mmc_cap == TRUE) { 21178 21179 /* 21180 * MMC Devices do not support changing the 21181 * logical block size 21182 * 21183 * Note: EINVAL is being returned instead of ENOTTY to 21184 * maintain consistency with the original mmc 21185 * driver update. 21186 */ 21187 err = EINVAL; 21188 } else { 21189 mutex_enter(SD_MUTEX(un)); 21190 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 21191 (un->un_ncmds_in_transport > 0)) { 21192 mutex_exit(SD_MUTEX(un)); 21193 err = EINVAL; 21194 } else { 21195 mutex_exit(SD_MUTEX(un)); 21196 err = sr_change_blkmode(dev, cmd, arg, flag); 21197 } 21198 } 21199 break; 21200 21201 case CDROMGBLKMODE: 21202 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 21203 if (!ISCD(un)) { 21204 err = ENOTTY; 21205 } else if ((un->un_f_cfg_is_atapi != FALSE) && 21206 (un->un_f_blockcount_is_valid != FALSE)) { 21207 /* 21208 * Drive is an ATAPI drive so return target block 21209 * size for ATAPI drives since we cannot change the 21210 * blocksize on ATAPI drives. Used primarily to detect 21211 * if an ATAPI cdrom is present. 21212 */ 21213 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 21214 sizeof (int), flag) != 0) { 21215 err = EFAULT; 21216 } else { 21217 err = 0; 21218 } 21219 21220 } else { 21221 /* 21222 * Drive supports changing block sizes via a Mode 21223 * Select.
21224 */ 21225 err = sr_change_blkmode(dev, cmd, arg, flag); 21226 } 21227 break; 21228 21229 case CDROMGDRVSPEED: 21230 case CDROMSDRVSPEED: 21231 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 21232 if (!ISCD(un)) { 21233 err = ENOTTY; 21234 } else if (un->un_f_mmc_cap == TRUE) { 21235 /* 21236 * Note: In the future the driver implementation 21237 * for getting and 21238 * setting cd speed should entail: 21239 * 1) If non-mmc try the Toshiba mode page 21240 * (sr_change_speed) 21241 * 2) If mmc but no support for Real Time Streaming try 21242 * the SET CD SPEED (0xBB) command 21243 * (sr_atapi_change_speed) 21244 * 3) If mmc and support for Real Time Streaming 21245 * try the GET PERFORMANCE and SET STREAMING 21246 * commands (not yet implemented, 4380808) 21247 */ 21248 /* 21249 * As per recent MMC spec, CD-ROM speed is variable 21250 * and changes with LBA. Since there is no such 21251 * thing as drive speed now, fail this ioctl. 21252 * 21253 * Note: EINVAL is returned for consistency with the original 21254 * implementation which included support for getting 21255 * the drive speed of mmc devices but not setting 21256 * the drive speed. Thus EINVAL would be returned 21257 * if a set request was made for an mmc device. 21258 * We no longer support get or set speed for 21259 * mmc but need to remain consistent with regard 21260 * to the error code returned. 21261 */ 21262 err = EINVAL; 21263 } else if (un->un_f_cfg_is_atapi == TRUE) { 21264 err = sr_atapi_change_speed(dev, cmd, arg, flag); 21265 } else { 21266 err = sr_change_speed(dev, cmd, arg, flag); 21267 } 21268 break; 21269 21270 case CDROMCDDA: 21271 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 21272 if (!ISCD(un)) { 21273 err = ENOTTY; 21274 } else { 21275 err = sr_read_cdda(dev, (void *)arg, flag); 21276 } 21277 break; 21278 21279 case CDROMCDXA: 21280 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 21281 if (!ISCD(un)) { 21282 err = ENOTTY; 21283 } else { 21284 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 21285 } 21286 break; 21287 21288 case CDROMSUBCODE: 21289 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 21290 if (!ISCD(un)) { 21291 err = ENOTTY; 21292 } else { 21293 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 21294 } 21295 break; 21296 21297 case DKIOCPARTINFO: { 21298 /* 21299 * Return parameters describing the selected disk slice. 21300 * Note: this ioctl is for the intel platform only 21301 */ 21302 #if defined(__i386) || defined(__amd64) 21303 int part; 21304 21305 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCPARTINFO\n"); 21306 part = SDPART(dev); 21307 21308 /* don't check un_solaris_size for pN */ 21309 if (part < P0_RAW_DISK && un->un_solaris_size == 0) { 21310 err = EIO; 21311 } else { 21312 struct part_info p; 21313 21314 p.p_start = (daddr_t)un->un_offset[part]; 21315 p.p_length = (int)un->un_map[part].dkl_nblk; 21316 #ifdef _MULTI_DATAMODEL 21317 switch (ddi_model_convert_from(flag & FMODELS)) { 21318 case DDI_MODEL_ILP32: 21319 { 21320 struct part_info32 p32; 21321 21322 p32.p_start = (daddr32_t)p.p_start; 21323 p32.p_length = p.p_length; 21324 if (ddi_copyout(&p32, (void *)arg, 21325 sizeof (p32), flag)) 21326 err = EFAULT; 21327 break; 21328 } 21329 21330 case DDI_MODEL_NONE: 21331 { 21332 if (ddi_copyout(&p, (void *)arg, sizeof (p), 21333 flag)) 21334 err = EFAULT; 21335 break; 21336 } 21337 } 21338 #else /* !
_MULTI_DATAMODEL */ 21339 if (ddi_copyout(&p, (void *)arg, sizeof (p), flag)) 21340 err = EFAULT; 21341 #endif /* _MULTI_DATAMODEL */ 21342 } 21343 #else 21344 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCPARTINFO\n"); 21345 err = ENOTTY; 21346 #endif 21347 break; 21348 } 21349 21350 case DKIOCG_PHYGEOM: { 21351 /* Return the driver's notion of the media physical geometry */ 21352 #if defined(__i386) || defined(__amd64) 21353 struct dk_geom disk_geom; 21354 struct dk_geom *dkgp = &disk_geom; 21355 21356 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_PHYGEOM\n"); 21357 mutex_enter(SD_MUTEX(un)); 21358 21359 if (un->un_g.dkg_nhead != 0 && 21360 un->un_g.dkg_nsect != 0) { 21361 /* 21362 * We succeeded in getting a geometry, but 21363 * right now it is being reported as just the 21364 * Solaris fdisk partition, just like for 21365 * DKIOCGGEOM. We need to change that to be 21366 * correct for the entire disk now. 21367 */ 21368 bcopy(&un->un_g, dkgp, sizeof (*dkgp)); 21369 dkgp->dkg_acyl = 0; 21370 dkgp->dkg_ncyl = un->un_blockcount / 21371 (dkgp->dkg_nhead * dkgp->dkg_nsect); 21372 } else { 21373 bzero(dkgp, sizeof (struct dk_geom)); 21374 /* 21375 * This disk does not have a Solaris VTOC 21376 * so we must present a physical geometry 21377 * that will remain consistent regardless 21378 * of how the disk is used. This will ensure 21379 * that the geometry does not change regardless 21380 * of the fdisk partition type (ie. EFI, FAT32, 21381 * Solaris, etc). 21382 */ 21383 if (ISCD(un)) { 21384 dkgp->dkg_nhead = un->un_pgeom.g_nhead; 21385 dkgp->dkg_nsect = un->un_pgeom.g_nsect; 21386 dkgp->dkg_ncyl = un->un_pgeom.g_ncyl; 21387 dkgp->dkg_acyl = un->un_pgeom.g_acyl; 21388 } else { 21389 sd_convert_geometry(un->un_blockcount, dkgp); 21390 dkgp->dkg_acyl = 0; 21391 dkgp->dkg_ncyl = un->un_blockcount / 21392 (dkgp->dkg_nhead * dkgp->dkg_nsect); 21393 } 21394 } 21395 dkgp->dkg_pcyl = dkgp->dkg_ncyl + dkgp->dkg_acyl; 21396 21397 if (ddi_copyout(dkgp, (void *)arg, 21398 sizeof (struct dk_geom), flag)) { 21399 mutex_exit(SD_MUTEX(un)); 21400 err = EFAULT; 21401 } else { 21402 mutex_exit(SD_MUTEX(un)); 21403 err = 0; 21404 } 21405 #else 21406 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_PHYGEOM\n"); 21407 err = ENOTTY; 21408 #endif 21409 break; 21410 } 21411 21412 case DKIOCG_VIRTGEOM: { 21413 /* Return the driver's notion of the media's logical geometry */ 21414 #if defined(__i386) || defined(__amd64) 21415 struct dk_geom disk_geom; 21416 struct dk_geom *dkgp = &disk_geom; 21417 21418 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_VIRTGEOM\n"); 21419 mutex_enter(SD_MUTEX(un)); 21420 /* 21421 * If there is no HBA geometry available, or 21422 * if the HBA returned us something that doesn't 21423 * really fit into an Int 13/function 8 geometry 21424 * result, just fail the ioctl. See PSARC 1998/313. 
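 * (For example, an HBA-supplied logical geometry reporting
 * g_ncyl = 2048 cannot be represented in an Int 13/function 8
 * reply, whose cylinder count tops out at 1024, so the g_ncyl
 * check below rejects it with EINVAL.)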
21425 */ 21426 if (un->un_lgeom.g_nhead == 0 || 21427 un->un_lgeom.g_nsect == 0 || 21428 un->un_lgeom.g_ncyl > 1024) { 21429 mutex_exit(SD_MUTEX(un)); 21430 err = EINVAL; 21431 } else { 21432 dkgp->dkg_ncyl = un->un_lgeom.g_ncyl; 21433 dkgp->dkg_acyl = un->un_lgeom.g_acyl; 21434 dkgp->dkg_pcyl = dkgp->dkg_ncyl + dkgp->dkg_acyl; 21435 dkgp->dkg_nhead = un->un_lgeom.g_nhead; 21436 dkgp->dkg_nsect = un->un_lgeom.g_nsect; 21437 21438 if (ddi_copyout(dkgp, (void *)arg, 21439 sizeof (struct dk_geom), flag)) { 21440 mutex_exit(SD_MUTEX(un)); 21441 err = EFAULT; 21442 } else { 21443 mutex_exit(SD_MUTEX(un)); 21444 err = 0; 21445 } 21446 } 21447 #else 21448 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_VIRTGEOM\n"); 21449 err = ENOTTY; 21450 #endif 21451 break; 21452 } 21453 #ifdef SDDEBUG 21454 /* RESET/ABORTS testing ioctls */ 21455 case DKIOCRESET: { 21456 int reset_level; 21457 21458 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 21459 err = EFAULT; 21460 } else { 21461 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 21462 "reset_level = 0x%lx\n", reset_level); 21463 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 21464 err = 0; 21465 } else { 21466 err = EIO; 21467 } 21468 } 21469 break; 21470 } 21471 21472 case DKIOCABORT: 21473 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 21474 if (scsi_abort(SD_ADDRESS(un), NULL)) { 21475 err = 0; 21476 } else { 21477 err = EIO; 21478 } 21479 break; 21480 #endif 21481 21482 #ifdef SD_FAULT_INJECTION 21483 /* SDIOC FaultInjection testing ioctls */ 21484 case SDIOCSTART: 21485 case SDIOCSTOP: 21486 case SDIOCINSERTPKT: 21487 case SDIOCINSERTXB: 21488 case SDIOCINSERTUN: 21489 case SDIOCINSERTARQ: 21490 case SDIOCPUSH: 21491 case SDIOCRETRIEVE: 21492 case SDIOCRUN: 21493 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 21494 "SDIOC detected cmd:0x%X:\n", cmd); 21495 /* call error generator */ 21496 sd_faultinjection_ioctl(cmd, arg, un); 21497 err = 0; 21498 break; 21499 21500 #endif /* SD_FAULT_INJECTION */ 21501 21502 default: 21503 err = ENOTTY; 21504 break; 21505 } 21506 mutex_enter(SD_MUTEX(un)); 21507 un->un_ncmds_in_driver--; 21508 ASSERT(un->un_ncmds_in_driver >= 0); 21509 mutex_exit(SD_MUTEX(un)); 21510 21511 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 21512 return (err); 21513 } 21514 21515 21516 /* 21517 * Function: sd_uscsi_ioctl 21518 * 21519 * Description: This routine is the driver entry point for handling USCSI ioctl 21520 * requests (USCSICMD). 21521 * 21522 * Arguments: dev - the device number 21523 * arg - user provided scsi command 21524 * flag - this argument is a pass through to ddi_copyxxx() 21525 * directly from the mode argument of ioctl(). 
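 *
 * Example (a user-level sketch, illustrative only; fd is assumed
 * to be an open descriptor on the raw device, and error handling
 * is omitted): issuing a TEST UNIT READY (opcode 0x00) through
 * this ioctl:
 *
 *	struct uscsi_cmd ucmd;
 *	char cdb[CDB_GROUP0] = { 0 };
 *
 *	bzero(&ucmd, sizeof (ucmd));
 *	ucmd.uscsi_cdb = cdb;
 *	ucmd.uscsi_cdblen = CDB_GROUP0;
 *	ucmd.uscsi_flags = USCSI_SILENT;
 *	(void) ioctl(fd, USCSICMD, &ucmd);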
21526 * 21527 * Return Code: code returned by sd_send_scsi_cmd 21528 * ENXIO 21529 * EFAULT 21530 * EAGAIN 21531 */ 21532 21533 static int 21534 sd_uscsi_ioctl(dev_t dev, caddr_t arg, int flag) 21535 { 21536 #ifdef _MULTI_DATAMODEL 21537 /* 21538 * For use when a 32 bit app makes a call into a 21539 * 64 bit ioctl 21540 */ 21541 struct uscsi_cmd32 uscsi_cmd_32_for_64; 21542 struct uscsi_cmd32 *ucmd32 = &uscsi_cmd_32_for_64; 21543 model_t model; 21544 #endif /* _MULTI_DATAMODEL */ 21545 struct uscsi_cmd *scmd = NULL; 21546 struct sd_lun *un = NULL; 21547 enum uio_seg uioseg; 21548 char cdb[CDB_GROUP0]; 21549 int rval = 0; 21550 21551 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21552 return (ENXIO); 21553 } 21554 21555 SD_TRACE(SD_LOG_IOCTL, un, "sd_uscsi_ioctl: entry: un:0x%p\n", un); 21556 21557 scmd = (struct uscsi_cmd *) 21558 kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 21559 21560 #ifdef _MULTI_DATAMODEL 21561 switch (model = ddi_model_convert_from(flag & FMODELS)) { 21562 case DDI_MODEL_ILP32: 21563 { 21564 if (ddi_copyin((void *)arg, ucmd32, sizeof (*ucmd32), flag)) { 21565 rval = EFAULT; 21566 goto done; 21567 } 21568 /* 21569 * Convert the ILP32 uscsi data from the 21570 * application to LP64 for internal use. 21571 */ 21572 uscsi_cmd32touscsi_cmd(ucmd32, scmd); 21573 break; 21574 } 21575 case DDI_MODEL_NONE: 21576 if (ddi_copyin((void *)arg, scmd, sizeof (*scmd), flag)) { 21577 rval = EFAULT; 21578 goto done; 21579 } 21580 break; 21581 } 21582 #else /* ! _MULTI_DATAMODEL */ 21583 if (ddi_copyin((void *)arg, scmd, sizeof (*scmd), flag)) { 21584 rval = EFAULT; 21585 goto done; 21586 } 21587 #endif /* _MULTI_DATAMODEL */ 21588 21589 scmd->uscsi_flags &= ~USCSI_NOINTR; 21590 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : UIO_USERSPACE; 21591 if (un->un_f_format_in_progress == TRUE) { 21592 rval = EAGAIN; 21593 goto done; 21594 } 21595 21596 /* 21597 * Gotta do the ddi_copyin() here on the uscsi_cdb so that 21598 * we will have a valid cdb[0] to test. 21599 */ 21600 if ((ddi_copyin(scmd->uscsi_cdb, cdb, CDB_GROUP0, flag) == 0) && 21601 (cdb[0] == SCMD_FORMAT)) { 21602 SD_TRACE(SD_LOG_IOCTL, un, 21603 "sd_uscsi_ioctl: scmd->uscsi_cdb 0x%x\n", cdb[0]); 21604 mutex_enter(SD_MUTEX(un)); 21605 un->un_f_format_in_progress = TRUE; 21606 mutex_exit(SD_MUTEX(un)); 21607 rval = sd_send_scsi_cmd(dev, scmd, uioseg, uioseg, uioseg, 21608 SD_PATH_STANDARD); 21609 mutex_enter(SD_MUTEX(un)); 21610 un->un_f_format_in_progress = FALSE; 21611 mutex_exit(SD_MUTEX(un)); 21612 } else { 21613 SD_TRACE(SD_LOG_IOCTL, un, 21614 "sd_uscsi_ioctl: scmd->uscsi_cdb 0x%x\n", cdb[0]); 21615 /* 21616 * It's OK to fall into here even if the ddi_copyin() 21617 * on the uscsi_cdb above fails, because sd_send_scsi_cmd() 21618 * does this same copyin and will return the EFAULT 21619 * if it fails. 21620 */ 21621 rval = sd_send_scsi_cmd(dev, scmd, uioseg, uioseg, uioseg, 21622 SD_PATH_STANDARD); 21623 } 21624 #ifdef _MULTI_DATAMODEL 21625 switch (model) { 21626 case DDI_MODEL_ILP32: 21627 /* 21628 * Convert back to ILP32 before copyout to the 21629 * application 21630 */ 21631 uscsi_cmdtouscsi_cmd32(scmd, ucmd32); 21632 if (ddi_copyout(ucmd32, (void *)arg, sizeof (*ucmd32), flag)) { 21633 if (rval != 0) { 21634 rval = EFAULT; 21635 } 21636 } 21637 break; 21638 case DDI_MODEL_NONE: 21639 if (ddi_copyout(scmd, (void *)arg, sizeof (*scmd), flag)) { 21640 if (rval != 0) { 21641 rval = EFAULT; 21642 } 21643 } 21644 break; 21645 } 21646 #else /* ! 
_MULTI_DATAMODEL */ 21647 if (ddi_copyout(scmd, (void *)arg, sizeof (*scmd), flag)) { 21648 if (rval != 0) { 21649 rval = EFAULT; 21650 } 21651 } 21652 #endif /* _MULTI_DATAMODEL */ 21653 done: 21654 kmem_free(scmd, sizeof (struct uscsi_cmd)); 21655 21656 SD_TRACE(SD_LOG_IOCTL, un, "sd_uscsi_ioctl: exit: un:0x%p\n", un); 21657 21658 return (rval); 21659 } 21660 21661 21662 /* 21663 * Function: sd_dkio_ctrl_info 21664 * 21665 * Description: This routine is the driver entry point for handling controller 21666 * information ioctl requests (DKIOCINFO). 21667 * 21668 * Arguments: dev - the device number 21669 * arg - pointer to user provided dk_cinfo structure 21670 * specifying the controller type and attributes. 21671 * flag - this argument is a pass through to ddi_copyxxx() 21672 * directly from the mode argument of ioctl(). 21673 * 21674 * Return Code: 0 21675 * EFAULT 21676 * ENXIO 21677 */ 21678 21679 static int 21680 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 21681 { 21682 struct sd_lun *un = NULL; 21683 struct dk_cinfo *info; 21684 dev_info_t *pdip; 21685 int lun, tgt; 21686 21687 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21688 return (ENXIO); 21689 } 21690 21691 info = (struct dk_cinfo *) 21692 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 21693 21694 switch (un->un_ctype) { 21695 case CTYPE_CDROM: 21696 info->dki_ctype = DKC_CDROM; 21697 break; 21698 default: 21699 info->dki_ctype = DKC_SCSI_CCS; 21700 break; 21701 } 21702 pdip = ddi_get_parent(SD_DEVINFO(un)); 21703 info->dki_cnum = ddi_get_instance(pdip); 21704 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 21705 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 21706 } else { 21707 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 21708 DK_DEVLEN - 1); 21709 } 21710 21711 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 21712 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 21713 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 21714 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 21715 21716 /* Unit Information */ 21717 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 21718 info->dki_slave = ((tgt << 3) | lun); 21719 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 21720 DK_DEVLEN - 1); 21721 info->dki_flags = DKI_FMTVOL; 21722 info->dki_partition = SDPART(dev); 21723 21724 /* Max Transfer size of this device in blocks */ 21725 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 21726 info->dki_addr = 0; 21727 info->dki_space = 0; 21728 info->dki_prio = 0; 21729 info->dki_vec = 0; 21730 21731 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 21732 kmem_free(info, sizeof (struct dk_cinfo)); 21733 return (EFAULT); 21734 } else { 21735 kmem_free(info, sizeof (struct dk_cinfo)); 21736 return (0); 21737 } 21738 } 21739 21740 21741 /* 21742 * Function: sd_get_media_info 21743 * 21744 * Description: This routine is the driver entry point for handling ioctl 21745 * requests for the media type or command set profile used by the 21746 * drive to operate on the media (DKIOCGMEDIAINFO). 21747 * 21748 * Arguments: dev - the device number 21749 * arg - pointer to user provided dk_minfo structure 21750 * specifying the media type, logical block size and 21751 * drive capacity. 21752 * flag - this argument is a pass through to ddi_copyxxx() 21753 * directly from the mode argument of ioctl().
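 *
 * Example (a user-level sketch, illustrative only; fd is assumed
 * to be an open descriptor on the raw device):
 *
 *	struct dk_minfo minfo;
 *
 *	if (ioctl(fd, DKIOCGMEDIAINFO, &minfo) == 0) {
 *		minfo.dki_media_type, minfo.dki_lbsize and
 *		minfo.dki_capacity now describe the media
 *	}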
21754 * 21755 * Return Code: 0 21756 * EACCES 21757 * EFAULT 21758 * ENXIO 21759 * EIO 21760 */ 21761 21762 static int 21763 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 21764 { 21765 struct sd_lun *un = NULL; 21766 struct uscsi_cmd com; 21767 struct scsi_inquiry *sinq; 21768 struct dk_minfo media_info; 21769 u_longlong_t media_capacity; 21770 uint64_t capacity; 21771 uint_t lbasize; 21772 uchar_t *out_data; 21773 uchar_t *rqbuf; 21774 int rval = 0; 21775 int rtn; 21776 21777 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 21778 (un->un_state == SD_STATE_OFFLINE)) { 21779 return (ENXIO); 21780 } 21781 21782 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 21783 21784 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 21785 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 21786 21787 /* Issue a TUR to determine if the drive is ready with media present */ 21788 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 21789 if (rval == ENXIO) { 21790 goto done; 21791 } 21792 21793 /* Now get configuration data */ 21794 if (ISCD(un)) { 21795 media_info.dki_media_type = DK_CDROM; 21796 21797 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 21798 if (un->un_f_mmc_cap == TRUE) { 21799 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 21800 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN); 21801 21802 if (rtn) { 21803 /* 21804 * Failed for other than an illegal request 21805 * or command not supported 21806 */ 21807 if ((com.uscsi_status == STATUS_CHECK) && 21808 (com.uscsi_rqstatus == STATUS_GOOD)) { 21809 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 21810 (rqbuf[12] != 0x20)) { 21811 rval = EIO; 21812 goto done; 21813 } 21814 } 21815 } else { 21816 /* 21817 * The GET CONFIGURATION command succeeded 21818 * so set the media type according to the 21819 * returned data 21820 */ 21821 media_info.dki_media_type = out_data[6]; 21822 media_info.dki_media_type <<= 8; 21823 media_info.dki_media_type |= out_data[7]; 21824 } 21825 } 21826 } else { 21827 /* 21828 * The profile list is not available, so we attempt to identify 21829 * the media type based on the inquiry data 21830 */ 21831 sinq = un->un_sd->sd_inq; 21832 if (sinq->inq_qual == 0) { 21833 /* This is a direct access device */ 21834 media_info.dki_media_type = DK_FIXED_DISK; 21835 21836 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 21837 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 21838 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 21839 media_info.dki_media_type = DK_ZIP; 21840 } else if ( 21841 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 21842 media_info.dki_media_type = DK_JAZ; 21843 } 21844 } 21845 } else { 21846 /* Not a CD or direct access so return unknown media */ 21847 media_info.dki_media_type = DK_UNKNOWN; 21848 } 21849 } 21850 21851 /* Now read the capacity so we can provide the lbasize and capacity */ 21852 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 21853 SD_PATH_DIRECT)) { 21854 case 0: 21855 break; 21856 case EACCES: 21857 rval = EACCES; 21858 goto done; 21859 default: 21860 rval = EIO; 21861 goto done; 21862 } 21863 21864 media_info.dki_lbsize = lbasize; 21865 media_capacity = capacity; 21866 21867 /* 21868 * sd_send_scsi_READ_CAPACITY() reports capacity in 21869 * un->un_sys_blocksize chunks. So we need to convert it into 21870 * cap.lbasize chunks.
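 * For example (illustrative numbers only): capacity = 1310720
 * system blocks of un_sys_blocksize = 512 bytes on a CD with
 * lbasize = 2048 gives
 * media_capacity = 1310720 * 512 / 2048 = 327680 chunks.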
21871 */ 21872 media_capacity *= un->un_sys_blocksize; 21873 media_capacity /= lbasize; 21874 media_info.dki_capacity = media_capacity; 21875 21876 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 21877 rval = EFAULT; 21878 /* Put goto. Anybody might add some code below in future */ 21879 goto done; 21880 } 21881 done: 21882 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 21883 kmem_free(rqbuf, SENSE_LENGTH); 21884 return (rval); 21885 } 21886 21887 21888 /* 21889 * Function: sd_dkio_get_geometry 21890 * 21891 * Description: This routine is the driver entry point for handling user 21892 * requests to get the device geometry (DKIOCGGEOM). 21893 * 21894 * Arguments: dev - the device number 21895 * arg - pointer to user provided dk_geom structure specifying 21896 * the controller's notion of the current geometry. 21897 * flag - this argument is a pass through to ddi_copyxxx() 21898 * directly from the mode argument of ioctl(). 21899 * geom_validated - flag indicating if the device geometry has been 21900 * previously validated in the sdioctl routine. 21901 * 21902 * Return Code: 0 21903 * EFAULT 21904 * ENXIO 21905 * EIO 21906 */ 21907 21908 static int 21909 sd_dkio_get_geometry(dev_t dev, caddr_t arg, int flag, int geom_validated) 21910 { 21911 struct sd_lun *un = NULL; 21912 struct dk_geom *tmp_geom = NULL; 21913 int rval = 0; 21914 21915 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21916 return (ENXIO); 21917 } 21918 21919 #if defined(__i386) || defined(__amd64) 21920 if (un->un_solaris_size == 0) { 21921 return (EIO); 21922 } 21923 #endif 21924 if (geom_validated == FALSE) { 21925 /* 21926 * sd_validate_geometry does not spin a disk up 21927 * if it was spun down. We need to make sure it 21928 * is ready. 21929 */ 21930 if ((rval = sd_send_scsi_TEST_UNIT_READY(un, 0)) != 0) { 21931 return (rval); 21932 } 21933 mutex_enter(SD_MUTEX(un)); 21934 rval = sd_validate_geometry(un, SD_PATH_DIRECT); 21935 mutex_exit(SD_MUTEX(un)); 21936 } 21937 if (rval) 21938 return (rval); 21939 21940 /* 21941 * Make a local copy of the soft state geometry to avoid some potential 21942 * race conditions associated with holding the mutex and updating the 21943 * write_reinstruct value 21944 */ 21945 tmp_geom = kmem_zalloc(sizeof (struct dk_geom), KM_SLEEP); 21946 mutex_enter(SD_MUTEX(un)); 21947 bcopy(&un->un_g, tmp_geom, sizeof (struct dk_geom)); 21948 mutex_exit(SD_MUTEX(un)); 21949 21950 if (tmp_geom->dkg_write_reinstruct == 0) { 21951 tmp_geom->dkg_write_reinstruct = 21952 (int)((int)(tmp_geom->dkg_nsect * tmp_geom->dkg_rpm * 21953 sd_rot_delay) / (int)60000); 21954 } 21955 21956 rval = ddi_copyout(tmp_geom, (void *)arg, sizeof (struct dk_geom), 21957 flag); 21958 if (rval != 0) { 21959 rval = EFAULT; 21960 } 21961 21962 kmem_free(tmp_geom, sizeof (struct dk_geom)); 21963 return (rval); 21964 21965 } 21966 21967 21968 /* 21969 * Function: sd_dkio_set_geometry 21970 * 21971 * Description: This routine is the driver entry point for handling user 21972 * requests to set the device geometry (DKIOCSGEOM). The actual 21973 * device geometry is not updated, just the driver "notion" of it. 21974 * 21975 * Arguments: dev - the device number 21976 * arg - pointer to user provided dk_geom structure used to set 21977 * the controller's notion of the current geometry. 21978 * flag - this argument is a pass through to ddi_copyxxx() 21979 * directly from the mode argument of ioctl(). 
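 *
 * Example (a user-level sketch, illustrative only; fd is assumed
 * to be an open descriptor on the raw device): since only the
 * driver's notion changes, the usual pattern is read-modify-write:
 *
 *	struct dk_geom geom;
 *
 *	if (ioctl(fd, DKIOCGGEOM, &geom) == 0) {
 *		geom.dkg_rpm = 7200;	(hypothetical tweak)
 *		(void) ioctl(fd, DKIOCSGEOM, &geom);
 *	}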
 *
 * Return Code: 0
 *              EFAULT
 *              ENXIO
 *              EIO
 */

static int
sd_dkio_set_geometry(dev_t dev, caddr_t arg, int flag)
{
    struct sd_lun   *un = NULL;
    struct dk_geom  *tmp_geom;
    struct dk_map   *lp;
    int             rval = 0;
    int             i;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

#if defined(__i386) || defined(__amd64)
    if (un->un_solaris_size == 0) {
        return (EIO);
    }
#endif
    /*
     * We need to copy the user specified geometry into local
     * storage and then update the softstate. We don't want to hold
     * the mutex and copyin directly from the user to the soft state
     */
    tmp_geom = (struct dk_geom *)
        kmem_zalloc(sizeof (struct dk_geom), KM_SLEEP);
    rval = ddi_copyin(arg, tmp_geom, sizeof (struct dk_geom), flag);
    if (rval != 0) {
        kmem_free(tmp_geom, sizeof (struct dk_geom));
        return (EFAULT);
    }

    mutex_enter(SD_MUTEX(un));
    bcopy(tmp_geom, &un->un_g, sizeof (struct dk_geom));
    for (i = 0; i < NDKMAP; i++) {
        lp = &un->un_map[i];
        un->un_offset[i] =
            un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno;
#if defined(__i386) || defined(__amd64)
        un->un_offset[i] += un->un_solaris_offset;
#endif
    }
    un->un_f_geometry_is_valid = FALSE;
    mutex_exit(SD_MUTEX(un));
    kmem_free(tmp_geom, sizeof (struct dk_geom));

    return (rval);
}


/*
 * Function: sd_dkio_get_partition
 *
 * Description: This routine is the driver entry point for handling user
 *              requests to get the partition table (DKIOCGAPART).
 *
 * Arguments:   dev  - the device number
 *              arg  - pointer to user provided dk_allmap structure specifying
 *                     the controller's notion of the current partition table.
 *              flag - this argument is a pass through to ddi_copyxxx()
 *                     directly from the mode argument of ioctl().
 *              geom_validated - flag indicating if the device geometry has
 *                     been previously validated in the sdioctl routine.
 *
 * Return Code: 0
 *              EFAULT
 *              ENXIO
 *              EIO
 */

static int
sd_dkio_get_partition(dev_t dev, caddr_t arg, int flag, int geom_validated)
{
    struct sd_lun   *un = NULL;
    int             rval = 0;
    int             size;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

#if defined(__i386) || defined(__amd64)
    if (un->un_solaris_size == 0) {
        return (EIO);
    }
#endif
    /*
     * Make sure the geometry is valid before getting the partition
     * information.
     */
    mutex_enter(SD_MUTEX(un));
    if (geom_validated == FALSE) {
        /*
         * sd_validate_geometry does not spin a disk up
         * if it was spun down. We need to make sure it
         * is ready before validating the geometry.
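         * (The TEST UNIT READY below blocks for command completion,
         * so SD_MUTEX is dropped around it and reacquired before
         * sd_validate_geometry() is called.)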
         */
        mutex_exit(SD_MUTEX(un));
        if ((rval = sd_send_scsi_TEST_UNIT_READY(un, 0)) != 0) {
            return (rval);
        }
        mutex_enter(SD_MUTEX(un));

        if ((rval = sd_validate_geometry(un, SD_PATH_DIRECT)) != 0) {
            mutex_exit(SD_MUTEX(un));
            return (rval);
        }
    }
    mutex_exit(SD_MUTEX(un));

#ifdef _MULTI_DATAMODEL
    switch (ddi_model_convert_from(flag & FMODELS)) {
    case DDI_MODEL_ILP32: {
        struct dk_map32 dk_map32[NDKMAP];
        int i;

        for (i = 0; i < NDKMAP; i++) {
            dk_map32[i].dkl_cylno = un->un_map[i].dkl_cylno;
            dk_map32[i].dkl_nblk = un->un_map[i].dkl_nblk;
        }
        size = NDKMAP * sizeof (struct dk_map32);
        rval = ddi_copyout(dk_map32, (void *)arg, size, flag);
        if (rval != 0) {
            rval = EFAULT;
        }
        break;
    }
    case DDI_MODEL_NONE:
        size = NDKMAP * sizeof (struct dk_map);
        rval = ddi_copyout(un->un_map, (void *)arg, size, flag);
        if (rval != 0) {
            rval = EFAULT;
        }
        break;
    }
#else /* ! _MULTI_DATAMODEL */
    size = NDKMAP * sizeof (struct dk_map);
    rval = ddi_copyout(un->un_map, (void *)arg, size, flag);
    if (rval != 0) {
        rval = EFAULT;
    }
#endif /* _MULTI_DATAMODEL */
    return (rval);
}


/*
 * Function: sd_dkio_set_partition
 *
 * Description: This routine is the driver entry point for handling user
 *              requests to set the partition table (DKIOCSAPART). The actual
 *              device partition is not updated.
 *
 * Arguments:   dev  - the device number
 *              arg  - pointer to user provided dk_allmap structure used to set
 *                     the controller's notion of the partition table.
 *              flag - this argument is a pass through to ddi_copyxxx()
 *                     directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *              EINVAL
 *              EFAULT
 *              ENXIO
 *              EIO
 */

static int
sd_dkio_set_partition(dev_t dev, caddr_t arg, int flag)
{
    struct sd_lun   *un = NULL;
    struct dk_map   dk_map[NDKMAP];
    struct dk_map   *lp;
    int             rval = 0;
    int             size;
    int             i;
#if defined(_SUNOS_VTOC_16)
    struct dkl_partition    *vp;
#endif

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

    /*
     * Set the map for all logical partitions. We hold SD_MUTEX
     * around the update so the map is never seen half updated.
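     * (SD_MUTEX is entered below and held across the bcopy and the
     * per-partition offset recomputation.)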
     */
    _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_solaris_size))
    mutex_enter(SD_MUTEX(un));
    if (un->un_blockcount > DK_MAX_BLOCKS) {
        mutex_exit(SD_MUTEX(un));
        return (ENOTSUP);
    }
    mutex_exit(SD_MUTEX(un));
    if (un->un_solaris_size == 0) {
        return (EIO);
    }

#ifdef _MULTI_DATAMODEL
    switch (ddi_model_convert_from(flag & FMODELS)) {
    case DDI_MODEL_ILP32: {
        struct dk_map32 dk_map32[NDKMAP];

        size = NDKMAP * sizeof (struct dk_map32);
        rval = ddi_copyin((void *)arg, dk_map32, size, flag);
        if (rval != 0) {
            return (EFAULT);
        }
        for (i = 0; i < NDKMAP; i++) {
            dk_map[i].dkl_cylno = dk_map32[i].dkl_cylno;
            dk_map[i].dkl_nblk = dk_map32[i].dkl_nblk;
        }
        break;
    }
    case DDI_MODEL_NONE:
        size = NDKMAP * sizeof (struct dk_map);
        rval = ddi_copyin((void *)arg, dk_map, size, flag);
        if (rval != 0) {
            return (EFAULT);
        }
        break;
    }
#else /* ! _MULTI_DATAMODEL */
    size = NDKMAP * sizeof (struct dk_map);
    rval = ddi_copyin((void *)arg, dk_map, size, flag);
    if (rval != 0) {
        return (EFAULT);
    }
#endif /* _MULTI_DATAMODEL */

    mutex_enter(SD_MUTEX(un));
    /* Note: The size used in this bcopy is set based upon the data model */
    bcopy(dk_map, un->un_map, size);
#if defined(_SUNOS_VTOC_16)
    vp = (struct dkl_partition *)&(un->un_vtoc);
#endif  /* defined(_SUNOS_VTOC_16) */
    for (i = 0; i < NDKMAP; i++) {
        lp = &un->un_map[i];
        un->un_offset[i] =
            un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno;
#if defined(_SUNOS_VTOC_16)
        vp->p_start = un->un_offset[i];
        vp->p_size = lp->dkl_nblk;
        vp++;
#endif  /* defined(_SUNOS_VTOC_16) */
#if defined(__i386) || defined(__amd64)
        un->un_offset[i] += un->un_solaris_offset;
#endif
    }
    mutex_exit(SD_MUTEX(un));
    return (rval);
}


/*
 * Function: sd_dkio_get_vtoc
 *
 * Description: This routine is the driver entry point for handling user
 *              requests to get the current volume table of contents
 *              (DKIOCGVTOC).
 *
 * Arguments:   dev  - the device number
 *              arg  - pointer to user provided vtoc structure specifying
 *                     the current vtoc.
 *              flag - this argument is a pass through to ddi_copyxxx()
 *                     directly from the mode argument of ioctl().
 *              geom_validated - flag indicating if the device geometry has
 *                     been previously validated in the sdioctl routine.
 *
 * Return Code: 0
 *              EFAULT
 *              ENXIO
 *              EIO
 */

static int
sd_dkio_get_vtoc(dev_t dev, caddr_t arg, int flag, int geom_validated)
{
    struct sd_lun   *un = NULL;
#if defined(_SUNOS_VTOC_8)
    struct vtoc     user_vtoc;
#endif  /* defined(_SUNOS_VTOC_8) */
    int             rval = 0;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

    mutex_enter(SD_MUTEX(un));
    if (geom_validated == FALSE) {
        /*
         * sd_validate_geometry does not spin a disk up
         * if it was spun down. We need to make sure it
         * is ready.
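         * (As in sd_dkio_get_partition above, a TEST UNIT READY is
         * issued below with SD_MUTEX dropped around the command.)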
         */
        mutex_exit(SD_MUTEX(un));
        if ((rval = sd_send_scsi_TEST_UNIT_READY(un, 0)) != 0) {
            return (rval);
        }
        mutex_enter(SD_MUTEX(un));
        if ((rval = sd_validate_geometry(un, SD_PATH_DIRECT)) != 0) {
            mutex_exit(SD_MUTEX(un));
            return (rval);
        }
    }

#if defined(_SUNOS_VTOC_8)
    sd_build_user_vtoc(un, &user_vtoc);
    mutex_exit(SD_MUTEX(un));

#ifdef _MULTI_DATAMODEL
    switch (ddi_model_convert_from(flag & FMODELS)) {
    case DDI_MODEL_ILP32: {
        struct vtoc32 user_vtoc32;

        vtoctovtoc32(user_vtoc, user_vtoc32);
        if (ddi_copyout(&user_vtoc32, (void *)arg,
            sizeof (struct vtoc32), flag)) {
            return (EFAULT);
        }
        break;
    }

    case DDI_MODEL_NONE:
        if (ddi_copyout(&user_vtoc, (void *)arg,
            sizeof (struct vtoc), flag)) {
            return (EFAULT);
        }
        break;
    }
#else /* ! _MULTI_DATAMODEL */
    if (ddi_copyout(&user_vtoc, (void *)arg, sizeof (struct vtoc), flag)) {
        return (EFAULT);
    }
#endif /* _MULTI_DATAMODEL */

#elif defined(_SUNOS_VTOC_16)
    mutex_exit(SD_MUTEX(un));

#ifdef _MULTI_DATAMODEL
    /*
     * The un_vtoc structure is a "struct dk_vtoc" which is always
     * 32-bit to maintain compatibility with existing on-disk
     * structures. Thus, we need to convert the structure when copying
     * it out to a datamodel-dependent "struct vtoc" in a 64-bit
     * program. If the target is a 32-bit program, then no conversion
     * is necessary.
     */
    /* LINTED: logical expression always true: op "||" */
    ASSERT(sizeof (un->un_vtoc) == sizeof (struct vtoc32));
    switch (ddi_model_convert_from(flag & FMODELS)) {
    case DDI_MODEL_ILP32:
        if (ddi_copyout(&(un->un_vtoc), (void *)arg,
            sizeof (un->un_vtoc), flag)) {
            return (EFAULT);
        }
        break;

    case DDI_MODEL_NONE: {
        struct vtoc user_vtoc;

        vtoc32tovtoc(un->un_vtoc, user_vtoc);
        if (ddi_copyout(&user_vtoc, (void *)arg,
            sizeof (struct vtoc), flag)) {
            return (EFAULT);
        }
        break;
    }
    }
#else /* ! _MULTI_DATAMODEL */
    if (ddi_copyout(&(un->un_vtoc), (void *)arg, sizeof (un->un_vtoc),
        flag)) {
        return (EFAULT);
    }
#endif /* _MULTI_DATAMODEL */
#else
#error "No VTOC format defined."
#endif

    return (rval);
}

static int
sd_dkio_get_efi(dev_t dev, caddr_t arg, int flag)
{
    struct sd_lun   *un = NULL;
    dk_efi_t        user_efi;
    int             rval = 0;
    void            *buffer;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL)
        return (ENXIO);

    if (ddi_copyin(arg, &user_efi, sizeof (dk_efi_t), flag))
        return (EFAULT);

    user_efi.dki_data = (void *)(uintptr_t)user_efi.dki_data_64;

    if ((user_efi.dki_length % un->un_tgt_blocksize) ||
        (user_efi.dki_length > un->un_max_xfer_size))
        return (EINVAL);

    buffer = kmem_alloc(user_efi.dki_length, KM_SLEEP);
    rval = sd_send_scsi_READ(un, buffer, user_efi.dki_length,
        user_efi.dki_lba, SD_PATH_DIRECT);
    if (rval == 0 && ddi_copyout(buffer, user_efi.dki_data,
        user_efi.dki_length, flag) != 0)
        rval = EFAULT;

    kmem_free(buffer, user_efi.dki_length);
    return (rval);
}

/*
 * Function: sd_build_user_vtoc
 *
 * Description: This routine populates a pass by reference variable with the
 *              current volume table of contents.
 *
 * Arguments:   un - driver soft state (unit) structure
 *              user_vtoc - pointer to vtoc structure to be populated
 */

static void
sd_build_user_vtoc(struct sd_lun *un, struct vtoc *user_vtoc)
{
    struct dk_map2      *lpart;
    struct dk_map       *lmap;
    struct partition    *vpart;
    int                 nblks;
    int                 i;

    ASSERT(mutex_owned(SD_MUTEX(un)));

    /*
     * Return vtoc structure fields in the provided VTOC area, addressed
     * by *vtoc.
     */
    bzero(user_vtoc, sizeof (struct vtoc));
    user_vtoc->v_bootinfo[0] = un->un_vtoc.v_bootinfo[0];
    user_vtoc->v_bootinfo[1] = un->un_vtoc.v_bootinfo[1];
    user_vtoc->v_bootinfo[2] = un->un_vtoc.v_bootinfo[2];
    user_vtoc->v_sanity = VTOC_SANE;
    user_vtoc->v_version = un->un_vtoc.v_version;
    bcopy(un->un_vtoc.v_volume, user_vtoc->v_volume, LEN_DKL_VVOL);
    user_vtoc->v_sectorsz = un->un_sys_blocksize;
    user_vtoc->v_nparts = un->un_vtoc.v_nparts;
    bcopy(un->un_vtoc.v_reserved, user_vtoc->v_reserved,
        sizeof (un->un_vtoc.v_reserved));
    /*
     * Convert partitioning information.
     *
     * Note the conversion from starting cylinder number
     * to starting sector number.
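     * For example (illustrative geometry): with dkg_nhead = 16 and
     * dkg_nsect = 63, nblks = 1008, so a partition starting at
     * cylinder 4 is reported with p_start = 4 * 1008 = 4032.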
     */
    lmap = un->un_map;
    lpart = (struct dk_map2 *)un->un_vtoc.v_part;
    vpart = user_vtoc->v_part;

    nblks = un->un_g.dkg_nsect * un->un_g.dkg_nhead;

    for (i = 0; i < V_NUMPAR; i++) {
        vpart->p_tag = lpart->p_tag;
        vpart->p_flag = lpart->p_flag;
        vpart->p_start = lmap->dkl_cylno * nblks;
        vpart->p_size = lmap->dkl_nblk;
        lmap++;
        lpart++;
        vpart++;

        /* (4364927) */
        user_vtoc->timestamp[i] = (time_t)un->un_vtoc.v_timestamp[i];
    }

    bcopy(un->un_asciilabel, user_vtoc->v_asciilabel, LEN_DKL_ASCII);
}

static int
sd_dkio_partition(dev_t dev, caddr_t arg, int flag)
{
    struct sd_lun       *un = NULL;
    struct partition64  p64;
    int                 rval = 0;
    uint_t              nparts;
    efi_gpe_t           *partitions;
    efi_gpt_t           *buffer;
    diskaddr_t          gpe_lba;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

    if (ddi_copyin((const void *)arg, &p64,
        sizeof (struct partition64), flag)) {
        return (EFAULT);
    }

    buffer = kmem_alloc(EFI_MIN_ARRAY_SIZE, KM_SLEEP);
    rval = sd_send_scsi_READ(un, buffer, DEV_BSIZE,
        1, SD_PATH_DIRECT);
    if (rval != 0)
        goto done_error;

    sd_swap_efi_gpt(buffer);

    if ((rval = sd_validate_efi(buffer)) != 0)
        goto done_error;

    nparts = buffer->efi_gpt_NumberOfPartitionEntries;
    gpe_lba = buffer->efi_gpt_PartitionEntryLBA;
    if (p64.p_partno > nparts) {
        /* couldn't find it */
        rval = ESRCH;
        goto done_error;
    }
    /*
     * if we're dealing with a partition that's out of the normal
     * 16K block, adjust accordingly
     */
    gpe_lba += p64.p_partno / sizeof (efi_gpe_t);
    rval = sd_send_scsi_READ(un, buffer, EFI_MIN_ARRAY_SIZE,
        gpe_lba, SD_PATH_DIRECT);
    if (rval) {
        goto done_error;
    }
    partitions = (efi_gpe_t *)buffer;

    sd_swap_efi_gpe(nparts, partitions);

    partitions += p64.p_partno;
    bcopy(&partitions->efi_gpe_PartitionTypeGUID, &p64.p_type,
        sizeof (struct uuid));
    p64.p_start = partitions->efi_gpe_StartingLBA;
    p64.p_size = partitions->efi_gpe_EndingLBA -
        p64.p_start + 1;

    if (ddi_copyout(&p64, (void *)arg, sizeof (struct partition64), flag))
        rval = EFAULT;

done_error:
    kmem_free(buffer, EFI_MIN_ARRAY_SIZE);
    return (rval);
}


/*
 * Function: sd_dkio_set_vtoc
 *
 * Description: This routine is the driver entry point for handling user
 *              requests to set the current volume table of contents
 *              (DKIOCSVTOC).
 *
 * Arguments:   dev  - the device number
 *              arg  - pointer to user provided vtoc structure used to set the
 *                     current vtoc.
 *              flag - this argument is a pass through to ddi_copyxxx()
 *                     directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *              EFAULT
 *              ENXIO
 *              EINVAL
 *              ENOTSUP
 */

static int
sd_dkio_set_vtoc(dev_t dev, caddr_t arg, int flag)
{
    struct sd_lun   *un = NULL;
    struct vtoc     user_vtoc;
    int             rval = 0;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

#if defined(__i386) || defined(__amd64)
    if (un->un_tgt_blocksize != un->un_sys_blocksize) {
        return (EINVAL);
    }
#endif

#ifdef _MULTI_DATAMODEL
    switch (ddi_model_convert_from(flag & FMODELS)) {
    case DDI_MODEL_ILP32: {
        struct vtoc32 user_vtoc32;

        if (ddi_copyin((const void *)arg, &user_vtoc32,
            sizeof (struct vtoc32), flag)) {
            return (EFAULT);
        }
        vtoc32tovtoc(user_vtoc32, user_vtoc);
        break;
    }

    case DDI_MODEL_NONE:
        if (ddi_copyin((const void *)arg, &user_vtoc,
            sizeof (struct vtoc), flag)) {
            return (EFAULT);
        }
        break;
    }
#else /* ! _MULTI_DATAMODEL */
    if (ddi_copyin((const void *)arg, &user_vtoc,
        sizeof (struct vtoc), flag)) {
        return (EFAULT);
    }
#endif /* _MULTI_DATAMODEL */

    mutex_enter(SD_MUTEX(un));
    if (un->un_blockcount > DK_MAX_BLOCKS) {
        mutex_exit(SD_MUTEX(un));
        return (ENOTSUP);
    }
    if (un->un_g.dkg_ncyl == 0) {
        mutex_exit(SD_MUTEX(un));
        return (EINVAL);
    }

    mutex_exit(SD_MUTEX(un));
    sd_clear_efi(un);
    ddi_remove_minor_node(SD_DEVINFO(un), "wd");
    ddi_remove_minor_node(SD_DEVINFO(un), "wd,raw");
    (void) ddi_create_minor_node(SD_DEVINFO(un), "h",
        S_IFBLK, (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE,
        un->un_node_type, NULL);
    (void) ddi_create_minor_node(SD_DEVINFO(un), "h,raw",
        S_IFCHR, (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE,
        un->un_node_type, NULL);
    mutex_enter(SD_MUTEX(un));

    if ((rval = sd_build_label_vtoc(un, &user_vtoc)) == 0) {
        if ((rval = sd_write_label(dev)) == 0) {
            if ((rval = sd_validate_geometry(un, SD_PATH_DIRECT))
                != 0) {
                SD_ERROR(SD_LOG_IOCTL_DKIO, un,
                    "sd_dkio_set_vtoc: "
                    "Failed validate geometry\n");
            }
        }
    }

    /*
     * Even if sd_build_label_vtoc or sd_write_label failed above, write
     * the devid anyway; it can't hurt. Also preserve the device id by
     * writing to the disk acyl for the case where a devid has been
     * fabricated.
     */
    if (!ISREMOVABLE(un) && !ISCD(un) &&
        (un->un_f_opt_fab_devid == TRUE)) {
        if (un->un_devid == NULL) {
            sd_register_devid(un, SD_DEVINFO(un),
                SD_TARGET_IS_UNRESERVED);
        } else {
            /*
             * The device id for this disk has been
             * fabricated. Fabricated device ids are
             * managed by storing them in the last 2
             * available sectors on the drive. The device
             * id must be preserved by writing it back out
             * to this location.
             */
            if (sd_write_deviceid(un) != 0) {
                ddi_devid_free(un->un_devid);
                un->un_devid = NULL;
            }
        }
    }
    mutex_exit(SD_MUTEX(un));
    return (rval);
}


/*
 * Function: sd_build_label_vtoc
 *
 * Description: This routine updates the driver soft state current volume table
 *              of contents based on a user specified vtoc.
 *
 * Arguments:   un - driver soft state (unit) structure
 *              user_vtoc - pointer to vtoc structure specifying vtoc to be
 *                     used to update the driver soft state.
 *
 * Return Code: 0
 *              EINVAL
 */

static int
sd_build_label_vtoc(struct sd_lun *un, struct vtoc *user_vtoc)
{
    struct dk_map       *lmap;
    struct partition    *vpart;
    int                 nblks;
#if defined(_SUNOS_VTOC_8)
    int                 ncyl;
    struct dk_map2      *lpart;
#endif  /* defined(_SUNOS_VTOC_8) */
    int                 i;

    ASSERT(mutex_owned(SD_MUTEX(un)));

    /* Sanity-check the vtoc */
    if (user_vtoc->v_sanity != VTOC_SANE ||
        user_vtoc->v_sectorsz != un->un_sys_blocksize ||
        user_vtoc->v_nparts != V_NUMPAR) {
        return (EINVAL);
    }

    nblks = un->un_g.dkg_nsect * un->un_g.dkg_nhead;
    if (nblks == 0) {
        return (EINVAL);
    }

#if defined(_SUNOS_VTOC_8)
    vpart = user_vtoc->v_part;
    for (i = 0; i < V_NUMPAR; i++) {
        if ((vpart->p_start % nblks) != 0) {
            return (EINVAL);
        }
        ncyl = vpart->p_start / nblks;
        ncyl += vpart->p_size / nblks;
        if ((vpart->p_size % nblks) != 0) {
            ncyl++;
        }
        if (ncyl > (int)un->un_g.dkg_ncyl) {
            return (EINVAL);
        }
        vpart++;
    }
#endif  /* defined(_SUNOS_VTOC_8) */

    /* Put appropriate vtoc structure fields into the disk label */
#if defined(_SUNOS_VTOC_16)
    /*
     * The vtoc is always a 32bit data structure to maintain the
     * on-disk format. Convert "in place" instead of bcopying it.
     */
    vtoctovtoc32((*user_vtoc), (*((struct vtoc32 *)&(un->un_vtoc))));

    /*
     * in the 16-slice vtoc, starting sectors are expressed in
     * numbers *relative* to the start of the Solaris fdisk partition.
     */
    lmap = un->un_map;
    vpart = user_vtoc->v_part;

    for (i = 0; i < (int)user_vtoc->v_nparts; i++, lmap++, vpart++) {
        lmap->dkl_cylno = vpart->p_start / nblks;
        lmap->dkl_nblk = vpart->p_size;
    }

#elif defined(_SUNOS_VTOC_8)

    un->un_vtoc.v_bootinfo[0] = (uint32_t)user_vtoc->v_bootinfo[0];
    un->un_vtoc.v_bootinfo[1] = (uint32_t)user_vtoc->v_bootinfo[1];
    un->un_vtoc.v_bootinfo[2] = (uint32_t)user_vtoc->v_bootinfo[2];

    un->un_vtoc.v_sanity = (uint32_t)user_vtoc->v_sanity;
    un->un_vtoc.v_version = (uint32_t)user_vtoc->v_version;

    bcopy(user_vtoc->v_volume, un->un_vtoc.v_volume, LEN_DKL_VVOL);

    un->un_vtoc.v_nparts = user_vtoc->v_nparts;

    bcopy(user_vtoc->v_reserved, un->un_vtoc.v_reserved,
        sizeof (un->un_vtoc.v_reserved));

    /*
     * Note the conversion from starting sector number
     * to starting cylinder number.
     * Return error if division results in a remainder.
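     * (The remainder case was already rejected by the p_start % nblks
     * sanity check above; e.g. with nblks = 1008, p_start = 4032 maps
     * cleanly to dkl_cylno = 4.)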
     */
    lmap = un->un_map;
    lpart = un->un_vtoc.v_part;
    vpart = user_vtoc->v_part;

    for (i = 0; i < (int)user_vtoc->v_nparts; i++) {
        lpart->p_tag = vpart->p_tag;
        lpart->p_flag = vpart->p_flag;
        lmap->dkl_cylno = vpart->p_start / nblks;
        lmap->dkl_nblk = vpart->p_size;

        lmap++;
        lpart++;
        vpart++;

        /* (4387723) */
#ifdef _LP64
        if (user_vtoc->timestamp[i] > TIME32_MAX) {
            un->un_vtoc.v_timestamp[i] = TIME32_MAX;
        } else {
            un->un_vtoc.v_timestamp[i] = user_vtoc->timestamp[i];
        }
#else
        un->un_vtoc.v_timestamp[i] = user_vtoc->timestamp[i];
#endif
    }

    bcopy(user_vtoc->v_asciilabel, un->un_asciilabel, LEN_DKL_ASCII);
#else
#error "No VTOC format defined."
#endif
    return (0);
}

/*
 * Function: sd_clear_efi
 *
 * Description: This routine clears all EFI labels.
 *
 * Arguments:   un - driver soft state (unit) structure
 *
 * Return Code: void
 */

static void
sd_clear_efi(struct sd_lun *un)
{
    efi_gpt_t   *gpt;
    uint_t      lbasize;
    uint64_t    cap;
    int         rval;

    ASSERT(!mutex_owned(SD_MUTEX(un)));

    gpt = kmem_alloc(sizeof (efi_gpt_t), KM_SLEEP);

    if (sd_send_scsi_READ(un, gpt, DEV_BSIZE, 1, SD_PATH_DIRECT) != 0) {
        goto done;
    }

    sd_swap_efi_gpt(gpt);
    rval = sd_validate_efi(gpt);
    if (rval == 0) {
        /* clear primary */
        bzero(gpt, sizeof (efi_gpt_t));
        if ((rval = sd_send_scsi_WRITE(un, gpt, EFI_LABEL_SIZE, 1,
            SD_PATH_DIRECT))) {
            SD_INFO(SD_LOG_IO_PARTITION, un,
                "sd_clear_efi: clear primary label failed\n");
        }
    }
    /* the backup */
    rval = sd_send_scsi_READ_CAPACITY(un, &cap, &lbasize,
        SD_PATH_DIRECT);
    if (rval) {
        goto done;
    }
    if ((rval = sd_send_scsi_READ(un, gpt, lbasize,
        cap - 1, SD_PATH_DIRECT)) != 0) {
        goto done;
    }
    sd_swap_efi_gpt(gpt);
    rval = sd_validate_efi(gpt);
    if (rval == 0) {
        /* clear backup */
        SD_TRACE(SD_LOG_IOCTL, un, "sd_clear_efi clear backup@%lu\n",
            cap - 1);
        bzero(gpt, sizeof (efi_gpt_t));
        if ((rval = sd_send_scsi_WRITE(un, gpt, EFI_LABEL_SIZE,
            cap - 1, SD_PATH_DIRECT))) {
            SD_INFO(SD_LOG_IO_PARTITION, un,
                "sd_clear_efi: clear backup label failed\n");
        }
    }

done:
    kmem_free(gpt, sizeof (efi_gpt_t));
}

/*
 * Function: sd_set_vtoc
 *
 * Description: This routine writes the primary disk label, and the backup
 *              copies of it, to the appropriate positions on the disk.
 *
 * Arguments:   un - driver soft state (unit) structure
 *              dkl - the label data to be written
 *
 * Return Code: 0 for success, or the error returned by
 *              sd_send_scsi_READ()/sd_send_scsi_WRITE()
 */

static int
sd_set_vtoc(struct sd_lun *un, struct dk_label *dkl)
{
    void    *shadow_buf;
    uint_t  label_addr;
    int     sec;
    int     blk;
    int     head;
    int     cyl;
    int     rval;

#if defined(__i386) || defined(__amd64)
    label_addr = un->un_solaris_offset + DK_LABEL_LOC;
#else
    /* Write the primary label at block 0 of the solaris partition. */
    label_addr = 0;
#endif

    if (NOT_DEVBSIZE(un)) {
        shadow_buf = kmem_zalloc(un->un_tgt_blocksize, KM_SLEEP);
        /*
         * Read the target's first block.
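         * (Read-modify-write: the dk_label is smaller than the target
         * block, so the rest of the block's contents must be read in
         * and preserved around the label bytes.)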
         */
        if ((rval = sd_send_scsi_READ(un, shadow_buf,
            un->un_tgt_blocksize, label_addr,
            SD_PATH_STANDARD)) != 0) {
            goto exit;
        }
        /*
         * Copy the contents of the label into the shadow buffer
         * which is of the size of target block size.
         */
        bcopy(dkl, shadow_buf, sizeof (struct dk_label));
    }

    /* Write the primary label */
    if (NOT_DEVBSIZE(un)) {
        rval = sd_send_scsi_WRITE(un, shadow_buf, un->un_tgt_blocksize,
            label_addr, SD_PATH_STANDARD);
    } else {
        rval = sd_send_scsi_WRITE(un, dkl, un->un_sys_blocksize,
            label_addr, SD_PATH_STANDARD);
    }
    if (rval != 0) {
        /* goto exit (not return) so the shadow buffer is freed */
        goto exit;
    }

    /*
     * Calculate where the backup labels go. They are always on
     * the last alternate cylinder, but some older drives put them
     * on head 2 instead of the last head. They are always on the
     * first 5 odd sectors of the appropriate track.
     *
     * We have no choice at this point, but to believe that the
     * disk label is valid. Use the geometry of the disk
     * as described in the label.
     */
    cyl = dkl->dkl_ncyl + dkl->dkl_acyl - 1;
    head = dkl->dkl_nhead - 1;

    /*
     * Write and verify the backup labels. Make sure we don't try to
     * write past the last cylinder.
     */
    for (sec = 1; ((sec < 5 * 2 + 1) && (sec < dkl->dkl_nsect)); sec += 2) {
        blk = (daddr_t)(
            (cyl * ((dkl->dkl_nhead * dkl->dkl_nsect) - dkl->dkl_apc)) +
            (head * dkl->dkl_nsect) + sec);
#if defined(__i386) || defined(__amd64)
        blk += un->un_solaris_offset;
#endif
        if (NOT_DEVBSIZE(un)) {
            uint64_t tblk;
            /*
             * Need to read the block first for read modify write.
             */
            tblk = (uint64_t)blk;
            blk = (int)((tblk * un->un_sys_blocksize) /
                un->un_tgt_blocksize);
            if ((rval = sd_send_scsi_READ(un, shadow_buf,
                un->un_tgt_blocksize, blk,
                SD_PATH_STANDARD)) != 0) {
                goto exit;
            }
            /*
             * Modify the shadow buffer with the label.
             */
            bcopy(dkl, shadow_buf, sizeof (struct dk_label));
            rval = sd_send_scsi_WRITE(un, shadow_buf,
                un->un_tgt_blocksize, blk, SD_PATH_STANDARD);
        } else {
            rval = sd_send_scsi_WRITE(un, dkl, un->un_sys_blocksize,
                blk, SD_PATH_STANDARD);
            SD_INFO(SD_LOG_IO_PARTITION, un,
                "sd_set_vtoc: wrote backup label %d\n", blk);
        }
        if (rval != 0) {
            goto exit;
        }
    }
exit:
    if (NOT_DEVBSIZE(un)) {
        kmem_free(shadow_buf, un->un_tgt_blocksize);
    }
    return (rval);
}

/*
 * Function: sd_clear_vtoc
 *
 * Description: This routine clears out the VTOC labels.
 *
 * Arguments:   un - driver soft state (unit) structure
 *
 * Return: void
 */

static void
sd_clear_vtoc(struct sd_lun *un)
{
    struct dk_label *dkl;

    mutex_exit(SD_MUTEX(un));
    dkl = kmem_zalloc(sizeof (struct dk_label), KM_SLEEP);
    mutex_enter(SD_MUTEX(un));
    /*
     * sd_set_vtoc uses these fields in order to figure out
     * where to overwrite the backup labels
     */
    dkl->dkl_apc = un->un_g.dkg_apc;
    dkl->dkl_ncyl = un->un_g.dkg_ncyl;
    dkl->dkl_acyl = un->un_g.dkg_acyl;
    dkl->dkl_nhead = un->un_g.dkg_nhead;
    dkl->dkl_nsect = un->un_g.dkg_nsect;
    mutex_exit(SD_MUTEX(un));
    (void) sd_set_vtoc(un, dkl);
    kmem_free(dkl, sizeof (struct dk_label));

    mutex_enter(SD_MUTEX(un));
}

/*
 * Function: sd_write_label
 *
 * Description: This routine will validate and write the driver soft state vtoc
 *              contents to the device.
 *
 * Arguments:   dev - the device number
 *
 * Return Code: the code returned by sd_send_scsi_cmd()
 *              0
 *              EINVAL
 *              ENXIO
 *              ENOMEM
 */

static int
sd_write_label(dev_t dev)
{
    struct sd_lun   *un;
    struct dk_label *dkl;
    short           sum;
    short           *sp;
    int             i;
    int             rval;

    if (((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) ||
        (un->un_state == SD_STATE_OFFLINE)) {
        return (ENXIO);
    }
    ASSERT(mutex_owned(SD_MUTEX(un)));
    mutex_exit(SD_MUTEX(un));
    dkl = kmem_zalloc(sizeof (struct dk_label), KM_SLEEP);
    mutex_enter(SD_MUTEX(un));

    bcopy(&un->un_vtoc, &dkl->dkl_vtoc, sizeof (struct dk_vtoc));
    dkl->dkl_rpm = un->un_g.dkg_rpm;
    dkl->dkl_pcyl = un->un_g.dkg_pcyl;
    dkl->dkl_apc = un->un_g.dkg_apc;
    dkl->dkl_intrlv = un->un_g.dkg_intrlv;
    dkl->dkl_ncyl = un->un_g.dkg_ncyl;
    dkl->dkl_acyl = un->un_g.dkg_acyl;
    dkl->dkl_nhead = un->un_g.dkg_nhead;
    dkl->dkl_nsect = un->un_g.dkg_nsect;

#if defined(_SUNOS_VTOC_8)
    dkl->dkl_obs1 = un->un_g.dkg_obs1;
    dkl->dkl_obs2 = un->un_g.dkg_obs2;
    dkl->dkl_obs3 = un->un_g.dkg_obs3;
    for (i = 0; i < NDKMAP; i++) {
        dkl->dkl_map[i].dkl_cylno = un->un_map[i].dkl_cylno;
        dkl->dkl_map[i].dkl_nblk = un->un_map[i].dkl_nblk;
    }
    bcopy(un->un_asciilabel, dkl->dkl_asciilabel, LEN_DKL_ASCII);
#elif defined(_SUNOS_VTOC_16)
    dkl->dkl_skew = un->un_dkg_skew;
#else
#error "No VTOC format defined."
#endif

    dkl->dkl_magic = DKL_MAGIC;
    dkl->dkl_write_reinstruct = un->un_g.dkg_write_reinstruct;
    dkl->dkl_read_reinstruct = un->un_g.dkg_read_reinstruct;

    /* Construct checksum for the new disk label */
    sum = 0;
    sp = (short *)dkl;
    i = sizeof (struct dk_label) / sizeof (short);
    while (i--) {
        sum ^= *sp++;
    }
    dkl->dkl_cksum = sum;

    mutex_exit(SD_MUTEX(un));

    rval = sd_set_vtoc(un, dkl);
    kmem_free(dkl, sizeof (struct dk_label));
    mutex_enter(SD_MUTEX(un));
    return (rval);
}

static int
sd_dkio_set_efi(dev_t dev, caddr_t arg, int flag)
{
    struct sd_lun   *un = NULL;
    dk_efi_t        user_efi;
    int             rval = 0;
    void            *buffer;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL)
        return (ENXIO);

    if (ddi_copyin(arg, &user_efi, sizeof (dk_efi_t), flag))
        return (EFAULT);

    user_efi.dki_data = (void *)(uintptr_t)user_efi.dki_data_64;

    if ((user_efi.dki_length % un->un_tgt_blocksize) ||
        (user_efi.dki_length > un->un_max_xfer_size))
        return (EINVAL);

    buffer = kmem_alloc(user_efi.dki_length, KM_SLEEP);
    if (ddi_copyin(user_efi.dki_data, buffer, user_efi.dki_length, flag)) {
        rval = EFAULT;
    } else {
        /*
         * let's clear the vtoc labels and clear the softstate
         * vtoc.
         */
        mutex_enter(SD_MUTEX(un));
        if (un->un_vtoc.v_sanity == VTOC_SANE) {
            SD_TRACE(SD_LOG_IO_PARTITION, un,
                "sd_dkio_set_efi: CLEAR VTOC\n");
            sd_clear_vtoc(un);
            bzero(&un->un_vtoc, sizeof (struct dk_vtoc));
            mutex_exit(SD_MUTEX(un));
            ddi_remove_minor_node(SD_DEVINFO(un), "h");
            ddi_remove_minor_node(SD_DEVINFO(un), "h,raw");
            (void) ddi_create_minor_node(SD_DEVINFO(un), "wd",
                S_IFBLK,
                (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE,
                un->un_node_type, NULL);
            (void) ddi_create_minor_node(SD_DEVINFO(un), "wd,raw",
                S_IFCHR,
                (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE,
                un->un_node_type, NULL);
        } else
            mutex_exit(SD_MUTEX(un));
        rval = sd_send_scsi_WRITE(un, buffer, user_efi.dki_length,
            user_efi.dki_lba, SD_PATH_DIRECT);
        if (rval == 0) {
            mutex_enter(SD_MUTEX(un));
            un->un_f_geometry_is_valid = FALSE;
            mutex_exit(SD_MUTEX(un));
        }
    }
    kmem_free(buffer, user_efi.dki_length);
    return (rval);
}

/*
 * Function: sd_dkio_get_mboot
 *
 * Description: This routine is the driver entry point for handling user
 *              requests to get the current device mboot (DKIOCGMBOOT).
 *
 * Arguments:   dev  - the device number
 *              arg  - pointer to user provided mboot structure specifying
 *                     the current mboot.
 *              flag - this argument is a pass through to ddi_copyxxx()
 *                     directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *              EINVAL
 *              EFAULT
 *              ENXIO
 */

static int
sd_dkio_get_mboot(dev_t dev, caddr_t arg, int flag)
{
    struct sd_lun   *un;
    struct mboot    *mboot;
    int             rval;
    size_t          buffer_size;

    if (((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) ||
        (un->un_state == SD_STATE_OFFLINE)) {
        return (ENXIO);
    }

#if defined(_SUNOS_VTOC_8)
    if ((!ISREMOVABLE(un)) || (arg == NULL)) {
#elif defined(_SUNOS_VTOC_16)
    if (arg == NULL) {
#endif
        return (EINVAL);
    }

    /*
     * Read the mboot block, located at absolute block 0 on the target.
     */
    buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct mboot));

    SD_TRACE(SD_LOG_IO_PARTITION, un,
        "sd_dkio_get_mboot: allocation size: 0x%x\n", buffer_size);

    mboot = kmem_zalloc(buffer_size, KM_SLEEP);
    if ((rval = sd_send_scsi_READ(un, mboot, buffer_size, 0,
        SD_PATH_STANDARD)) == 0) {
        if (ddi_copyout(mboot, (void *)arg,
            sizeof (struct mboot), flag) != 0) {
            rval = EFAULT;
        }
    }
    kmem_free(mboot, buffer_size);
    return (rval);
}


/*
 * Function: sd_dkio_set_mboot
 *
 * Description: This routine is the driver entry point for handling user
 *              requests to validate and set the device master boot
 *              (DKIOCSMBOOT).
 *
 * Arguments:   dev  - the device number
 *              arg  - pointer to user provided mboot structure used to set the
 *                     master boot.
 *              flag - this argument is a pass through to ddi_copyxxx()
 *                     directly from the mode argument of ioctl().
 *
 * Return Code: 0
 *              EINVAL
 *              EFAULT
 *              ENXIO
 */

static int
sd_dkio_set_mboot(dev_t dev, caddr_t arg, int flag)
{
    struct sd_lun   *un = NULL;
    struct mboot    *mboot = NULL;
    int             rval;
    ushort_t        magic;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

    ASSERT(!mutex_owned(SD_MUTEX(un)));

#if defined(_SUNOS_VTOC_8)
    if (!ISREMOVABLE(un)) {
        return (EINVAL);
    }
#endif

    if (arg == NULL) {
        return (EINVAL);
    }

    mboot = kmem_zalloc(sizeof (struct mboot), KM_SLEEP);

    if (ddi_copyin((const void *)arg, mboot,
        sizeof (struct mboot), flag) != 0) {
        kmem_free(mboot, (size_t)(sizeof (struct mboot)));
        return (EFAULT);
    }

    /* Is this really a master boot record? */
    magic = LE_16(mboot->signature);
    if (magic != MBB_MAGIC) {
        kmem_free(mboot, (size_t)(sizeof (struct mboot)));
        return (EINVAL);
    }

    rval = sd_send_scsi_WRITE(un, mboot, un->un_sys_blocksize, 0,
        SD_PATH_STANDARD);

    mutex_enter(SD_MUTEX(un));
#if defined(__i386) || defined(__amd64)
    if (rval == 0) {
        /*
         * mboot has been written successfully;
         * update the fdisk and vtoc tables in memory
         */
        rval = sd_update_fdisk_and_vtoc(un);
        if ((un->un_f_geometry_is_valid == FALSE) || (rval != 0)) {
            mutex_exit(SD_MUTEX(un));
            kmem_free(mboot, (size_t)(sizeof (struct mboot)));
            return (rval);
        }
    }

    /*
     * Even if the mboot write fails, write the devid anyway; it can't
     * hurt. Also preserve the device id by writing to the disk acyl for
     * the case where a devid has been fabricated.
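     * (Same policy as in sd_dkio_set_vtoc above: a fabricated devid is
     * stored in the last two available sectors on the drive, and is
     * rewritten below so that it survives the mboot update.)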
     */
    if (!ISREMOVABLE(un) && !ISCD(un) &&
        (un->un_f_opt_fab_devid == TRUE)) {
        if (un->un_devid == NULL) {
            sd_register_devid(un, SD_DEVINFO(un),
                SD_TARGET_IS_UNRESERVED);
        } else {
            /*
             * The device id for this disk has been
             * fabricated. Fabricated device ids are
             * managed by storing them in the last 2
             * available sectors on the drive. The device
             * id must be preserved by writing it back out
             * to this location.
             */
            if (sd_write_deviceid(un) != 0) {
                ddi_devid_free(un->un_devid);
                un->un_devid = NULL;
            }
        }
    }
#else
    if (rval == 0) {
        /*
         * mboot has been written successfully;
         * set up the default geometry and VTOC
         */
        if (un->un_blockcount <= DK_MAX_BLOCKS)
            sd_setup_default_geometry(un);
    }
#endif
    mutex_exit(SD_MUTEX(un));
    kmem_free(mboot, (size_t)(sizeof (struct mboot)));
    return (rval);
}


/*
 * Function: sd_setup_default_geometry
 *
 * Description: This local utility routine sets the default geometry as part of
 *              setting the device mboot.
 *
 * Arguments:   un - driver soft state (unit) structure
 *
 * Note: This may be redundant with sd_build_default_label.
 */

static void
sd_setup_default_geometry(struct sd_lun *un)
{
    /* zero out the soft state geometry and partition table. */
    bzero(&un->un_g, sizeof (struct dk_geom));
    bzero(&un->un_vtoc, sizeof (struct dk_vtoc));
    bzero(un->un_map, NDKMAP * (sizeof (struct dk_map)));
    un->un_asciilabel[0] = '\0';

    /*
     * For the rpm, we use the minimum for the disk.
     * For the heads, cylinders and number of sectors per track:
     * if the capacity is <= 1GB, use head = 64 and sect = 32;
     * else use head = 255 and sect = 63.
     * Note: the capacity should be equal to the C*H*S value.
     * This will cause some truncation of size due to round off
     * errors. For CD-ROMs, this truncation can have adverse side
     * effects, so we return ncyl and nhead as 1. nsect will
     * overflow for most CD-ROMs, since nsect is of type ushort.
     */
    if (ISCD(un)) {
        un->un_g.dkg_ncyl = 1;
        un->un_g.dkg_nhead = 1;
        un->un_g.dkg_nsect = un->un_blockcount;
    } else {
        if (un->un_blockcount <= 0x1000) {
            /*
             * Needed for unlabeled SCSI floppies.
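             * For example, a 1.44MB floppy reports 2880 blocks;
             * 2 heads x 80 cylinders gives nsect = 2880 / 160 = 18
             * sectors per track.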
             */
            un->un_g.dkg_nhead = 2;
            un->un_g.dkg_ncyl = 80;
            un->un_g.dkg_pcyl = 80;
            un->un_g.dkg_nsect = un->un_blockcount / (2 * 80);
        } else if (un->un_blockcount <= 0x200000) {
            un->un_g.dkg_nhead = 64;
            un->un_g.dkg_nsect = 32;
            un->un_g.dkg_ncyl = un->un_blockcount / (64 * 32);
        } else {
            un->un_g.dkg_nhead = 255;
            un->un_g.dkg_nsect = 63;
            un->un_g.dkg_ncyl = un->un_blockcount / (255 * 63);
        }
        un->un_blockcount = un->un_g.dkg_ncyl *
            un->un_g.dkg_nhead * un->un_g.dkg_nsect;
    }
    un->un_g.dkg_acyl = 0;
    un->un_g.dkg_bcyl = 0;
    un->un_g.dkg_intrlv = 1;
    un->un_g.dkg_rpm = 200;
    un->un_g.dkg_read_reinstruct = 0;
    un->un_g.dkg_write_reinstruct = 0;
    if (un->un_g.dkg_pcyl == 0) {
        un->un_g.dkg_pcyl = un->un_g.dkg_ncyl + un->un_g.dkg_acyl;
    }

    un->un_map['a'-'a'].dkl_cylno = 0;
    un->un_map['a'-'a'].dkl_nblk = un->un_blockcount;
    un->un_map['c'-'a'].dkl_cylno = 0;
    un->un_map['c'-'a'].dkl_nblk = un->un_blockcount;
    un->un_f_geometry_is_valid = FALSE;
}


#if defined(__i386) || defined(__amd64)
/*
 * Function: sd_update_fdisk_and_vtoc
 *
 * Description: This local utility routine updates the device fdisk and vtoc
 *              as part of setting the device mboot.
 *
 * Arguments:   un - driver soft state (unit) structure
 *
 * Return Code: 0 for success or errno-type return code.
 *
 * Note:x86:    This looks like a duplicate of sd_validate_geometry(), but
 *              these did exist separately in x86 sd.c!!!
 */

static int
sd_update_fdisk_and_vtoc(struct sd_lun *un)
{
    static char labelstring[128];
    static char buf[256];
    char        *label = 0;
    int         count;
    int         label_rc = 0;
    int         gvalid = un->un_f_geometry_is_valid;
    int         fdisk_rval;
    int         lbasize;
    int         capacity;

    ASSERT(mutex_owned(SD_MUTEX(un)));

    if (un->un_f_tgt_blocksize_is_valid == FALSE) {
        return (EINVAL);
    }

    if (un->un_f_blockcount_is_valid == FALSE) {
        return (EINVAL);
    }

#if defined(_SUNOS_VTOC_16)
    /*
     * Set up the "whole disk" fdisk partition; this should always
     * exist, regardless of whether the disk contains an fdisk table
     * or vtoc.
     */
    un->un_map[P0_RAW_DISK].dkl_cylno = 0;
    un->un_map[P0_RAW_DISK].dkl_nblk = un->un_blockcount;
#endif  /* defined(_SUNOS_VTOC_16) */

    /*
     * copy the lbasize and capacity so that if they're
     * reset while we're not holding the SD_MUTEX(un), we will
     * continue to use valid values after the SD_MUTEX(un) is
     * reacquired.
     */
    lbasize = un->un_tgt_blocksize;
    capacity = un->un_blockcount;

    /*
     * refresh the logical and physical geometry caches.
     * (data from mode sense format/rigid disk geometry pages,
     * and scsi_ifgetcap("geometry").
     */
    sd_resync_geom_caches(un, capacity, lbasize, SD_PATH_DIRECT);

    /*
     * Only DIRECT ACCESS devices will have Sun labels.
     * CD's supposedly have a Sun label, too.
     */
    if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT || ISREMOVABLE(un)) {
        fdisk_rval = sd_read_fdisk(un, capacity, lbasize,
            SD_PATH_DIRECT);
        if (fdisk_rval == SD_CMD_FAILURE) {
            ASSERT(mutex_owned(SD_MUTEX(un)));
            return (EIO);
        }

        if (fdisk_rval == SD_CMD_RESERVATION_CONFLICT) {
            ASSERT(mutex_owned(SD_MUTEX(un)));
            return (EACCES);
        }

        if (un->un_solaris_size <= DK_LABEL_LOC) {
            /*
             * Found fdisk table but no Solaris partition entry,
             * so don't call sd_uselabel() and don't create
             * a default label.
             */
            label_rc = 0;
            un->un_f_geometry_is_valid = TRUE;
            goto no_solaris_partition;
        }

#if defined(_SUNOS_VTOC_8)
        label = (char *)un->un_asciilabel;
#elif defined(_SUNOS_VTOC_16)
        label = (char *)un->un_vtoc.v_asciilabel;
#else
#error "No VTOC format defined."
#endif
    } else if (capacity < 0) {
        ASSERT(mutex_owned(SD_MUTEX(un)));
        return (EINVAL);
    }

    /*
     * For removable media we reach here only if we have found a
     * Solaris partition.
     * If un_f_geometry_is_valid is FALSE, it indicates that the Solaris
     * partition has changed from the previous one, so we set up a
     * default VTOC in this case.
     */
    if (un->un_f_geometry_is_valid == FALSE) {
        sd_build_default_label(un);
        label_rc = 0;
    }

no_solaris_partition:
    if ((!ISREMOVABLE(un) ||
        (ISREMOVABLE(un) && un->un_mediastate == DKIO_EJECTED)) &&
        (un->un_state == SD_STATE_NORMAL && gvalid == FALSE)) {
        /*
         * Print out a message indicating who and what we are.
         * We do this only when we happen to really validate the
         * geometry. We may call sd_validate_geometry() at other
         * times, e.g. from ioctl()'s like Get VTOC, in which case
         * we don't want to print the label.
         * If the geometry is valid, print the label string,
         * else print vendor and product info, if available.
         */
        if ((un->un_f_geometry_is_valid == TRUE) && (label != NULL)) {
            SD_INFO(SD_LOG_IOCTL_DKIO, un, "?<%s>\n", label);
        } else {
            mutex_enter(&sd_label_mutex);
            sd_inq_fill(SD_INQUIRY(un)->inq_vid, VIDMAX,
                labelstring);
            sd_inq_fill(SD_INQUIRY(un)->inq_pid, PIDMAX,
                &labelstring[64]);
            (void) sprintf(buf, "?Vendor '%s', product '%s'",
                labelstring, &labelstring[64]);
            if (un->un_f_blockcount_is_valid == TRUE) {
                (void) sprintf(&buf[strlen(buf)],
                    ", %" PRIu64 " %u byte blocks\n",
                    un->un_blockcount,
                    un->un_tgt_blocksize);
            } else {
                (void) sprintf(&buf[strlen(buf)],
                    ", (unknown capacity)\n");
            }
            SD_INFO(SD_LOG_IOCTL_DKIO, un, buf);
            mutex_exit(&sd_label_mutex);
        }
    }

#if defined(_SUNOS_VTOC_16)
    /*
     * If we have valid geometry, set up the remaining fdisk partitions.
     * Note that dkl_cylno is not used for the fdisk map entries, so
     * we set it to an entirely bogus value.
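     * (-1 below; only dkl_nblk and the corresponding un_offset
     * entries carry meaningful data for the fdisk partitions.)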
     */
    for (count = 0; count < FD_NUMPART; count++) {
        un->un_map[FDISK_P1 + count].dkl_cylno = -1;
        un->un_map[FDISK_P1 + count].dkl_nblk =
            un->un_fmap[count].fmap_nblk;
        un->un_offset[FDISK_P1 + count] =
            un->un_fmap[count].fmap_start;
    }
#endif

    for (count = 0; count < NDKMAP; count++) {
#if defined(_SUNOS_VTOC_8)
        struct dk_map *lp = &un->un_map[count];
        un->un_offset[count] =
            un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno;
#elif defined(_SUNOS_VTOC_16)
        struct dkl_partition *vp = &un->un_vtoc.v_part[count];
        un->un_offset[count] = vp->p_start + un->un_solaris_offset;
#else
#error "No VTOC format defined."
#endif
    }

    ASSERT(mutex_owned(SD_MUTEX(un)));
    return (label_rc);
}
#endif


/*
 * Function: sd_check_media
 *
 * Description: This utility routine implements the functionality for the
 *              DKIOCSTATE ioctl. This ioctl blocks the user thread until the
 *              driver state changes from that specified by the user
 *              (inserted or ejected). For example, if the user specifies
 *              DKIO_EJECTED and the current media state is inserted this
 *              routine will immediately return DKIO_INSERTED. However, if the
 *              current media state is not inserted the user thread will be
 *              blocked until the drive state changes. If DKIO_NONE is
 *              specified the user thread will block until a drive state
 *              change occurs.
 *
 * Arguments:   dev   - the device number
 *              state - user pointer to a dkio_state, updated with the current
 *                      drive state at return.
 *
 * Return Code: ENXIO
 *              EIO
 *              EAGAIN
 *              EINTR
 */

static int
sd_check_media(dev_t dev, enum dkio_state state)
{
    struct sd_lun   *un = NULL;
    enum dkio_state prev_state;
    opaque_t        token = NULL;
    int             rval = 0;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (ENXIO);
    }

    SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n");

    mutex_enter(SD_MUTEX(un));

    SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: "
        "state=%x, mediastate=%x\n", state, un->un_mediastate);

    prev_state = un->un_mediastate;

    /* is there anything to do? */
    if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) {
        /*
         * submit the request to the scsi_watch service;
         * sd_media_watch_cb() does the real work
         */
        mutex_exit(SD_MUTEX(un));

        /*
         * This change handles the case where a scsi watch request is
         * added to a device that is powered down. To accomplish this
         * we power up the device before adding the scsi watch request,
         * since the scsi watch sends a TUR directly to the device
         * which the device cannot handle if it is powered down.
         */
        if (sd_pm_entry(un) != DDI_SUCCESS) {
            mutex_enter(SD_MUTEX(un));
            goto done;
        }

        token = scsi_watch_request_submit(SD_SCSI_DEVP(un),
            sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
            (caddr_t)dev);

        sd_pm_exit(un);

        mutex_enter(SD_MUTEX(un));
        if (token == NULL) {
            rval = EAGAIN;
            goto done;
        }

        /*
         * This is a special case IOCTL that doesn't return
         * until the media state changes.
         * Routine sdpower
         * knows about and handles this, so don't count it
         * as an active cmd in the driver, which would
         * keep the device busy to the pm framework.
         * If the count isn't decremented the device can't
         * be powered down.
         */
        un->un_ncmds_in_driver--;
        ASSERT(un->un_ncmds_in_driver >= 0);

        /*
         * if a prior request had been made, this will be the same
         * token, as scsi_watch was designed that way.
         */
        un->un_swr_token = token;
        un->un_specified_mediastate = state;

        /*
         * now wait for media change
         * we will not be signalled unless mediastate == state but it
         * is still better to test for this condition, since there is
         * a 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED
         */
        SD_TRACE(SD_LOG_COMMON, un,
            "sd_check_media: waiting for media state change\n");
        while (un->un_mediastate == state) {
            if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
                SD_TRACE(SD_LOG_COMMON, un,
                    "sd_check_media: waiting for media state "
                    "was interrupted\n");
                un->un_ncmds_in_driver++;
                rval = EINTR;
                goto done;
            }
            SD_TRACE(SD_LOG_COMMON, un,
                "sd_check_media: received signal, state=%x\n",
                un->un_mediastate);
        }
        /*
         * Inc the counter to indicate the device once again
         * has an active outstanding cmd.
         */
        un->un_ncmds_in_driver++;
    }

    /* invalidate geometry */
    if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
        sr_ejected(un);
    }

    if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
        uint64_t    capacity;
        uint_t      lbasize;

        SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
        mutex_exit(SD_MUTEX(un));
        /*
         * Since the following routines use SD_PATH_DIRECT, we must
         * call PM directly before the upcoming disk accesses. This
         * may cause the disk to be powered/spun up.
         */

        if (sd_pm_entry(un) == DDI_SUCCESS) {
            rval = sd_send_scsi_READ_CAPACITY(un,
                &capacity,
                &lbasize, SD_PATH_DIRECT);
            if (rval != 0) {
                sd_pm_exit(un);
                mutex_enter(SD_MUTEX(un));
                goto done;
            }
        } else {
            rval = EIO;
            mutex_enter(SD_MUTEX(un));
            goto done;
        }
        mutex_enter(SD_MUTEX(un));

        sd_update_block_info(un, lbasize, capacity);

        un->un_f_geometry_is_valid = FALSE;
        (void) sd_validate_geometry(un, SD_PATH_DIRECT);

        mutex_exit(SD_MUTEX(un));
        rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
            SD_PATH_DIRECT);
        sd_pm_exit(un);

        mutex_enter(SD_MUTEX(un));
    }
done:
    un->un_f_watcht_stopped = FALSE;
    if (un->un_swr_token) {
        /*
         * Use of this local token and the mutex ensures that we avoid
         * some race conditions associated with terminating the
         * scsi watch.
         */
        token = un->un_swr_token;
        un->un_swr_token = (opaque_t)NULL;
        mutex_exit(SD_MUTEX(un));
        (void) scsi_watch_request_terminate(token,
            SCSI_WATCH_TERMINATE_WAIT);
        mutex_enter(SD_MUTEX(un));
    }

    /*
     * Update the capacity kstat value, if no media previously
     * (capacity kstat is 0) and media has been inserted
     * (un_f_blockcount_is_valid == TRUE).
     * This is a more generic way than checking for ISREMOVABLE.
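     * For example (illustrative numbers): an inserted cartridge with
     * 2097152 blocks of 512 bytes sets sd_capacity to 1073741824
     * bytes (1 GiB).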
     */
    if (un->un_errstats) {
        struct sd_errstats *stp = NULL;

        stp = (struct sd_errstats *)un->un_errstats->ks_data;
        if ((stp->sd_capacity.value.ui64 == 0) &&
            (un->un_f_blockcount_is_valid == TRUE)) {
            stp->sd_capacity.value.ui64 =
                (uint64_t)((uint64_t)un->un_blockcount *
                un->un_sys_blocksize);
        }
    }
    mutex_exit(SD_MUTEX(un));
    SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n");
    return (rval);
}


/*
 * Function: sd_delayed_cv_broadcast
 *
 * Description: Delayed cv_broadcast to allow for target to recover from media
 *              insertion.
 *
 * Arguments:   arg - driver soft state (unit) structure
 */

static void
sd_delayed_cv_broadcast(void *arg)
{
    struct sd_lun *un = arg;

    SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n");

    mutex_enter(SD_MUTEX(un));
    un->un_dcvb_timeid = NULL;
    cv_broadcast(&un->un_state_cv);
    mutex_exit(SD_MUTEX(un));
}


/*
 * Function: sd_media_watch_cb
 *
 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This
 *              routine processes the TUR sense data and updates the driver
 *              state if a transition has occurred. The user thread
 *              (sd_check_media) is then signalled.
 *
 * Arguments:   arg - the device 'dev_t' is used for context to discriminate
 *                    among multiple watches that share this callback function
 *              resultp - scsi watch facility result packet containing scsi
 *                    packet, status byte and sense data
 *
 * Return Code: 0 for success, -1 for failure
 */

static int
sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
    struct sd_lun               *un;
    struct scsi_status          *statusp = resultp->statusp;
    struct scsi_extended_sense  *sensep = resultp->sensep;
    enum dkio_state             state = DKIO_NONE;
    dev_t                       dev = (dev_t)arg;
    uchar_t                     actual_sense_length;

    if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
        return (-1);
    }
    actual_sense_length = resultp->actual_sense_length;

    mutex_enter(SD_MUTEX(un));
    SD_TRACE(SD_LOG_COMMON, un,
        "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
        *((char *)statusp), (void *)sensep, actual_sense_length);

    if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
        un->un_mediastate = DKIO_DEV_GONE;
        printf("sd_media_watch_cb: dev gone\n");
        cv_broadcast(&un->un_state_cv);
        mutex_exit(SD_MUTEX(un));

        return (0);
    }

    /*
     * If there was a check condition, then sensep points to valid sense
     * data. If status was not a check condition but a reservation or
     * busy status, then the new state is DKIO_NONE.
     */
    if (sensep != NULL) {
        SD_INFO(SD_LOG_COMMON, un,
            "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
            sensep->es_key, sensep->es_add_code, sensep->es_qual_code);
        /* This routine only uses up to 13 bytes of sense data. */
        if (actual_sense_length >= 13) {
            if (sensep->es_key == KEY_UNIT_ATTENTION) {
                if (sensep->es_add_code == 0x28) {
                    state = DKIO_INSERTED;
                }
            } else {
                /*
                 * Sense 02/04/02 means that the host
                 * should send a start command.
Explicitly 23880 * leave the media state as is 23881 * (inserted), since the media is present 23882 * and the host has merely stopped the device for PM 23883 * reasons. The next real read/write 23884 * to this media will bring the 23885 * device back to the proper state for 23886 * media access. 23887 */ 23888 if ((sensep->es_key == KEY_NOT_READY) && 23889 (sensep->es_add_code == 0x3a)) { 23890 state = DKIO_EJECTED; 23891 } 23892 23893 /* 23894 * If the drive is busy with an operation 23895 * or long write, keep the media in an 23896 * inserted state. 23897 */ 23898 23899 if ((sensep->es_key == KEY_NOT_READY) && 23900 (sensep->es_add_code == 0x04) && 23901 ((sensep->es_qual_code == 0x02) || 23902 (sensep->es_qual_code == 0x07) || 23903 (sensep->es_qual_code == 0x08))) { 23904 state = DKIO_INSERTED; 23905 } 23906 } 23907 } 23908 } else if ((*((char *)statusp) == STATUS_GOOD) && 23909 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 23910 state = DKIO_INSERTED; 23911 } 23912 23913 SD_TRACE(SD_LOG_COMMON, un, 23914 "sd_media_watch_cb: state=%x, specified=%x\n", 23915 state, un->un_specified_mediastate); 23916 23917 /* 23918 * now signal the waiting thread if this is *not* the specified state; 23919 * delay the signal if the state is DKIO_INSERTED to allow the target 23920 * to recover 23921 */ 23922 if (state != un->un_specified_mediastate) { 23923 un->un_mediastate = state; 23924 if (state == DKIO_INSERTED) { 23925 /* 23926 * delay the signal to give the drive a chance 23927 * to do what it apparently needs to do 23928 */ 23929 SD_TRACE(SD_LOG_COMMON, un, 23930 "sd_media_watch_cb: delayed cv_broadcast\n"); 23931 if (un->un_dcvb_timeid == NULL) { 23932 un->un_dcvb_timeid = 23933 timeout(sd_delayed_cv_broadcast, un, 23934 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 23935 } 23936 } else { 23937 SD_TRACE(SD_LOG_COMMON, un, 23938 "sd_media_watch_cb: immediate cv_broadcast\n"); 23939 cv_broadcast(&un->un_state_cv); 23940 } 23941 } 23942 mutex_exit(SD_MUTEX(un)); 23943 return (0); 23944 } 23945 23946 23947 /* 23948 * Function: sd_dkio_get_temp 23949 * 23950 * Description: This routine is the driver entry point for handling ioctl 23951 * requests to get the disk temperature. 23952 * 23953 * Arguments: dev - the device number 23954 * arg - pointer to user provided dk_temperature structure. 23955 * flag - this argument is a pass through to ddi_copyxxx() 23956 * directly from the mode argument of ioctl(). 23957 * 23958 * Return Code: 0 23959 * EFAULT 23960 * ENXIO 23961 * EAGAIN 23962 */ 23963 23964 static int 23965 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 23966 { 23967 struct sd_lun *un = NULL; 23968 struct dk_temperature *dktemp = NULL; 23969 uchar_t *temperature_page; 23970 int rval = 0; 23971 int path_flag = SD_PATH_STANDARD; 23972 23973 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23974 return (ENXIO); 23975 } 23976 23977 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 23978 23979 /* copyin the disk temp argument to get the user flags */ 23980 if (ddi_copyin((void *)arg, dktemp, 23981 sizeof (struct dk_temperature), flag) != 0) { 23982 rval = EFAULT; 23983 goto done; 23984 } 23985 23986 /* Initialize the temperature to invalid. */ 23987 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 23988 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 23989 23990 /* 23991 * Note: Investigate removing the "bypass pm" semantic. 23992 * Can we just bypass PM always?
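 *
 * For reference, a minimal user-level sketch of this ioctl
 * (illustrative only; assumes fd is an open disk device):
 *
 *	struct dk_temperature dkt;
 *
 *	bzero(&dkt, sizeof (dkt));
 *	dkt.dkt_flags = DKT_BYPASS_PM;
 *	if (ioctl(fd, DKIOCGTEMPERATURE, &dkt) == 0 &&
 *	    dkt.dkt_cur_temp != DKT_INVALID_TEMP)
 *		(void) printf("temp: %d C\n", dkt.dkt_cur_temp);
 *
 * With DKT_BYPASS_PM set, an EAGAIN return below means the drive
 * was in low power and was deliberately not woken up.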
23993 */ 23994 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 23995 path_flag = SD_PATH_DIRECT; 23996 ASSERT(!mutex_owned(&un->un_pm_mutex)); 23997 mutex_enter(&un->un_pm_mutex); 23998 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 23999 /* 24000 * If DKT_BYPASS_PM is set, and the drive happens to be 24001 * in low power mode, we cannot wake it up; we need to 24002 * return EAGAIN. 24003 */ 24004 mutex_exit(&un->un_pm_mutex); 24005 rval = EAGAIN; 24006 goto done; 24007 } else { 24008 /* 24009 * Indicate to PM the device is busy. This is required 24010 * to avoid a race - i.e. the ioctl is issuing a 24011 * command and the pm framework brings down the device 24012 * to low power mode (possible power cut-off on some 24013 * platforms). 24014 */ 24015 mutex_exit(&un->un_pm_mutex); 24016 if (sd_pm_entry(un) != DDI_SUCCESS) { 24017 rval = EAGAIN; 24018 goto done; 24019 } 24020 } 24021 } 24022 24023 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 24024 24025 if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page, 24026 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) { 24027 goto done2; 24028 } 24029 24030 /* 24031 * For the current temperature verify that the parameter length is 0x02 24032 * and the parameter code is 0x00 24033 */ 24034 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 24035 (temperature_page[5] == 0x00)) { 24036 if (temperature_page[9] == 0xFF) { 24037 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 24038 } else { 24039 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 24040 } 24041 } 24042 24043 /* 24044 * For the reference temperature verify that the parameter 24045 * length is 0x02 and the parameter code is 0x01 24046 */ 24047 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 24048 (temperature_page[11] == 0x01)) { 24049 if (temperature_page[15] == 0xFF) { 24050 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 24051 } else { 24052 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 24053 } 24054 } 24055 24056 /* Do the copyout regardless of the temperature command's status. */ 24057 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 24058 flag) != 0) { 24059 rval = EFAULT; 24060 } 24061 24062 done2: 24063 if (path_flag == SD_PATH_DIRECT) { 24064 sd_pm_exit(un); 24065 } 24066 24067 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 24068 done: 24069 if (dktemp != NULL) { 24070 kmem_free(dktemp, sizeof (struct dk_temperature)); 24071 } 24072 24073 return (rval); 24074 } 24075 24076 24077 /* 24078 * Function: sd_log_page_supported 24079 * 24080 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 24081 * supported log pages. 24082 * 24083 * Arguments: un - driver soft state (unit) structure 24084 * log_page - the code of the log page being queried 24085 * 24086 * Return Code: -1 - on error (log sense is optional and may not be supported). 24087 * 0 - log page not found. 24088 * 1 - log page found. 24089 */ 24090 24091 static int 24092 sd_log_page_supported(struct sd_lun *un, int log_page) 24093 { 24094 uchar_t *log_page_data; 24095 int i; 24096 int match = 0; 24097 int log_size; 24098 24099 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 24100 24101 if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0, 24102 SD_PATH_DIRECT) != 0) { 24103 SD_ERROR(SD_LOG_COMMON, un, 24104 "sd_log_page_supported: failed log page retrieval\n"); 24105 kmem_free(log_page_data, 0xFF); 24106 return (-1); 24107 } 24108 log_size = log_page_data[3]; 24109 24110 /* 24111 * The list of supported log pages starts at the fourth byte.
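 * (Roughly, per the SPC LOG SENSE definition, the page-0 response is:
 * byte 0 the page code, byte 1 reserved, bytes 2-3 the page length n,
 * and bytes 4 through 4+n-1 the supported page codes, one per byte.)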
Check 24112 * until we run out of log pages or a match is found. 24113 */ 24114 for (i = 4; (i < (log_size + 4)) && !match; i++) { 24115 if (log_page_data[i] == log_page) { 24116 match++; 24117 } 24118 } 24119 kmem_free(log_page_data, 0xFF); 24120 return (match); 24121 } 24122 24123 24124 /* 24125 * Function: sd_mhdioc_failfast 24126 * 24127 * Description: This routine is the driver entry point for handling ioctl 24128 * requests to enable/disable the multihost failfast option. 24129 * (MHIOCENFAILFAST) 24130 * 24131 * Arguments: dev - the device number 24132 * arg - user specified probing interval. 24133 * flag - this argument is a pass through to ddi_copyxxx() 24134 * directly from the mode argument of ioctl(). 24135 * 24136 * Return Code: 0 24137 * EFAULT 24138 * ENXIO 24139 */ 24140 24141 static int 24142 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 24143 { 24144 struct sd_lun *un = NULL; 24145 int mh_time; 24146 int rval = 0; 24147 24148 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24149 return (ENXIO); 24150 } 24151 24152 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 24153 return (EFAULT); 24154 24155 if (mh_time) { 24156 mutex_enter(SD_MUTEX(un)); 24157 un->un_resvd_status |= SD_FAILFAST; 24158 mutex_exit(SD_MUTEX(un)); 24159 /* 24160 * If mh_time is INT_MAX, then this ioctl is being used for 24161 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 24162 */ 24163 if (mh_time != INT_MAX) { 24164 rval = sd_check_mhd(dev, mh_time); 24165 } 24166 } else { 24167 (void) sd_check_mhd(dev, 0); 24168 mutex_enter(SD_MUTEX(un)); 24169 un->un_resvd_status &= ~SD_FAILFAST; 24170 mutex_exit(SD_MUTEX(un)); 24171 } 24172 return (rval); 24173 } 24174 24175 24176 /* 24177 * Function: sd_mhdioc_takeown 24178 * 24179 * Description: This routine is the driver entry point for handling ioctl 24180 * requests to forcefully acquire exclusive access rights to the 24181 * multihost disk (MHIOCTKOWN). 24182 * 24183 * Arguments: dev - the device number 24184 * arg - user provided structure specifying the delay 24185 * parameters in milliseconds 24186 * flag - this argument is a pass through to ddi_copyxxx() 24187 * directly from the mode argument of ioctl(). 24188 * 24189 * Return Code: 0 24190 * EFAULT 24191 * ENXIO 24192 */ 24193 24194 static int 24195 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 24196 { 24197 struct sd_lun *un = NULL; 24198 struct mhioctkown *tkown = NULL; 24199 int rval = 0; 24200 24201 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24202 return (ENXIO); 24203 } 24204 24205 if (arg != NULL) { 24206 tkown = (struct mhioctkown *) 24207 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 24208 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 24209 if (rval != 0) { 24210 rval = EFAULT; 24211 goto error; 24212 } 24213 } 24214 24215 rval = sd_take_ownership(dev, tkown); 24216 mutex_enter(SD_MUTEX(un)); 24217 if (rval == 0) { 24218 un->un_resvd_status |= SD_RESERVE; 24219 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 24220 sd_reinstate_resv_delay = 24221 tkown->reinstate_resv_delay * 1000; 24222 } else { 24223 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 24224 } 24225 /* 24226 * Give the scsi_watch routine interval set by 24227 * the MHIOCENFAILFAST ioctl precedence here. 
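 *
 * Caller-side sketch of the two ioctls together (illustrative only;
 * assumes fd is an open multihost disk and that delays are given in
 * milliseconds, per mhd(7I)):
 *
 *	struct mhioctkown t;
 *	int ff_interval = 1000;
 *
 *	bzero(&t, sizeof (t));
 *	t.min_ownership_delay = 6000;
 *	t.max_ownership_delay = 30000;
 *	if (ioctl(fd, MHIOCTKOWN, &t) == 0)
 *		(void) ioctl(fd, MHIOCENFAILFAST, &ff_interval);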
24228 */ 24229 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 24230 mutex_exit(SD_MUTEX(un)); 24231 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 24232 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24233 "sd_mhdioc_takeown : %d\n", 24234 sd_reinstate_resv_delay); 24235 } else { 24236 mutex_exit(SD_MUTEX(un)); 24237 } 24238 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 24239 sd_mhd_reset_notify_cb, (caddr_t)un); 24240 } else { 24241 un->un_resvd_status &= ~SD_RESERVE; 24242 mutex_exit(SD_MUTEX(un)); 24243 } 24244 24245 error: 24246 if (tkown != NULL) { 24247 kmem_free(tkown, sizeof (struct mhioctkown)); 24248 } 24249 return (rval); 24250 } 24251 24252 24253 /* 24254 * Function: sd_mhdioc_release 24255 * 24256 * Description: This routine is the driver entry point for handling ioctl 24257 * requests to release exclusive access rights to the multihost 24258 * disk (MHIOCRELEASE). 24259 * 24260 * Arguments: dev - the device number 24261 * 24262 * Return Code: 0 24263 * ENXIO 24264 */ 24265 24266 static int 24267 sd_mhdioc_release(dev_t dev) 24268 { 24269 struct sd_lun *un = NULL; 24270 timeout_id_t resvd_timeid_save; 24271 int resvd_status_save; 24272 int rval = 0; 24273 24274 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24275 return (ENXIO); 24276 } 24277 24278 mutex_enter(SD_MUTEX(un)); 24279 resvd_status_save = un->un_resvd_status; 24280 un->un_resvd_status &= 24281 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 24282 if (un->un_resvd_timeid) { 24283 resvd_timeid_save = un->un_resvd_timeid; 24284 un->un_resvd_timeid = NULL; 24285 mutex_exit(SD_MUTEX(un)); 24286 (void) untimeout(resvd_timeid_save); 24287 } else { 24288 mutex_exit(SD_MUTEX(un)); 24289 } 24290 24291 /* 24292 * destroy any pending timeout thread that may be attempting to 24293 * reinstate reservation on this device. 24294 */ 24295 sd_rmv_resv_reclaim_req(dev); 24296 24297 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 24298 mutex_enter(SD_MUTEX(un)); 24299 if ((un->un_mhd_token) && 24300 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 24301 mutex_exit(SD_MUTEX(un)); 24302 (void) sd_check_mhd(dev, 0); 24303 } else { 24304 mutex_exit(SD_MUTEX(un)); 24305 } 24306 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 24307 sd_mhd_reset_notify_cb, (caddr_t)un); 24308 } else { 24309 /* 24310 * sd_mhd_watch_cb will restart the resvd recover timeout thread 24311 */ 24312 mutex_enter(SD_MUTEX(un)); 24313 un->un_resvd_status = resvd_status_save; 24314 mutex_exit(SD_MUTEX(un)); 24315 } 24316 return (rval); 24317 } 24318 24319 24320 /* 24321 * Function: sd_mhdioc_register_devid 24322 * 24323 * Description: This routine is the driver entry point for handling ioctl 24324 * requests to register the device id (MHIOCREREGISTERDEVID). 
24325 * 24326 * Note: The implementation for this ioctl has been updated to 24327 * be consistent with the original PSARC case (1999/357) 24328 * (4375899, 4241671, 4220005) 24329 * 24330 * Arguments: dev - the device number 24331 * 24332 * Return Code: 0 24333 * ENXIO 24334 */ 24335 24336 static int 24337 sd_mhdioc_register_devid(dev_t dev) 24338 { 24339 struct sd_lun *un = NULL; 24340 int rval = 0; 24341 24342 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24343 return (ENXIO); 24344 } 24345 24346 ASSERT(!mutex_owned(SD_MUTEX(un))); 24347 24348 mutex_enter(SD_MUTEX(un)); 24349 24350 /* If a devid already exists, de-register it */ 24351 if (un->un_devid != NULL) { 24352 ddi_devid_unregister(SD_DEVINFO(un)); 24353 /* 24354 * After unregister devid, needs to free devid memory 24355 */ 24356 ddi_devid_free(un->un_devid); 24357 un->un_devid = NULL; 24358 } 24359 24360 /* Check for reservation conflict */ 24361 mutex_exit(SD_MUTEX(un)); 24362 rval = sd_send_scsi_TEST_UNIT_READY(un, 0); 24363 mutex_enter(SD_MUTEX(un)); 24364 24365 switch (rval) { 24366 case 0: 24367 sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 24368 break; 24369 case EACCES: 24370 break; 24371 default: 24372 rval = EIO; 24373 } 24374 24375 mutex_exit(SD_MUTEX(un)); 24376 return (rval); 24377 } 24378 24379 24380 /* 24381 * Function: sd_mhdioc_inkeys 24382 * 24383 * Description: This routine is the driver entry point for handling ioctl 24384 * requests to issue the SCSI-3 Persistent In Read Keys command 24385 * to the device (MHIOCGRP_INKEYS). 24386 * 24387 * Arguments: dev - the device number 24388 * arg - user provided in_keys structure 24389 * flag - this argument is a pass through to ddi_copyxxx() 24390 * directly from the mode argument of ioctl(). 24391 * 24392 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 24393 * ENXIO 24394 * EFAULT 24395 */ 24396 24397 static int 24398 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 24399 { 24400 struct sd_lun *un; 24401 mhioc_inkeys_t inkeys; 24402 int rval = 0; 24403 24404 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24405 return (ENXIO); 24406 } 24407 24408 #ifdef _MULTI_DATAMODEL 24409 switch (ddi_model_convert_from(flag & FMODELS)) { 24410 case DDI_MODEL_ILP32: { 24411 struct mhioc_inkeys32 inkeys32; 24412 24413 if (ddi_copyin(arg, &inkeys32, 24414 sizeof (struct mhioc_inkeys32), flag) != 0) { 24415 return (EFAULT); 24416 } 24417 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 24418 if ((rval = sd_persistent_reservation_in_read_keys(un, 24419 &inkeys, flag)) != 0) { 24420 return (rval); 24421 } 24422 inkeys32.generation = inkeys.generation; 24423 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 24424 flag) != 0) { 24425 return (EFAULT); 24426 } 24427 break; 24428 } 24429 case DDI_MODEL_NONE: 24430 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 24431 flag) != 0) { 24432 return (EFAULT); 24433 } 24434 if ((rval = sd_persistent_reservation_in_read_keys(un, 24435 &inkeys, flag)) != 0) { 24436 return (rval); 24437 } 24438 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 24439 flag) != 0) { 24440 return (EFAULT); 24441 } 24442 break; 24443 } 24444 24445 #else /* ! 
_MULTI_DATAMODEL */ 24446 24447 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 24448 return (EFAULT); 24449 } 24450 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 24451 if (rval != 0) { 24452 return (rval); 24453 } 24454 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 24455 return (EFAULT); 24456 } 24457 24458 #endif /* _MULTI_DATAMODEL */ 24459 24460 return (rval); 24461 } 24462 24463 24464 /* 24465 * Function: sd_mhdioc_inresv 24466 * 24467 * Description: This routine is the driver entry point for handling ioctl 24468 * requests to issue the SCSI-3 Persistent In Read Reservations 24469 * command to the device (MHIOCGRP_INRESV). 24470 * 24471 * Arguments: dev - the device number 24472 * arg - user provided in_resv structure 24473 * flag - this argument is a pass through to ddi_copyxxx() 24474 * directly from the mode argument of ioctl(). 24475 * 24476 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 24477 * ENXIO 24478 * EFAULT 24479 */ 24480 24481 static int 24482 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 24483 { 24484 struct sd_lun *un; 24485 mhioc_inresvs_t inresvs; 24486 int rval = 0; 24487 24488 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24489 return (ENXIO); 24490 } 24491 24492 #ifdef _MULTI_DATAMODEL 24493 24494 switch (ddi_model_convert_from(flag & FMODELS)) { 24495 case DDI_MODEL_ILP32: { 24496 struct mhioc_inresvs32 inresvs32; 24497 24498 if (ddi_copyin(arg, &inresvs32, 24499 sizeof (struct mhioc_inresvs32), flag) != 0) { 24500 return (EFAULT); 24501 } 24502 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 24503 if ((rval = sd_persistent_reservation_in_read_resv(un, 24504 &inresvs, flag)) != 0) { 24505 return (rval); 24506 } 24507 inresvs32.generation = inresvs.generation; 24508 if (ddi_copyout(&inresvs32, arg, 24509 sizeof (struct mhioc_inresvs32), flag) != 0) { 24510 return (EFAULT); 24511 } 24512 break; 24513 } 24514 case DDI_MODEL_NONE: 24515 if (ddi_copyin(arg, &inresvs, 24516 sizeof (mhioc_inresvs_t), flag) != 0) { 24517 return (EFAULT); 24518 } 24519 if ((rval = sd_persistent_reservation_in_read_resv(un, 24520 &inresvs, flag)) != 0) { 24521 return (rval); 24522 } 24523 if (ddi_copyout(&inresvs, arg, 24524 sizeof (mhioc_inresvs_t), flag) != 0) { 24525 return (EFAULT); 24526 } 24527 break; 24528 } 24529 24530 #else /* ! _MULTI_DATAMODEL */ 24531 24532 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 24533 return (EFAULT); 24534 } 24535 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 24536 if (rval != 0) { 24537 return (rval); 24538 } 24539 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 24540 return (EFAULT); 24541 } 24542 24543 #endif /* ! _MULTI_DATAMODEL */ 24544 24545 return (rval); 24546 } 24547 24548 24549 /* 24550 * The following routines support the clustering functionality described below 24551 * and implement lost reservation reclaim functionality. 24552 * 24553 * Clustering 24554 * ---------- 24555 * The clustering code uses two different, independent forms of SCSI 24556 * reservation: traditional SCSI-2 Reserve/Release, and the newer SCSI-3 24557 * Persistent Group Reservations. For any particular disk, it will use either 24558 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 24559 * 24560 * SCSI-2 24561 * The cluster software takes ownership of a multi-hosted disk by issuing the 24562 * MHIOCTKOWN ioctl to the disk driver.
It releases ownership by issuing the 24563 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a cluster 24564 * host, just after taking ownership of the disk with the MHIOCTKOWN ioctl, then 24565 * issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the driver. The 24566 * meaning of failfast is that if the driver (on this host) ever encounters the 24567 * scsi error return code RESERVATION_CONFLICT from the device, it should 24568 * immediately panic the host. The motivation for this ioctl is that if this 24569 * host does encounter reservation conflict, the underlying cause is that some 24570 * other host of the cluster has decided that this host is no longer in the 24571 * cluster and has seized control of the disks for itself. Since this host is no 24572 * longer in the cluster, it ought to panic itself. The MHIOCENFAILFAST ioctl 24573 * does two things: 24574 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 24575 * error to panic the host 24576 * (b) it sets up a periodic timer to test whether this host still has 24577 * "access" (in that no other host has reserved the device): if the 24578 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 24579 * purpose of that periodic timer is to handle scenarios where the host is 24580 * otherwise temporarily quiescent, temporarily doing no real i/o. 24581 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host, 24582 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for 24583 * the device itself. 24584 * 24585 * SCSI-3 PGR 24586 * A direct semantic implementation of the SCSI-3 Persistent Reservation 24587 * facility is supported through the shared multihost disk ioctls 24588 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 24589 * MHIOCGRP_PREEMPTANDABORT). 24590 * 24591 * Reservation Reclaim: 24592 * -------------------- 24593 * To support the lost reservation reclaim operations this driver creates a 24594 * single thread to handle reinstating reservations on all devices that have 24595 * lost reservations. sd_resv_reclaim_requests are logged for all devices that 24596 * have LOST RESERVATIONS when the scsi watch facility calls back 24597 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the 24598 * requests to regain the lost reservations. 24599 */ 24600 24601 /* 24602 * Function: sd_check_mhd() 24603 * 24604 * Description: This function sets up and submits a scsi watch request or 24605 * terminates an existing watch request. This routine is used in 24606 * support of reservation reclaim. 24607 * 24608 * Arguments: dev - the device 'dev_t' is used for context to discriminate 24609 * among multiple watches that share the callback function 24610 * interval - the number of milliseconds specifying the watch 24611 * interval for issuing TEST UNIT READY commands. If 24612 * set to 0 the watch should be terminated. If the 24613 * interval is set to 0 and if the device is required 24614 * to hold reservation while disabling failfast, the 24615 * watch is restarted with an interval of 24616 * reinstate_resv_delay.
24617 * 24618 * Return Code: 0 - Successful submit/terminate of scsi watch request 24619 * ENXIO - Indicates an invalid device was specified 24620 * EAGAIN - Unable to submit the scsi watch request 24621 */ 24622 24623 static int 24624 sd_check_mhd(dev_t dev, int interval) 24625 { 24626 struct sd_lun *un; 24627 opaque_t token; 24628 24629 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24630 return (ENXIO); 24631 } 24632 24633 /* is this a watch termination request? */ 24634 if (interval == 0) { 24635 mutex_enter(SD_MUTEX(un)); 24636 /* if there is an existing watch task then terminate it */ 24637 if (un->un_mhd_token) { 24638 token = un->un_mhd_token; 24639 un->un_mhd_token = NULL; 24640 mutex_exit(SD_MUTEX(un)); 24641 (void) scsi_watch_request_terminate(token, 24642 SCSI_WATCH_TERMINATE_WAIT); 24643 mutex_enter(SD_MUTEX(un)); 24644 } else { 24645 mutex_exit(SD_MUTEX(un)); 24646 /* 24647 * Note: If we return here we don't check for the 24648 * failfast case. This is the original legacy 24649 * implementation but perhaps we should be checking 24650 * the failfast case. 24651 */ 24652 return (0); 24653 } 24654 /* 24655 * If the device is required to hold reservation while 24656 * disabling failfast, we need to restart the scsi_watch 24657 * routine with an interval of reinstate_resv_delay. 24658 */ 24659 if (un->un_resvd_status & SD_RESERVE) { 24660 interval = sd_reinstate_resv_delay/1000; 24661 } else { 24662 /* no failfast so bail */ 24663 mutex_exit(SD_MUTEX(un)); 24664 return (0); 24665 } 24666 mutex_exit(SD_MUTEX(un)); 24667 } 24668 24669 /* 24670 * adjust minimum time interval to 1 second, 24671 * and convert from msecs to usecs 24672 */ 24673 if (interval > 0 && interval < 1000) { 24674 interval = 1000; 24675 } 24676 interval *= 1000; 24677 24678 /* 24679 * submit the request to the scsi_watch service 24680 */ 24681 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 24682 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 24683 if (token == NULL) { 24684 return (EAGAIN); 24685 } 24686 24687 /* 24688 * save token for termination later on 24689 */ 24690 mutex_enter(SD_MUTEX(un)); 24691 un->un_mhd_token = token; 24692 mutex_exit(SD_MUTEX(un)); 24693 return (0); 24694 } 24695 24696 24697 /* 24698 * Function: sd_mhd_watch_cb() 24699 * 24700 * Description: This function is the call back function used by the scsi watch 24701 * facility. The scsi watch facility sends the "Test Unit Ready" 24702 * and processes the status. If applicable (i.e. a "Unit Attention" 24703 * status and automatic "Request Sense" not used) the scsi watch 24704 * facility will send a "Request Sense" and retrieve the sense data 24705 * to be passed to this callback function. In either case the 24706 * automatic "Request Sense" or the facility submitting one, this 24707 * callback is passed the status and sense data. 
24708 * 24709 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24710 * among multiple watches that share this callback function 24711 * resultp - scsi watch facility result packet containing scsi 24712 * packet, status byte and sense data 24713 * 24714 * Return Code: 0 - continue the watch task 24715 * non-zero - terminate the watch task 24716 */ 24717 24718 static int 24719 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 24720 { 24721 struct sd_lun *un; 24722 struct scsi_status *statusp; 24723 struct scsi_extended_sense *sensep; 24724 struct scsi_pkt *pkt; 24725 uchar_t actual_sense_length; 24726 dev_t dev = (dev_t)arg; 24727 24728 ASSERT(resultp != NULL); 24729 statusp = resultp->statusp; 24730 sensep = resultp->sensep; 24731 pkt = resultp->pkt; 24732 actual_sense_length = resultp->actual_sense_length; 24733 24734 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24735 return (ENXIO); 24736 } 24737 24738 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24739 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 24740 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 24741 24742 /* Begin processing of the status and/or sense data */ 24743 if (pkt->pkt_reason != CMD_CMPLT) { 24744 /* Handle the incomplete packet */ 24745 sd_mhd_watch_incomplete(un, pkt); 24746 return (0); 24747 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 24748 if (*((unsigned char *)statusp) 24749 == STATUS_RESERVATION_CONFLICT) { 24750 /* 24751 * Handle a reservation conflict by panicking if 24752 * configured for failfast or by logging the conflict 24753 * and updating the reservation status 24754 */ 24755 mutex_enter(SD_MUTEX(un)); 24756 if ((un->un_resvd_status & SD_FAILFAST) && 24757 (sd_failfast_enable)) { 24758 panic("Reservation Conflict"); 24759 /*NOTREACHED*/ 24760 } 24761 SD_INFO(SD_LOG_IOCTL_MHD, un, 24762 "sd_mhd_watch_cb: Reservation Conflict\n"); 24763 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 24764 mutex_exit(SD_MUTEX(un)); 24765 } 24766 } 24767 24768 if (sensep != NULL) { 24769 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 24770 mutex_enter(SD_MUTEX(un)); 24771 if ((sensep->es_add_code == SD_SCSI_RESET_SENSE_CODE) && 24772 (un->un_resvd_status & SD_RESERVE)) { 24773 /* 24774 * The additional sense code indicates a power 24775 * on or bus device reset has occurred; update 24776 * the reservation status. 24777 */ 24778 un->un_resvd_status |= 24779 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24780 SD_INFO(SD_LOG_IOCTL_MHD, un, 24781 "sd_mhd_watch_cb: Lost Reservation\n"); 24782 } 24783 } else { 24784 return (0); 24785 } 24786 } else { 24787 mutex_enter(SD_MUTEX(un)); 24788 } 24789 24790 if ((un->un_resvd_status & SD_RESERVE) && 24791 (un->un_resvd_status & SD_LOST_RESERVE)) { 24792 if (un->un_resvd_status & SD_WANT_RESERVE) { 24793 /* 24794 * A reset occurred in between the last probe and this 24795 * one so if a timeout is pending cancel it. 
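 *
 * Note that SD_MUTEX is dropped around the untimeout() call
 * below: untimeout(9F) may wait for a handler that is already
 * executing, and the handler (sd_mhd_resvd_recover) grabs
 * SD_MUTEX itself, so holding the mutex across the call could
 * deadlock.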
24796 */ 24797 if (un->un_resvd_timeid) { 24798 timeout_id_t temp_id = un->un_resvd_timeid; 24799 un->un_resvd_timeid = NULL; 24800 mutex_exit(SD_MUTEX(un)); 24801 (void) untimeout(temp_id); 24802 mutex_enter(SD_MUTEX(un)); 24803 } 24804 un->un_resvd_status &= ~SD_WANT_RESERVE; 24805 } 24806 if (un->un_resvd_timeid == 0) { 24807 /* Schedule a timeout to handle the lost reservation */ 24808 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 24809 (void *)dev, 24810 drv_usectohz(sd_reinstate_resv_delay)); 24811 } 24812 } 24813 mutex_exit(SD_MUTEX(un)); 24814 return (0); 24815 } 24816 24817 24818 /* 24819 * Function: sd_mhd_watch_incomplete() 24820 * 24821 * Description: This function is used to find out why a scsi pkt sent by the 24822 * scsi watch facility was not completed. Under some scenarios this 24823 * routine will simply return. Otherwise it will send a bus reset to 24824 * see if the drive is still online. 24825 * 24826 * Arguments: un - driver soft state (unit) structure 24827 * pkt - incomplete scsi pkt 24828 */ 24829 24830 static void 24831 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 24832 { 24833 int be_chatty; 24834 int perr; 24835 24836 ASSERT(pkt != NULL); 24837 ASSERT(un != NULL); 24838 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 24839 perr = (pkt->pkt_statistics & STAT_PERR); 24840 24841 mutex_enter(SD_MUTEX(un)); 24842 if (un->un_state == SD_STATE_DUMPING) { 24843 mutex_exit(SD_MUTEX(un)); 24844 return; 24845 } 24846 24847 switch (pkt->pkt_reason) { 24848 case CMD_UNX_BUS_FREE: 24849 /* 24850 * If we had a parity error that caused the target to drop BSY*, 24851 * don't be chatty about it. 24852 */ 24853 if (perr && be_chatty) { 24854 be_chatty = 0; 24855 } 24856 break; 24857 case CMD_TAG_REJECT: 24858 /* 24859 * The SCSI-2 spec states that a tag reject will be sent by the 24860 * target if tagged queuing is not supported. A tag reject may 24861 * also be sent during certain initialization periods or to 24862 * control internal resources. For the latter case the target 24863 * may also return Queue Full. 24864 * 24865 * If this driver receives a tag reject from a target that is 24866 * going through an init period or controlling internal 24867 * resources, tagged queuing will be disabled. This is a less 24868 * than optimal behavior but the driver is unable to determine 24869 * the target state and assumes tagged queueing is not supported. 24870 */ 24871 pkt->pkt_flags = 0; 24872 un->un_tagflags = 0; 24873 24874 if (un->un_f_opt_queueing == TRUE) { 24875 un->un_throttle = min(un->un_throttle, 3); 24876 } else { 24877 un->un_throttle = 1; 24878 } 24879 mutex_exit(SD_MUTEX(un)); 24880 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 24881 mutex_enter(SD_MUTEX(un)); 24882 break; 24883 case CMD_INCOMPLETE: 24884 /* 24885 * The transport stopped with an abnormal state; fall through and 24886 * reset the target and/or bus, unless selection did not complete 24887 * (indicated by STATE_GOT_BUS), in which case we don't want to 24888 * go through a target/bus reset. 24889 */ 24890 if (pkt->pkt_state == STATE_GOT_BUS) { 24891 break; 24892 } 24893 /*FALLTHROUGH*/ 24894 24895 case CMD_TIMEOUT: 24896 default: 24897 /* 24898 * The lun may still be running the command, so a lun reset 24899 * should be attempted. If the lun reset fails or cannot be 24900 * issued, then try a target reset. Lastly try a bus reset.
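 *
 * Subject to the un_f_allow_bus_device_reset and
 * un_f_lun_reset_enabled settings, the escalation below is in
 * effect (scsi_reset() returns nonzero on success):
 *
 *	if (scsi_reset(ap, RESET_LUN) == 0)
 *		if (scsi_reset(ap, RESET_TARGET) == 0)
 *			(void) scsi_reset(ap, RESET_ALL);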
24901 */ 24902 if ((pkt->pkt_statistics & 24903 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 24904 int reset_retval = 0; 24905 mutex_exit(SD_MUTEX(un)); 24906 if (un->un_f_allow_bus_device_reset == TRUE) { 24907 if (un->un_f_lun_reset_enabled == TRUE) { 24908 reset_retval = 24909 scsi_reset(SD_ADDRESS(un), 24910 RESET_LUN); 24911 } 24912 if (reset_retval == 0) { 24913 reset_retval = 24914 scsi_reset(SD_ADDRESS(un), 24915 RESET_TARGET); 24916 } 24917 } 24918 if (reset_retval == 0) { 24919 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 24920 } 24921 mutex_enter(SD_MUTEX(un)); 24922 } 24923 break; 24924 } 24925 24926 /* A device/bus reset has occurred; update the reservation status. */ 24927 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 24928 (STAT_BUS_RESET | STAT_DEV_RESET))) { 24929 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24930 un->un_resvd_status |= 24931 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24932 SD_INFO(SD_LOG_IOCTL_MHD, un, 24933 "sd_mhd_watch_incomplete: Lost Reservation\n"); 24934 } 24935 } 24936 24937 /* 24938 * The disk has been turned off; Update the device state. 24939 * 24940 * Note: Should we be offlining the disk here? 24941 */ 24942 if (pkt->pkt_state == STATE_GOT_BUS) { 24943 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 24944 "Disk not responding to selection\n"); 24945 if (un->un_state != SD_STATE_OFFLINE) { 24946 New_state(un, SD_STATE_OFFLINE); 24947 } 24948 } else if (be_chatty) { 24949 /* 24950 * suppress messages if they are all the same pkt reason; 24951 * with TQ, many (up to 256) are returned with the same 24952 * pkt_reason 24953 */ 24954 if (pkt->pkt_reason != un->un_last_pkt_reason) { 24955 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24956 "sd_mhd_watch_incomplete: " 24957 "SCSI transport failed: reason '%s'\n", 24958 scsi_rname(pkt->pkt_reason)); 24959 } 24960 } 24961 un->un_last_pkt_reason = pkt->pkt_reason; 24962 mutex_exit(SD_MUTEX(un)); 24963 } 24964 24965 24966 /* 24967 * Function: sd_sname() 24968 * 24969 * Description: This is a simple little routine to return a string containing 24970 * a printable description of command status byte for use in 24971 * logging. 24972 * 24973 * Arguments: status - pointer to a status byte 24974 * 24975 * Return Code: char * - string containing status description. 24976 */ 24977 24978 static char * 24979 sd_sname(uchar_t status) 24980 { 24981 switch (status & STATUS_MASK) { 24982 case STATUS_GOOD: 24983 return ("good status"); 24984 case STATUS_CHECK: 24985 return ("check condition"); 24986 case STATUS_MET: 24987 return ("condition met"); 24988 case STATUS_BUSY: 24989 return ("busy"); 24990 case STATUS_INTERMEDIATE: 24991 return ("intermediate"); 24992 case STATUS_INTERMEDIATE_MET: 24993 return ("intermediate - condition met"); 24994 case STATUS_RESERVATION_CONFLICT: 24995 return ("reservation_conflict"); 24996 case STATUS_TERMINATED: 24997 return ("command terminated"); 24998 case STATUS_QFULL: 24999 return ("queue full"); 25000 default: 25001 return ("<unknown status>"); 25002 } 25003 } 25004 25005 25006 /* 25007 * Function: sd_mhd_resvd_recover() 25008 * 25009 * Description: This function adds a reservation entry to the 25010 * sd_resv_reclaim_request list and signals the reservation 25011 * reclaim thread that there is work pending. If the reservation 25012 * reclaim thread has not been previously created this function 25013 * will kick it off. 
25014 * 25015 * Arguments: arg - the device 'dev_t' is used for context to discriminate 25016 * among multiple watches that share this callback function 25017 * 25018 * Context: This routine is called by timeout() and is run in interrupt 25019 * context. It must not sleep or call other functions which may 25020 * sleep. 25021 */ 25022 25023 static void 25024 sd_mhd_resvd_recover(void *arg) 25025 { 25026 dev_t dev = (dev_t)arg; 25027 struct sd_lun *un; 25028 struct sd_thr_request *sd_treq = NULL; 25029 struct sd_thr_request *sd_cur = NULL; 25030 struct sd_thr_request *sd_prev = NULL; 25031 int already_there = 0; 25032 25033 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25034 return; 25035 } 25036 25037 mutex_enter(SD_MUTEX(un)); 25038 un->un_resvd_timeid = NULL; 25039 if (un->un_resvd_status & SD_WANT_RESERVE) { 25040 /* 25041 * There was a reset so don't issue the reserve, allow the 25042 * sd_mhd_watch_cb callback function to notice this and 25043 * reschedule the timeout for reservation. 25044 */ 25045 mutex_exit(SD_MUTEX(un)); 25046 return; 25047 } 25048 mutex_exit(SD_MUTEX(un)); 25049 25050 /* 25051 * Add this device to the sd_resv_reclaim_request list and the 25052 * sd_resv_reclaim_thread should take care of the rest. 25053 * 25054 * Note: We can't sleep in this context so if the memory allocation 25055 * fails allow the sd_mhd_watch_cb callback function to notice this and 25056 * reschedule the timeout for reservation. (4378460) 25057 */ 25058 sd_treq = (struct sd_thr_request *) 25059 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 25060 if (sd_treq == NULL) { 25061 return; 25062 } 25063 25064 sd_treq->sd_thr_req_next = NULL; 25065 sd_treq->dev = dev; 25066 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25067 if (sd_tr.srq_thr_req_head == NULL) { 25068 sd_tr.srq_thr_req_head = sd_treq; 25069 } else { 25070 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 25071 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 25072 if (sd_cur->dev == dev) { 25073 /* 25074 * already in Queue so don't log 25075 * another request for the device 25076 */ 25077 already_there = 1; 25078 break; 25079 } 25080 sd_prev = sd_cur; 25081 } 25082 if (!already_there) { 25083 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 25084 "logging request for %lx\n", dev); 25085 sd_prev->sd_thr_req_next = sd_treq; 25086 } else { 25087 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 25088 } 25089 } 25090 25091 /* 25092 * Create a kernel thread to do the reservation reclaim and free up this 25093 * thread. 
We cannot block this thread while we go away to do the 25094 * reservation reclaim 25095 */ 25096 if (sd_tr.srq_resv_reclaim_thread == NULL) 25097 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 25098 sd_resv_reclaim_thread, NULL, 25099 0, &p0, TS_RUN, v.v_maxsyspri - 2); 25100 25101 /* Tell the reservation reclaim thread that it has work to do */ 25102 cv_signal(&sd_tr.srq_resv_reclaim_cv); 25103 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25104 } 25105 25106 /* 25107 * Function: sd_resv_reclaim_thread() 25108 * 25109 * Description: This function implements the reservation reclaim operations 25110 * 25111 * Arguments: arg - the device 'dev_t' is used for context to discriminate 25112 * among multiple watches that share this callback function 25113 */ 25114 25115 static void 25116 sd_resv_reclaim_thread() 25117 { 25118 struct sd_lun *un; 25119 struct sd_thr_request *sd_mhreq; 25120 25121 /* Wait for work */ 25122 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25123 if (sd_tr.srq_thr_req_head == NULL) { 25124 cv_wait(&sd_tr.srq_resv_reclaim_cv, 25125 &sd_tr.srq_resv_reclaim_mutex); 25126 } 25127 25128 /* Loop while we have work */ 25129 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 25130 un = ddi_get_soft_state(sd_state, 25131 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 25132 if (un == NULL) { 25133 /* 25134 * softstate structure is NULL so just 25135 * dequeue the request and continue 25136 */ 25137 sd_tr.srq_thr_req_head = 25138 sd_tr.srq_thr_cur_req->sd_thr_req_next; 25139 kmem_free(sd_tr.srq_thr_cur_req, 25140 sizeof (struct sd_thr_request)); 25141 continue; 25142 } 25143 25144 /* dequeue the request */ 25145 sd_mhreq = sd_tr.srq_thr_cur_req; 25146 sd_tr.srq_thr_req_head = 25147 sd_tr.srq_thr_cur_req->sd_thr_req_next; 25148 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25149 25150 /* 25151 * Reclaim reservation only if SD_RESERVE is still set. There 25152 * may have been a call to MHIOCRELEASE before we got here. 25153 */ 25154 mutex_enter(SD_MUTEX(un)); 25155 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25156 /* 25157 * Note: The SD_LOST_RESERVE flag is cleared before 25158 * reclaiming the reservation. If this is done after the 25159 * call to sd_reserve_release a reservation loss in the 25160 * window between pkt completion of reserve cmd and 25161 * mutex_enter below may not be recognized 25162 */ 25163 un->un_resvd_status &= ~SD_LOST_RESERVE; 25164 mutex_exit(SD_MUTEX(un)); 25165 25166 if (sd_reserve_release(sd_mhreq->dev, 25167 SD_RESERVE) == 0) { 25168 mutex_enter(SD_MUTEX(un)); 25169 un->un_resvd_status |= SD_RESERVE; 25170 mutex_exit(SD_MUTEX(un)); 25171 SD_INFO(SD_LOG_IOCTL_MHD, un, 25172 "sd_resv_reclaim_thread: " 25173 "Reservation Recovered\n"); 25174 } else { 25175 mutex_enter(SD_MUTEX(un)); 25176 un->un_resvd_status |= SD_LOST_RESERVE; 25177 mutex_exit(SD_MUTEX(un)); 25178 SD_INFO(SD_LOG_IOCTL_MHD, un, 25179 "sd_resv_reclaim_thread: Failed " 25180 "Reservation Recovery\n"); 25181 } 25182 } else { 25183 mutex_exit(SD_MUTEX(un)); 25184 } 25185 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25186 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 25187 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25188 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 25189 /* 25190 * wakeup the destroy thread if anyone is waiting on 25191 * us to complete. 
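 * (sd_rmv_resv_reclaim_req() is one such waiter: it blocks on
 * srq_inprocess_cv until the request it wants removed is no
 * longer being processed.)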
25192 */ 25193 cv_signal(&sd_tr.srq_inprocess_cv); 25194 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25195 "sd_resv_reclaim_thread: cv_signalling current request \n"); 25196 } 25197 25198 /* 25199 * clean up the sd_tr structure now that this thread is exiting 25200 */ 25201 ASSERT(sd_tr.srq_thr_req_head == NULL); 25202 ASSERT(sd_tr.srq_thr_cur_req == NULL); 25203 sd_tr.srq_resv_reclaim_thread = NULL; 25204 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25205 thread_exit(); 25206 } 25207 25208 25209 /* 25210 * Function: sd_rmv_resv_reclaim_req() 25211 * 25212 * Description: This function removes any pending reservation reclaim requests 25213 * for the specified device. 25214 * 25215 * Arguments: dev - the device 'dev_t' 25216 */ 25217 25218 static void 25219 sd_rmv_resv_reclaim_req(dev_t dev) 25220 { 25221 struct sd_thr_request *sd_mhreq; 25222 struct sd_thr_request *sd_prev; 25223 25224 /* Remove a reservation reclaim request from the list */ 25225 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25226 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 25227 /* 25228 * We are attempting to reinstate reservation for 25229 * this device. We wait for sd_reserve_release() 25230 * to return before we return. 25231 */ 25232 cv_wait(&sd_tr.srq_inprocess_cv, 25233 &sd_tr.srq_resv_reclaim_mutex); 25234 } else { 25235 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 25236 if (sd_mhreq && sd_mhreq->dev == dev) { 25237 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 25238 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25239 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25240 return; 25241 } 25242 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 25243 if (sd_mhreq && sd_mhreq->dev == dev) { 25244 break; 25245 } 25246 sd_prev = sd_mhreq; 25247 } 25248 if (sd_mhreq != NULL) { 25249 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 25250 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25251 } 25252 } 25253 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25254 } 25255 25256 25257 /* 25258 * Function: sd_mhd_reset_notify_cb() 25259 * 25260 * Description: This is a call back function for scsi_reset_notify. This 25261 * function updates the softstate reserved status and logs the 25262 * reset. The driver scsi watch facility callback function 25263 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 25264 * will reclaim the reservation. 25265 * 25266 * Arguments: arg - driver soft state (unit) structure 25267 */ 25268 25269 static void 25270 sd_mhd_reset_notify_cb(caddr_t arg) 25271 { 25272 struct sd_lun *un = (struct sd_lun *)arg; 25273 25274 mutex_enter(SD_MUTEX(un)); 25275 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25276 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 25277 SD_INFO(SD_LOG_IOCTL_MHD, un, 25278 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 25279 } 25280 mutex_exit(SD_MUTEX(un)); 25281 } 25282 25283 25284 /* 25285 * Function: sd_take_ownership() 25286 * 25287 * Description: This routine implements an algorithm to achieve a stable 25288 * reservation on disks which don't implement priority reserve, 25289 * and makes sure that other hosts' re-reservation attempts fail. 25290 * This algorithm consists of a loop that keeps issuing the RESERVE 25291 * for some period of time (min_ownership_delay, default 6 seconds). 25292 * During that loop, it looks to see if there has been a bus device 25293 * reset or bus reset (both of which cause an existing reservation 25294 * to be lost).
If the reservation is lost issue RESERVE until a 25295 * period of min_ownership_delay with no resets has gone by, or 25296 * until max_ownership_delay has expired. This loop ensures that 25297 * the host really did manage to reserve the device, in spite of 25298 * resets. The looping for min_ownership_delay (default six 25299 * seconds) is important to early generation clustering products, 25300 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 25301 * MHIOCENFAILFAST periodic timer of two seconds. By having 25302 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 25303 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 25304 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 25305 * have already noticed, via the MHIOCENFAILFAST polling, that it 25306 * no longer "owns" the disk and will have panicked itself. Thus, 25307 * the host issuing the MHIOCTKOWN is assured (with timing 25308 * dependencies) that by the time it actually starts to use the 25309 * disk for real work, the old owner is no longer accessing it. 25310 * 25311 * min_ownership_delay is the minimum amount of time for which the 25312 * disk must be reserved continuously devoid of resets before the 25313 * MHIOCTKOWN ioctl will return success. 25314 * 25315 * max_ownership_delay indicates the amount of time by which the 25316 * take ownership should succeed or timeout with an error. 25317 * 25318 * Arguments: dev - the device 'dev_t' 25319 * *p - struct containing timing info. 25320 * 25321 * Return Code: 0 for success or error code 25322 */ 25323 25324 static int 25325 sd_take_ownership(dev_t dev, struct mhioctkown *p) 25326 { 25327 struct sd_lun *un; 25328 int rval; 25329 int err; 25330 int reservation_count = 0; 25331 int min_ownership_delay = 6000000; /* in usec */ 25332 int max_ownership_delay = 30000000; /* in usec */ 25333 clock_t start_time; /* starting time of this algorithm */ 25334 clock_t end_time; /* time limit for giving up */ 25335 clock_t ownership_time; /* time limit for stable ownership */ 25336 clock_t current_time; 25337 clock_t previous_current_time; 25338 25339 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25340 return (ENXIO); 25341 } 25342 25343 /* 25344 * Attempt a device reservation. A priority reservation is requested. 
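 * A priority reservation differs from a plain RESERVE in that
 * sd_reserve_release() will "break" a reservation held by another
 * host (by resetting the device) if the initial RESERVE returns a
 * reservation conflict. The loop below then re-issues RESERVE every
 * 500 msec until the reservation has been stable for
 * min_ownership_delay (with at least four consecutive successful
 * probes), or gives up with EACCES once max_ownership_delay expires.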
25345 */ 25346 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 25347 != SD_SUCCESS) { 25348 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25349 "sd_take_ownership: return(1)=%d\n", rval); 25350 return (rval); 25351 } 25352 25353 /* Update the softstate reserved status to indicate the reservation */ 25354 mutex_enter(SD_MUTEX(un)); 25355 un->un_resvd_status |= SD_RESERVE; 25356 un->un_resvd_status &= 25357 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 25358 mutex_exit(SD_MUTEX(un)); 25359 25360 if (p != NULL) { 25361 if (p->min_ownership_delay != 0) { 25362 min_ownership_delay = p->min_ownership_delay * 1000; 25363 } 25364 if (p->max_ownership_delay != 0) { 25365 max_ownership_delay = p->max_ownership_delay * 1000; 25366 } 25367 } 25368 SD_INFO(SD_LOG_IOCTL_MHD, un, 25369 "sd_take_ownership: min, max delays: %d, %d\n", 25370 min_ownership_delay, max_ownership_delay); 25371 25372 start_time = ddi_get_lbolt(); 25373 current_time = start_time; 25374 ownership_time = current_time + drv_usectohz(min_ownership_delay); 25375 end_time = start_time + drv_usectohz(max_ownership_delay); 25376 25377 while (current_time - end_time < 0) { 25378 delay(drv_usectohz(500000)); 25379 25380 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 25381 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 25382 mutex_enter(SD_MUTEX(un)); 25383 rval = (un->un_resvd_status & 25384 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 25385 mutex_exit(SD_MUTEX(un)); 25386 break; 25387 } 25388 } 25389 previous_current_time = current_time; 25390 current_time = ddi_get_lbolt(); 25391 mutex_enter(SD_MUTEX(un)); 25392 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 25393 ownership_time = ddi_get_lbolt() + 25394 drv_usectohz(min_ownership_delay); 25395 reservation_count = 0; 25396 } else { 25397 reservation_count++; 25398 } 25399 un->un_resvd_status |= SD_RESERVE; 25400 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 25401 mutex_exit(SD_MUTEX(un)); 25402 25403 SD_INFO(SD_LOG_IOCTL_MHD, un, 25404 "sd_take_ownership: ticks for loop iteration=%ld, " 25405 "reservation=%s\n", (current_time - previous_current_time), 25406 reservation_count ? 
"ok" : "reclaimed"); 25407 25408 if (current_time - ownership_time >= 0 && 25409 reservation_count >= 4) { 25410 rval = 0; /* Achieved a stable ownership */ 25411 break; 25412 } 25413 if (current_time - end_time >= 0) { 25414 rval = EACCES; /* No ownership in max possible time */ 25415 break; 25416 } 25417 } 25418 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25419 "sd_take_ownership: return(2)=%d\n", rval); 25420 return (rval); 25421 } 25422 25423 25424 /* 25425 * Function: sd_reserve_release() 25426 * 25427 * Description: This function builds and sends scsi RESERVE, RELEASE, and 25428 * PRIORITY RESERVE commands based on a user specified command type 25429 * 25430 * Arguments: dev - the device 'dev_t' 25431 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 25432 * SD_RESERVE, SD_RELEASE 25433 * 25434 * Return Code: 0 or Error Code 25435 */ 25436 25437 static int 25438 sd_reserve_release(dev_t dev, int cmd) 25439 { 25440 struct uscsi_cmd *com = NULL; 25441 struct sd_lun *un = NULL; 25442 char cdb[CDB_GROUP0]; 25443 int rval; 25444 25445 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 25446 (cmd == SD_PRIORITY_RESERVE)); 25447 25448 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25449 return (ENXIO); 25450 } 25451 25452 /* instantiate and initialize the command and cdb */ 25453 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25454 bzero(cdb, CDB_GROUP0); 25455 com->uscsi_flags = USCSI_SILENT; 25456 com->uscsi_timeout = un->un_reserve_release_time; 25457 com->uscsi_cdblen = CDB_GROUP0; 25458 com->uscsi_cdb = cdb; 25459 if (cmd == SD_RELEASE) { 25460 cdb[0] = SCMD_RELEASE; 25461 } else { 25462 cdb[0] = SCMD_RESERVE; 25463 } 25464 25465 /* Send the command. */ 25466 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 25467 UIO_SYSSPACE, SD_PATH_STANDARD); 25468 25469 /* 25470 * "break" a reservation that is held by another host, by issuing a 25471 * reset if priority reserve is desired, and we could not get the 25472 * device. 25473 */ 25474 if ((cmd == SD_PRIORITY_RESERVE) && 25475 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25476 /* 25477 * First try to reset the LUN. If we cannot, then try a target 25478 * reset, followed by a bus reset if the target reset fails. 25479 */ 25480 int reset_retval = 0; 25481 if (un->un_f_lun_reset_enabled == TRUE) { 25482 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 25483 } 25484 if (reset_retval == 0) { 25485 /* The LUN reset either failed or was not issued */ 25486 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25487 } 25488 if ((reset_retval == 0) && 25489 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 25490 rval = EIO; 25491 kmem_free(com, sizeof (*com)); 25492 return (rval); 25493 } 25494 25495 bzero(com, sizeof (struct uscsi_cmd)); 25496 com->uscsi_flags = USCSI_SILENT; 25497 com->uscsi_cdb = cdb; 25498 com->uscsi_cdblen = CDB_GROUP0; 25499 com->uscsi_timeout = 5; 25500 25501 /* 25502 * Reissue the last reserve command, this time without request 25503 * sense. Assume that it is just a regular reserve command. 25504 */ 25505 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 25506 UIO_SYSSPACE, SD_PATH_STANDARD); 25507 } 25508 25509 /* Return an error if still getting a reservation conflict. 
*/ 25510 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25511 rval = EACCES; 25512 } 25513 25514 kmem_free(com, sizeof (*com)); 25515 return (rval); 25516 } 25517 25518 25519 #define SD_NDUMP_RETRIES 12 25520 /* 25521 * System Crash Dump routine 25522 */ 25523 25524 static int 25525 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 25526 { 25527 int instance; 25528 int partition; 25529 int i; 25530 int err; 25531 struct sd_lun *un; 25532 struct dk_map *lp; 25533 struct scsi_pkt *wr_pktp; 25534 struct buf *wr_bp; 25535 struct buf wr_buf; 25536 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 25537 daddr_t tgt_blkno; /* rmw - blkno for target */ 25538 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 25539 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 25540 size_t io_start_offset; 25541 int doing_rmw = FALSE; 25542 int rval; 25543 #if defined(__i386) || defined(__amd64) 25544 ssize_t dma_resid; 25545 daddr_t oblkno; 25546 #endif 25547 25548 instance = SDUNIT(dev); 25549 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 25550 (!un->un_f_geometry_is_valid) || ISCD(un)) { 25551 return (ENXIO); 25552 } 25553 25554 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 25555 25556 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 25557 25558 partition = SDPART(dev); 25559 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 25560 25561 /* Validate the blocks to dump against the partition size. */ 25562 lp = &un->un_map[partition]; 25563 if ((blkno + nblk) > lp->dkl_nblk) { 25564 SD_TRACE(SD_LOG_DUMP, un, 25565 "sddump: dump range larger than partition: " 25566 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 25567 blkno, nblk, lp->dkl_nblk); 25568 return (EINVAL); 25569 } 25570 25571 mutex_enter(&un->un_pm_mutex); 25572 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 25573 struct scsi_pkt *start_pktp; 25574 25575 mutex_exit(&un->un_pm_mutex); 25576 25577 /* 25578 * use the pm framework to power on the HBA first 25579 */ 25580 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 25581 25582 /* 25583 * Dump no longer uses sdpower to power on a device; the start is 25584 * done in-line here so it can be done in polled mode. 25585 */ 25586 25587 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 25588 25589 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 25590 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 25591 25592 if (start_pktp == NULL) { 25593 /* We were not given a SCSI packet, fail. */ 25594 return (EIO); 25595 } 25596 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 25597 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 25598 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 25599 start_pktp->pkt_flags = FLAG_NOINTR; 25600 25601 mutex_enter(SD_MUTEX(un)); 25602 SD_FILL_SCSI1_LUN(un, start_pktp); 25603 mutex_exit(SD_MUTEX(un)); 25604 /* 25605 * Scsi_poll returns 0 (success) if the command completes and 25606 * the status block is STATUS_GOOD. 25607 */ 25608 if (sd_scsi_poll(un, start_pktp) != 0) { 25609 scsi_destroy_pkt(start_pktp); 25610 return (EIO); 25611 } 25612 scsi_destroy_pkt(start_pktp); 25613 (void) sd_ddi_pm_resume(un); 25614 } else { 25615 mutex_exit(&un->un_pm_mutex); 25616 } 25617 25618 mutex_enter(SD_MUTEX(un)); 25619 un->un_throttle = 0; 25620 25621 /* 25622 * The first time through, reset the specific target device. 25623 * However, when cpr calls sddump we know that sd is in 25624 * a good state so no bus reset is required. 25625 * Clear sense data via Request Sense cmd.
25626 * In sddump we don't care about allow_bus_device_reset anymore 25627 */ 25628 25629 if ((un->un_state != SD_STATE_SUSPENDED) && 25630 (un->un_state != SD_STATE_DUMPING)) { 25631 25632 New_state(un, SD_STATE_DUMPING); 25633 25634 if (un->un_f_is_fibre == FALSE) { 25635 mutex_exit(SD_MUTEX(un)); 25636 /* 25637 * Attempt a bus reset for parallel scsi. 25638 * 25639 * Note: A bus reset is required because on some host 25640 * systems (i.e. E420R) a bus device reset is 25641 * insufficient to reset the state of the target. 25642 * 25643 * Note: Don't issue the reset for fibre-channel, 25644 * because this tends to hang the bus (loop) for 25645 * too long while everyone is logging out and in 25646 * and the deadman timer for dumping will fire 25647 * before the dump is complete. 25648 */ 25649 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 25650 mutex_enter(SD_MUTEX(un)); 25651 Restore_state(un); 25652 mutex_exit(SD_MUTEX(un)); 25653 return (EIO); 25654 } 25655 25656 /* Delay to give the device some recovery time. */ 25657 drv_usecwait(10000); 25658 25659 if (sd_send_polled_RQS(un) == SD_FAILURE) { 25660 SD_INFO(SD_LOG_DUMP, un, 25661 "sddump: sd_send_polled_RQS failed\n"); 25662 } 25663 mutex_enter(SD_MUTEX(un)); 25664 } 25665 } 25666 25667 /* 25668 * Convert the partition-relative block number to a 25669 * disk physical block number. 25670 */ 25671 blkno += un->un_offset[partition]; 25672 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 25673 25674 25675 /* 25676 * Check if the device has a non-512 block size. 25677 */ 25678 wr_bp = NULL; 25679 if (NOT_DEVBSIZE(un)) { 25680 tgt_byte_offset = blkno * un->un_sys_blocksize; 25681 tgt_byte_count = nblk * un->un_sys_blocksize; 25682 if ((tgt_byte_offset % un->un_tgt_blocksize) || 25683 (tgt_byte_count % un->un_tgt_blocksize)) { 25684 doing_rmw = TRUE; 25685 /* 25686 * Calculate the block number and number of blocks 25687 * in terms of the media block size. 25688 */ 25689 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 25690 tgt_nblk = 25691 ((tgt_byte_offset + tgt_byte_count + 25692 (un->un_tgt_blocksize - 1)) / 25693 un->un_tgt_blocksize) - tgt_blkno; 25694 25695 /* 25696 * Invoke the routine which is going to do the read part 25697 * of the read-modify-write. 25698 * Note that this routine returns a pointer to 25699 * a valid bp in wr_bp. 25700 */ 25701 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 25702 &wr_bp); 25703 if (err) { 25704 mutex_exit(SD_MUTEX(un)); 25705 return (err); 25706 } 25707 /* 25708 * The offset is calculated as: 25709 * (original block # * system block size) - 25710 * (new target block # * target block size) 25711 */ 25712 io_start_offset = 25713 ((uint64_t)(blkno * un->un_sys_blocksize)) - 25714 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 25715 25716 ASSERT((io_start_offset >= 0) && 25717 (io_start_offset < un->un_tgt_blocksize)); 25718 /* 25719 * Do the modify portion of the read-modify-write.
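 *
 * For example, with 512-byte system blocks and a 2048-byte target
 * block size, blkno 7 gives tgt_byte_offset 3584, tgt_blkno 1, and
 * io_start_offset 3584 - 2048 = 1536; the dump data is copied 1536
 * bytes into the read-back target block before the combined block
 * is written out.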
25720 */ 25721 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 25722 (size_t)nblk * un->un_sys_blocksize); 25723 } else { 25724 doing_rmw = FALSE; 25725 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 25726 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 25727 } 25728 25729 /* Convert blkno and nblk to target blocks */ 25730 blkno = tgt_blkno; 25731 nblk = tgt_nblk; 25732 } else { 25733 wr_bp = &wr_buf; 25734 bzero(wr_bp, sizeof (struct buf)); 25735 wr_bp->b_flags = B_BUSY; 25736 wr_bp->b_un.b_addr = addr; 25737 wr_bp->b_bcount = nblk << DEV_BSHIFT; 25738 wr_bp->b_resid = 0; 25739 } 25740 25741 mutex_exit(SD_MUTEX(un)); 25742 25743 /* 25744 * Obtain a SCSI packet for the write command. 25745 * It should be safe to call the allocator here without 25746 * worrying about being locked for DVMA mapping because 25747 * the address we're passed is already a DVMA mapping 25748 * 25749 * We are also not going to worry about semaphore ownership 25750 * in the dump buffer. Dumping is single threaded at present. 25751 */ 25752 25753 wr_pktp = NULL; 25754 25755 #if defined(__i386) || defined(__amd64) 25756 dma_resid = wr_bp->b_bcount; 25757 oblkno = blkno; 25758 while (dma_resid != 0) { 25759 #endif 25760 25761 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 25762 wr_bp->b_flags &= ~B_ERROR; 25763 25764 #if defined(__i386) || defined(__amd64) 25765 blkno = oblkno + 25766 ((wr_bp->b_bcount - dma_resid) / 25767 un->un_tgt_blocksize); 25768 nblk = dma_resid / un->un_tgt_blocksize; 25769 25770 if (wr_pktp) { 25771 /* Partial DMA transfers after initial transfer */ 25772 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 25773 blkno, nblk); 25774 } else { 25775 /* Initial transfer */ 25776 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 25777 un->un_pkt_flags, NULL_FUNC, NULL, 25778 blkno, nblk); 25779 } 25780 #else 25781 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 25782 0, NULL_FUNC, NULL, blkno, nblk); 25783 #endif 25784 25785 if (rval == 0) { 25786 /* We were given a SCSI packet, continue. 
*/ 25787 break; 25788 } 25789 25790 if (i == 0) { 25791 if (wr_bp->b_flags & B_ERROR) { 25792 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25793 "no resources for dumping; " 25794 "error code: 0x%x, retrying", 25795 geterror(wr_bp)); 25796 } else { 25797 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25798 "no resources for dumping; retrying"); 25799 } 25800 } else if (i != (SD_NDUMP_RETRIES - 1)) { 25801 if (wr_bp->b_flags & B_ERROR) { 25802 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25803 "no resources for dumping; error code: " 25804 "0x%x, retrying\n", geterror(wr_bp)); 25805 } 25806 } else { 25807 if (wr_bp->b_flags & B_ERROR) { 25808 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25809 "no resources for dumping; " 25810 "error code: 0x%x, retries failed, " 25811 "giving up.\n", geterror(wr_bp)); 25812 } else { 25813 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25814 "no resources for dumping; " 25815 "retries failed, giving up.\n"); 25816 } 25817 mutex_enter(SD_MUTEX(un)); 25818 Restore_state(un); 25819 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 25820 mutex_exit(SD_MUTEX(un)); 25821 scsi_free_consistent_buf(wr_bp); 25822 } else { 25823 mutex_exit(SD_MUTEX(un)); 25824 } 25825 return (EIO); 25826 } 25827 drv_usecwait(10000); 25828 } 25829 25830 #if defined(__i386) || defined(__amd64) 25831 /* 25832 * save the resid from PARTIAL_DMA 25833 */ 25834 dma_resid = wr_pktp->pkt_resid; 25835 if (dma_resid != 0) 25836 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 25837 wr_pktp->pkt_resid = 0; 25838 #endif 25839 25840 /* SunBug 1222170 */ 25841 wr_pktp->pkt_flags = FLAG_NOINTR; 25842 25843 err = EIO; 25844 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 25845 25846 /* 25847 * Scsi_poll returns 0 (success) if the command completes and 25848 * the status block is STATUS_GOOD. We should only check 25849 * errors if this condition is not true. Even then we should 25850 * send our own request sense packet only if we have a check 25851 * condition and auto request sense has not been performed by 25852 * the hba. 25853 */ 25854 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 25855 25856 if ((sd_scsi_poll(un, wr_pktp) == 0) && 25857 (wr_pktp->pkt_resid == 0)) { 25858 err = SD_SUCCESS; 25859 break; 25860 } 25861 25862 /* 25863 * Check CMD_DEV_GONE 1st, give up if device is gone. 25864 */ 25865 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 25866 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25867 "Device is gone\n"); 25868 break; 25869 } 25870 25871 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 25872 SD_INFO(SD_LOG_DUMP, un, 25873 "sddump: write failed with CHECK, try # %d\n", i); 25874 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 25875 (void) sd_send_polled_RQS(un); 25876 } 25877 25878 continue; 25879 } 25880 25881 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 25882 int reset_retval = 0; 25883 25884 SD_INFO(SD_LOG_DUMP, un, 25885 "sddump: write failed with BUSY, try # %d\n", i); 25886 25887 if (un->un_f_lun_reset_enabled == TRUE) { 25888 reset_retval = scsi_reset(SD_ADDRESS(un), 25889 RESET_LUN); 25890 } 25891 if (reset_retval == 0) { 25892 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25893 } 25894 (void) sd_send_polled_RQS(un); 25895 25896 } else { 25897 SD_INFO(SD_LOG_DUMP, un, 25898 "sddump: write failed with 0x%x, try # %d\n", 25899 SD_GET_PKT_STATUS(wr_pktp), i); 25900 mutex_enter(SD_MUTEX(un)); 25901 sd_reset_target(un, wr_pktp); 25902 mutex_exit(SD_MUTEX(un)); 25903 } 25904 25905 /* 25906 * If we are not getting anywhere with lun/target resets, 25907 * let's reset the bus. 
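* With SD_NDUMP_RETRIES defined as 12 above, this fires on the
* seventh attempt (i == 6), leaving the remaining retries to run
* against a freshly reset bus.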
25908 */ 25909 if (i == SD_NDUMP_RETRIES/2) { 25910 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 25911 (void) sd_send_polled_RQS(un); 25912 } 25913 25914 } 25915 #if defined(__i386) || defined(__amd64) 25916 } /* dma_resid */ 25917 #endif 25918 25919 scsi_destroy_pkt(wr_pktp); 25920 mutex_enter(SD_MUTEX(un)); 25921 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 25922 mutex_exit(SD_MUTEX(un)); 25923 scsi_free_consistent_buf(wr_bp); 25924 } else { 25925 mutex_exit(SD_MUTEX(un)); 25926 } 25927 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 25928 return (err); 25929 } 25930 25931 /* 25932 * Function: sd_scsi_poll() 25933 * 25934 * Description: This is a wrapper for the scsi_poll call. 25935 * 25936 * Arguments: sd_lun - The unit structure 25937 * scsi_pkt - The scsi packet being sent to the device. 25938 * 25939 * Return Code: 0 - Command completed successfully with good status 25940 * -1 - Command failed. This could indicate a check condition 25941 * or other status value requiring recovery action. 25942 * 25943 */ 25944 25945 static int 25946 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 25947 { 25948 int status; 25949 25950 ASSERT(un != NULL); 25951 ASSERT(!mutex_owned(SD_MUTEX(un))); 25952 ASSERT(pktp != NULL); 25953 25954 status = SD_SUCCESS; 25955 25956 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 25957 pktp->pkt_flags |= un->un_tagflags; 25958 pktp->pkt_flags &= ~FLAG_NODISCON; 25959 } 25960 25961 status = sd_ddi_scsi_poll(pktp); 25962 /* 25963 * Scsi_poll returns 0 (success) if the command completes and the 25964 * status block is STATUS_GOOD. We should only check errors if this 25965 * condition is not true. Even then we should send our own request 25966 * sense packet only if we have a check condition and auto 25967 * request sense has not been performed by the hba. 25968 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 25969 */ 25970 if ((status != SD_SUCCESS) && 25971 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 25972 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 25973 (pktp->pkt_reason != CMD_DEV_GONE)) 25974 (void) sd_send_polled_RQS(un); 25975 25976 return (status); 25977 } 25978 25979 /* 25980 * Function: sd_send_polled_RQS() 25981 * 25982 * Description: This sends the request sense command to a device. 25983 * 25984 * Arguments: sd_lun - The unit structure 25985 * 25986 * Return Code: 0 - Command completed successfully with good status 25987 * -1 - Command failed. 
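*
* Note: rather than allocating, this routine reuses the
* pre-allocated un_rqs_pktp/un_rqs_bp pair guarded by the
* un_sense_isbusy flag (returning SD_FAILURE if the pair is
* already in use), so it is safe to call at crash dump time when
* no memory allocation can be relied upon.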
25988 * 25989 */ 25990 25991 static int 25992 sd_send_polled_RQS(struct sd_lun *un) 25993 { 25994 int ret_val; 25995 struct scsi_pkt *rqs_pktp; 25996 struct buf *rqs_bp; 25997 25998 ASSERT(un != NULL); 25999 ASSERT(!mutex_owned(SD_MUTEX(un))); 26000 26001 ret_val = SD_SUCCESS; 26002 26003 rqs_pktp = un->un_rqs_pktp; 26004 rqs_bp = un->un_rqs_bp; 26005 26006 mutex_enter(SD_MUTEX(un)); 26007 26008 if (un->un_sense_isbusy) { 26009 ret_val = SD_FAILURE; 26010 mutex_exit(SD_MUTEX(un)); 26011 return (ret_val); 26012 } 26013 26014 /* 26015 * If the request sense buffer (and packet) is not in use, 26016 * let's set the un_sense_isbusy and send our packet 26017 */ 26018 un->un_sense_isbusy = 1; 26019 rqs_pktp->pkt_resid = 0; 26020 rqs_pktp->pkt_reason = 0; 26021 rqs_pktp->pkt_flags |= FLAG_NOINTR; 26022 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 26023 26024 mutex_exit(SD_MUTEX(un)); 26025 26026 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 26027 " 0x%p\n", rqs_bp->b_un.b_addr); 26028 26029 /* 26030 * Can't send this to sd_scsi_poll, we wrap ourselves around the 26031 * axle - it has a call into us! 26032 */ 26033 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 26034 SD_INFO(SD_LOG_COMMON, un, 26035 "sd_send_polled_RQS: RQS failed\n"); 26036 } 26037 26038 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 26039 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 26040 26041 mutex_enter(SD_MUTEX(un)); 26042 un->un_sense_isbusy = 0; 26043 mutex_exit(SD_MUTEX(un)); 26044 26045 return (ret_val); 26046 } 26047 26048 /* 26049 * Defines needed for localized version of the scsi_poll routine. 26050 */ 26051 #define SD_CSEC 10000 /* usecs */ 26052 #define SD_SEC_TO_CSEC (1000000/SD_CSEC) 26053 26054 26055 /* 26056 * Function: sd_ddi_scsi_poll() 26057 * 26058 * Description: Localized version of the scsi_poll routine. The purpose is to 26059 * send a scsi_pkt to a device as a polled command. This version 26060 * is to ensure more robust handling of transport errors. 26061 * Specifically this routine cures not ready, coming ready 26062 * transition for power up and reset of sonoma's. This can take 26063 * up to 45 seconds for power-on and 20 seconds for reset of a 26064 * sonoma lun. 26065 * 26066 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 26067 * 26068 * Return Code: 0 - Command completed successfully with good status 26069 * -1 - Command failed. 26070 * 26071 */ 26072 26073 static int 26074 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 26075 { 26076 int busy_count; 26077 int timeout; 26078 int rval = SD_FAILURE; 26079 int savef; 26080 struct scsi_extended_sense *sensep; 26081 long savet; 26082 void (*savec)(); 26083 /* 26084 * The following is defined in machdep.c and is used in determining if 26085 * the scsi transport system will do polled I/O instead of interrupt 26086 * I/O when called from xx_dump(). 26087 */ 26088 extern int do_polled_io; 26089 26090 /* 26091 * save old flags in pkt, to restore at end 26092 */ 26093 savef = pkt->pkt_flags; 26094 savec = pkt->pkt_comp; 26095 savet = pkt->pkt_time; 26096 26097 pkt->pkt_flags |= FLAG_NOINTR; 26098 26099 /* 26100 * XXX there is nothing in the SCSA spec that states that we should not 26101 * do a callback for polled cmds; however, removing this will break sd 26102 * and probably other target drivers 26103 */ 26104 pkt->pkt_comp = NULL; 26105 26106 /* 26107 * we don't like a polled command without timeout. 26108 * 60 seconds seems long enough. 
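* (For scale: the retry loop below counts in SD_CSEC (10 msec)
* units, with SD_SEC_TO_CSEC = 100, so a 60 second pkt_time gives
* a budget of 6000 iterations; the branches that sleep for a full
* second also add SD_SEC_TO_CSEC - 1 to busy_count so that each
* such retry is charged one second against that budget.)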
26109 */ 26110 if (pkt->pkt_time == 0) { 26111 pkt->pkt_time = SCSI_POLL_TIMEOUT; 26112 } 26113 26114 /* 26115 * Send polled cmd. 26116 * 26117 * We do some error recovery for various errors. Tran_busy, 26118 * queue full, and non-dispatched commands are retried every 10 msec. 26119 * as they are typically transient failures. Busy status and Not 26120 * Ready are retried every second as this status takes a while to 26121 * change. Unit attention is retried for pkt_time (60) times 26122 * with no delay. 26123 */ 26124 timeout = pkt->pkt_time * SD_SEC_TO_CSEC; 26125 26126 for (busy_count = 0; busy_count < timeout; busy_count++) { 26127 int rc; 26128 int poll_delay; 26129 26130 /* 26131 * Initialize pkt status variables. 26132 */ 26133 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 26134 26135 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 26136 if (rc != TRAN_BUSY) { 26137 /* Transport failed - give up. */ 26138 break; 26139 } else { 26140 /* Transport busy - try again. */ 26141 poll_delay = 1 * SD_CSEC; /* 10 msec */ 26142 } 26143 } else { 26144 /* 26145 * Transport accepted - check pkt status. 26146 */ 26147 rc = (*pkt->pkt_scbp) & STATUS_MASK; 26148 if (pkt->pkt_reason == CMD_CMPLT && 26149 rc == STATUS_CHECK && 26150 pkt->pkt_state & STATE_ARQ_DONE) { 26151 struct scsi_arq_status *arqstat = 26152 (struct scsi_arq_status *)(pkt->pkt_scbp); 26153 26154 sensep = &arqstat->sts_sensedata; 26155 } else { 26156 sensep = NULL; 26157 } 26158 26159 if ((pkt->pkt_reason == CMD_CMPLT) && 26160 (rc == STATUS_GOOD)) { 26161 /* No error - we're done */ 26162 rval = SD_SUCCESS; 26163 break; 26164 26165 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 26166 /* Lost connection - give up */ 26167 break; 26168 26169 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 26170 (pkt->pkt_state == 0)) { 26171 /* Pkt not dispatched - try again. */ 26172 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 26173 26174 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26175 (rc == STATUS_QFULL)) { 26176 /* Queue full - try again. */ 26177 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 26178 26179 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26180 (rc == STATUS_BUSY)) { 26181 /* Busy - try again. */ 26182 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 26183 busy_count += (SD_SEC_TO_CSEC - 1); 26184 26185 } else if ((sensep != NULL) && 26186 (sensep->es_key == KEY_UNIT_ATTENTION)) { 26187 /* Unit Attention - try again */ 26188 busy_count += (SD_SEC_TO_CSEC - 1); /* 1 */ 26189 continue; 26190 26191 } else if ((sensep != NULL) && 26192 (sensep->es_key == KEY_NOT_READY) && 26193 (sensep->es_add_code == 0x04) && 26194 (sensep->es_qual_code == 0x01)) { 26195 /* Not ready -> ready - try again. */ 26196 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 26197 busy_count += (SD_SEC_TO_CSEC - 1); 26198 26199 } else { 26200 /* BAD status - give up. */ 26201 break; 26202 } 26203 } 26204 26205 if ((curthread->t_flag & T_INTR_THREAD) == 0 && 26206 !do_polled_io) { 26207 delay(drv_usectohz(poll_delay)); 26208 } else { 26209 /* we busy wait during cpr_dump or interrupt threads */ 26210 drv_usecwait(poll_delay); 26211 } 26212 } 26213 26214 pkt->pkt_flags = savef; 26215 pkt->pkt_comp = savec; 26216 pkt->pkt_time = savet; 26217 return (rval); 26218 } 26219 26220 26221 /* 26222 * Function: sd_persistent_reservation_in_read_keys 26223 * 26224 * Description: This routine is the driver entry point for handling CD-ROM 26225 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 26226 * by sending the SCSI-3 PRIN commands to the device. 
26227 * Processes the read keys command response by copying the 26228 * reservation key information into the user provided buffer. 26229 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 26230 * 26231 * Arguments: un - Pointer to soft state struct for the target. 26232 * usrp - user provided pointer to multihost Persistent In Read 26233 * Keys structure (mhioc_inkeys_t) 26234 * flag - this argument is a pass through to ddi_copyxxx() 26235 * directly from the mode argument of ioctl(). 26236 * 26237 * Return Code: 0 - Success 26238 * EACCES 26239 * ENOTSUP 26240 * errno return code from sd_send_scsi_cmd() 26241 * 26242 * Context: Can sleep. Does not return until command is completed. 26243 */ 26244 26245 static int 26246 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 26247 mhioc_inkeys_t *usrp, int flag) 26248 { 26249 #ifdef _MULTI_DATAMODEL 26250 struct mhioc_key_list32 li32; 26251 #endif 26252 sd_prin_readkeys_t *in; 26253 mhioc_inkeys_t *ptr; 26254 mhioc_key_list_t li; 26255 uchar_t *data_bufp; 26256 int data_len; 26257 int rval; 26258 size_t copysz; 26259 26260 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 26261 return (EINVAL); 26262 } 26263 bzero(&li, sizeof (mhioc_key_list_t)); 26264 26265 /* 26266 * Get the listsize from user 26267 */ 26268 #ifdef _MULTI_DATAMODEL 26269 26270 switch (ddi_model_convert_from(flag & FMODELS)) { 26271 case DDI_MODEL_ILP32: 26272 copysz = sizeof (struct mhioc_key_list32); 26273 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 26274 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26275 "sd_persistent_reservation_in_read_keys: " 26276 "failed ddi_copyin: mhioc_key_list32_t\n"); 26277 rval = EFAULT; 26278 goto done; 26279 } 26280 li.listsize = li32.listsize; 26281 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 26282 break; 26283 26284 case DDI_MODEL_NONE: 26285 copysz = sizeof (mhioc_key_list_t); 26286 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26287 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26288 "sd_persistent_reservation_in_read_keys: " 26289 "failed ddi_copyin: mhioc_key_list_t\n"); 26290 rval = EFAULT; 26291 goto done; 26292 } 26293 break; 26294 } 26295 26296 #else /* ! 
_MULTI_DATAMODEL */ 26297 copysz = sizeof (mhioc_key_list_t); 26298 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26299 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26300 "sd_persistent_reservation_in_read_keys: " 26301 "failed ddi_copyin: mhioc_key_list_t\n"); 26302 rval = EFAULT; 26303 goto done; 26304 } 26305 #endif 26306 26307 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 26308 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 26309 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26310 26311 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 26312 data_len, data_bufp)) != 0) { 26313 goto done; 26314 } 26315 in = (sd_prin_readkeys_t *)data_bufp; 26316 ptr->generation = BE_32(in->generation); 26317 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 26318 26319 /* 26320 * Return the min(listsize, listlen) keys 26321 */ 26322 #ifdef _MULTI_DATAMODEL 26323 26324 switch (ddi_model_convert_from(flag & FMODELS)) { 26325 case DDI_MODEL_ILP32: 26326 li32.listlen = li.listlen; 26327 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 26328 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26329 "sd_persistent_reservation_in_read_keys: " 26330 "failed ddi_copyout: mhioc_key_list32_t\n"); 26331 rval = EFAULT; 26332 goto done; 26333 } 26334 break; 26335 26336 case DDI_MODEL_NONE: 26337 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26338 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26339 "sd_persistent_reservation_in_read_keys: " 26340 "failed ddi_copyout: mhioc_key_list_t\n"); 26341 rval = EFAULT; 26342 goto done; 26343 } 26344 break; 26345 } 26346 26347 #else /* ! _MULTI_DATAMODEL */ 26348 26349 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26350 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26351 "sd_persistent_reservation_in_read_keys: " 26352 "failed ddi_copyout: mhioc_key_list_t\n"); 26353 rval = EFAULT; 26354 goto done; 26355 } 26356 26357 #endif /* _MULTI_DATAMODEL */ 26358 26359 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 26360 li.listsize * MHIOC_RESV_KEY_SIZE); 26361 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 26362 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26363 "sd_persistent_reservation_in_read_keys: " 26364 "failed ddi_copyout: keylist\n"); 26365 rval = EFAULT; 26366 } 26367 done: 26368 kmem_free(data_bufp, data_len); 26369 return (rval); 26370 } 26371 26372 26373 /* 26374 * Function: sd_persistent_reservation_in_read_resv 26375 * 26376 * Description: This routine is the driver entry point for handling CD-ROM 26377 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 26378 * by sending the SCSI-3 PRIN commands to the device. 26379 * Process the read persistent reservations command response by 26380 * copying the reservation information into the user provided 26381 * buffer. Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 26382 * 26383 * Arguments: un - Pointer to soft state struct for the target. 26384 * usrp - user provided pointer to multihost Persistent In Read 26385 * Reservations structure (mhioc_inresvs_t) 26386 * flag - this argument is a pass through to ddi_copyxxx() 26387 * directly from the mode argument of ioctl(). 26388 * 26389 * Return Code: 0 - Success 26390 * EACCES 26391 * ENOTSUP 26392 * errno return code from sd_send_scsi_cmd() 26393 * 26394 * Context: Can sleep. Does not return until command is completed.
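*
* Note: if the device reports more reservations than the caller
* allowed for (listlen > listsize), only listsize descriptors are
* copied out below; the caller can compare the returned listlen
* with its listsize and retry with a larger buffer.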
26395 */ 26396 26397 static int 26398 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 26399 mhioc_inresvs_t *usrp, int flag) 26400 { 26401 #ifdef _MULTI_DATAMODEL 26402 struct mhioc_resv_desc_list32 resvlist32; 26403 #endif 26404 sd_prin_readresv_t *in; 26405 mhioc_inresvs_t *ptr; 26406 sd_readresv_desc_t *readresv_ptr; 26407 mhioc_resv_desc_list_t resvlist; 26408 mhioc_resv_desc_t resvdesc; 26409 uchar_t *data_bufp; 26410 int data_len; 26411 int rval; 26412 int i; 26413 size_t copysz; 26414 mhioc_resv_desc_t *bufp; 26415 26416 if ((ptr = usrp) == NULL) { 26417 return (EINVAL); 26418 } 26419 26420 /* 26421 * Get the listsize from user 26422 */ 26423 #ifdef _MULTI_DATAMODEL 26424 switch (ddi_model_convert_from(flag & FMODELS)) { 26425 case DDI_MODEL_ILP32: 26426 copysz = sizeof (struct mhioc_resv_desc_list32); 26427 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 26428 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26429 "sd_persistent_reservation_in_read_resv: " 26430 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26431 rval = EFAULT; 26432 goto done; 26433 } 26434 resvlist.listsize = resvlist32.listsize; 26435 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 26436 break; 26437 26438 case DDI_MODEL_NONE: 26439 copysz = sizeof (mhioc_resv_desc_list_t); 26440 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26441 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26442 "sd_persistent_reservation_in_read_resv: " 26443 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26444 rval = EFAULT; 26445 goto done; 26446 } 26447 break; 26448 } 26449 #else /* ! _MULTI_DATAMODEL */ 26450 copysz = sizeof (mhioc_resv_desc_list_t); 26451 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26452 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26453 "sd_persistent_reservation_in_read_resv: " 26454 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26455 rval = EFAULT; 26456 goto done; 26457 } 26458 #endif /* ! _MULTI_DATAMODEL */ 26459 26460 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 26461 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 26462 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26463 26464 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 26465 data_len, data_bufp)) != 0) { 26466 goto done; 26467 } 26468 in = (sd_prin_readresv_t *)data_bufp; 26469 ptr->generation = BE_32(in->generation); 26470 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 26471 26472 /* 26473 * Return the min(listsize, listlen) keys 26474 */ 26475 #ifdef _MULTI_DATAMODEL 26476 26477 switch (ddi_model_convert_from(flag & FMODELS)) { 26478 case DDI_MODEL_ILP32: 26479 resvlist32.listlen = resvlist.listlen; 26480 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 26481 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26482 "sd_persistent_reservation_in_read_resv: " 26483 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26484 rval = EFAULT; 26485 goto done; 26486 } 26487 break; 26488 26489 case DDI_MODEL_NONE: 26490 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26491 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26492 "sd_persistent_reservation_in_read_resv: " 26493 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26494 rval = EFAULT; 26495 goto done; 26496 } 26497 break; 26498 } 26499 26500 #else /* ! _MULTI_DATAMODEL */ 26501 26502 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26503 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26504 "sd_persistent_reservation_in_read_resv: " 26505 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26506 rval = EFAULT; 26507 goto done; 26508 } 26509 26510 #endif /* !
_MULTI_DATAMODEL */ 26511 26512 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 26513 bufp = resvlist.list; 26514 copysz = sizeof (mhioc_resv_desc_t); 26515 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 26516 i++, readresv_ptr++, bufp++) { 26517 26518 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 26519 MHIOC_RESV_KEY_SIZE); 26520 resvdesc.type = readresv_ptr->type; 26521 resvdesc.scope = readresv_ptr->scope; 26522 resvdesc.scope_specific_addr = 26523 BE_32(readresv_ptr->scope_specific_addr); 26524 26525 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 26526 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26527 "sd_persistent_reservation_in_read_resv: " 26528 "failed ddi_copyout: resvlist\n"); 26529 rval = EFAULT; 26530 goto done; 26531 } 26532 } 26533 done: 26534 kmem_free(data_bufp, data_len); 26535 return (rval); 26536 } 26537 26538 26539 /* 26540 * Function: sr_change_blkmode() 26541 * 26542 * Description: This routine is the driver entry point for handling CD-ROM 26543 * block mode ioctl requests. Support for returning and changing 26544 * the current block size in use by the device is implemented. The 26545 * LBA size is changed via a MODE SELECT Block Descriptor. 26546 * 26547 * This routine issues a mode sense with an allocation length of 26548 * 12 bytes for the mode page header and a single block descriptor. 26549 * 26550 * Arguments: dev - the device 'dev_t' 26551 * cmd - the request type; one of CDROMGBLKMODE (get) or 26552 * CDROMSBLKMODE (set) 26553 * data - current block size or requested block size 26554 * flag - this argument is a pass through to ddi_copyxxx() directly 26555 * from the mode argument of ioctl(). 26556 * 26557 * Return Code: the code returned by sd_send_scsi_cmd() 26558 * EINVAL if invalid arguments are provided 26559 * EFAULT if ddi_copyxxx() fails 26560 * ENXIO if fail ddi_get_soft_state 26561 * EIO if invalid mode sense block descriptor length 26562 * 26563 */ 26564 26565 static int 26566 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 26567 { 26568 struct sd_lun *un = NULL; 26569 struct mode_header *sense_mhp, *select_mhp; 26570 struct block_descriptor *sense_desc, *select_desc; 26571 int current_bsize; 26572 int rval = EINVAL; 26573 uchar_t *sense = NULL; 26574 uchar_t *select = NULL; 26575 26576 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 26577 26578 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26579 return (ENXIO); 26580 } 26581 26582 /* 26583 * The block length is changed via the Mode Select block descriptor; the 26584 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 26585 * required as part of this routine. Therefore the mode sense allocation 26586 * length is specified to be the length of a mode page header and a 26587 * block descriptor.
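* (BUFLEN_CHG_BLK_MODE is expected to be MODE_HEADER_LENGTH (4)
* plus MODE_BLK_DESC_LENGTH (8), i.e. the 12 bytes referred to
* above.)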
26588 */ 26589 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26590 26591 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26592 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 26593 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26594 "sr_change_blkmode: Mode Sense Failed\n"); 26595 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26596 return (rval); 26597 } 26598 26599 /* Check the block descriptor len to handle only 1 block descriptor */ 26600 sense_mhp = (struct mode_header *)sense; 26601 if ((sense_mhp->bdesc_length == 0) || 26602 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 26603 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26604 "sr_change_blkmode: Mode Sense returned invalid block" 26605 " descriptor length\n"); 26606 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26607 return (EIO); 26608 } 26609 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 26610 current_bsize = ((sense_desc->blksize_hi << 16) | 26611 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 26612 26613 /* Process command */ 26614 switch (cmd) { 26615 case CDROMGBLKMODE: 26616 /* Return the block size obtained during the mode sense */ 26617 if (ddi_copyout(&current_bsize, (void *)data, 26618 sizeof (int), flag) != 0) 26619 rval = EFAULT; 26620 break; 26621 case CDROMSBLKMODE: 26622 /* Validate the requested block size */ 26623 switch (data) { 26624 case CDROM_BLK_512: 26625 case CDROM_BLK_1024: 26626 case CDROM_BLK_2048: 26627 case CDROM_BLK_2056: 26628 case CDROM_BLK_2336: 26629 case CDROM_BLK_2340: 26630 case CDROM_BLK_2352: 26631 case CDROM_BLK_2368: 26632 case CDROM_BLK_2448: 26633 case CDROM_BLK_2646: 26634 case CDROM_BLK_2647: 26635 break; 26636 default: 26637 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26638 "sr_change_blkmode: " 26639 "Block Size '%ld' Not Supported\n", data); 26640 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26641 return (EINVAL); 26642 } 26643 26644 /* 26645 * The current block size matches the requested block size so 26646 * there is no need to send the mode select to change the size 26647 */ 26648 if (current_bsize == data) { 26649 break; 26650 } 26651 26652 /* Build the select data for the requested block size */ 26653 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26654 select_mhp = (struct mode_header *)select; 26655 select_desc = 26656 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 26657 /* 26658 * The LBA size is changed via the block descriptor, so the 26659 * descriptor is built according to the user data 26660 */ 26661 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 26662 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 26663 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 26664 select_desc->blksize_lo = (char)((data) & 0x000000ff); 26665 26666 /* Send the mode select for the requested block size */ 26667 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 26668 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26669 SD_PATH_STANDARD)) != 0) { 26670 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26671 "sr_change_blkmode: Mode Select Failed\n"); 26672 /* 26673 * The mode select failed for the requested block size, 26674 * so reset the data for the original block size and 26675 * send it to the target. The error is indicated by the 26676 * return value for the failed mode select.
26677 */ 26678 select_desc->blksize_hi = sense_desc->blksize_hi; 26679 select_desc->blksize_mid = sense_desc->blksize_mid; 26680 select_desc->blksize_lo = sense_desc->blksize_lo; 26681 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 26682 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26683 SD_PATH_STANDARD); 26684 } else { 26685 ASSERT(!mutex_owned(SD_MUTEX(un))); 26686 mutex_enter(SD_MUTEX(un)); 26687 sd_update_block_info(un, (uint32_t)data, 0); 26688 26689 mutex_exit(SD_MUTEX(un)); 26690 } 26691 break; 26692 default: 26693 /* should not reach here, but check anyway */ 26694 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26695 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 26696 rval = EINVAL; 26697 break; 26698 } 26699 26700 if (select) { 26701 kmem_free(select, BUFLEN_CHG_BLK_MODE); 26702 } 26703 if (sense) { 26704 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26705 } 26706 return (rval); 26707 } 26708 26709 26710 /* 26711 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 26712 * implement driver support for getting and setting the CD speed. The command 26713 * set used will be based on the device type. If the device has not been 26714 * identified as MMC the Toshiba vendor specific mode page will be used. If 26715 * the device is MMC but does not support the Real Time Streaming feature 26716 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 26717 * be used to read the speed. 26718 */ 26719 26720 /* 26721 * Function: sr_change_speed() 26722 * 26723 * Description: This routine is the driver entry point for handling CD-ROM 26724 * drive speed ioctl requests for devices supporting the Toshiba 26725 * vendor specific drive speed mode page. Support for returning 26726 * and changing the current drive speed in use by the device is 26727 * implemented. 26728 * 26729 * Arguments: dev - the device 'dev_t' 26730 * cmd - the request type; one of CDROMGDRVSPEED (get) or 26731 * CDROMSDRVSPEED (set) 26732 * data - current drive speed or requested drive speed 26733 * flag - this argument is a pass through to ddi_copyxxx() directly 26734 * from the mode argument of ioctl(). 26735 * 26736 * Return Code: the code returned by sd_send_scsi_cmd() 26737 * EINVAL if invalid arguments are provided 26738 * EFAULT if ddi_copyxxx() fails 26739 * ENXIO if fail ddi_get_soft_state 26740 * EIO if invalid mode sense block descriptor length 26741 */ 26742 26743 static int 26744 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26745 { 26746 struct sd_lun *un = NULL; 26747 struct mode_header *sense_mhp, *select_mhp; 26748 struct mode_speed *sense_page, *select_page; 26749 int current_speed; 26750 int rval = EINVAL; 26751 int bd_len; 26752 uchar_t *sense = NULL; 26753 uchar_t *select = NULL; 26754 26755 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26756 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26757 return (ENXIO); 26758 } 26759 26760 /* 26761 * Note: The drive speed is being modified here according to a Toshiba 26762 * vendor specific mode page (0x31). 
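* Note that the page's speed field is a code rather than a byte
* rate; in particular the value 0x2 denotes 12x, which is why the
* get and set paths below translate between 0x2 and
* CDROM_TWELVE_SPEED.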
26763 */ 26764 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26765 26766 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26767 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 26768 SD_PATH_STANDARD)) != 0) { 26769 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26770 "sr_change_speed: Mode Sense Failed\n"); 26771 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26772 return (rval); 26773 } 26774 sense_mhp = (struct mode_header *)sense; 26775 26776 /* Check the block descriptor len to handle only 1 block descriptor */ 26777 bd_len = sense_mhp->bdesc_length; 26778 if (bd_len > MODE_BLK_DESC_LENGTH) { 26779 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26780 "sr_change_speed: Mode Sense returned invalid block " 26781 "descriptor length\n"); 26782 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26783 return (EIO); 26784 } 26785 26786 sense_page = (struct mode_speed *) 26787 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 26788 current_speed = sense_page->speed; 26789 26790 /* Process command */ 26791 switch (cmd) { 26792 case CDROMGDRVSPEED: 26793 /* Return the drive speed obtained during the mode sense */ 26794 if (current_speed == 0x2) { 26795 current_speed = CDROM_TWELVE_SPEED; 26796 } 26797 if (ddi_copyout(&current_speed, (void *)data, 26798 sizeof (int), flag) != 0) { 26799 rval = EFAULT; 26800 } 26801 break; 26802 case CDROMSDRVSPEED: 26803 /* Validate the requested drive speed */ 26804 switch ((uchar_t)data) { 26805 case CDROM_TWELVE_SPEED: 26806 data = 0x2; 26807 /*FALLTHROUGH*/ 26808 case CDROM_NORMAL_SPEED: 26809 case CDROM_DOUBLE_SPEED: 26810 case CDROM_QUAD_SPEED: 26811 case CDROM_MAXIMUM_SPEED: 26812 break; 26813 default: 26814 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26815 "sr_change_speed: " 26816 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 26817 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26818 return (EINVAL); 26819 } 26820 26821 /* 26822 * The current drive speed matches the requested drive speed so 26823 * there is no need to send the mode select to change the speed 26824 */ 26825 if (current_speed == data) { 26826 break; 26827 } 26828 26829 /* Build the select data for the requested drive speed */ 26830 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26831 select_mhp = (struct mode_header *)select; 26832 select_mhp->bdesc_length = 0; 26833 select_page = 26834 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 26837 select_page->mode_page.code = CDROM_MODE_SPEED; 26838 select_page->mode_page.length = 2; 26839 select_page->speed = (uchar_t)data; 26840 26841 /* Send the mode select for the requested drive speed */ 26842 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26843 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26844 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 26845 /* 26846 * The mode select failed for the requested drive speed, 26847 * so reset the data for the original drive speed and 26848 * send it to the target. The error is indicated by the 26849 * return value for the failed mode select.
26850 */ 26851 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26852 "sr_change_speed: Mode Select Failed\n"); 26853 select_page->speed = sense_page->speed; 26854 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26855 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26856 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26857 } 26858 break; 26859 default: 26860 /* should not reach here, but check anyway */ 26861 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26862 "sr_change_speed: Command '%x' Not Supported\n", cmd); 26863 rval = EINVAL; 26864 break; 26865 } 26866 26867 if (select) { 26868 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 26869 } 26870 if (sense) { 26871 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26872 } 26873 26874 return (rval); 26875 } 26876 26877 26878 /* 26879 * Function: sr_atapi_change_speed() 26880 * 26881 * Description: This routine is the driver entry point for handling CD-ROM 26882 * drive speed ioctl requests for MMC devices that do not support 26883 * the Real Time Streaming feature (0x107). 26884 * 26885 * Note: This routine will use the SET SPEED command which may not 26886 * be supported by all devices. 26887 * 26888 * Arguments: dev - the device 'dev_t' 26889 * cmd - the request type; one of CDROMGDRVSPEED (get) or 26890 * CDROMSDRVSPEED (set) 26891 * data - current drive speed or requested drive speed 26892 * flag - this argument is a pass through to ddi_copyxxx() directly 26893 * from the mode argument of ioctl(). 26894 * 26895 * Return Code: the code returned by sd_send_scsi_cmd() 26896 * EINVAL if invalid arguments are provided 26897 * EFAULT if ddi_copyxxx() fails 26898 * ENXIO if fail ddi_get_soft_state 26899 * EIO if invalid mode sense block descriptor length 26900 */ 26901 26902 static int 26903 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26904 { 26905 struct sd_lun *un; 26906 struct uscsi_cmd *com = NULL; 26907 struct mode_header_grp2 *sense_mhp; 26908 uchar_t *sense_page; 26909 uchar_t *sense = NULL; 26910 char cdb[CDB_GROUP5]; 26911 int bd_len; 26912 int current_speed = 0; 26913 int max_speed = 0; 26914 int rval; 26915 26916 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26917 26918 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26919 return (ENXIO); 26920 } 26921 26922 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 26923 26924 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 26925 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 26926 SD_PATH_STANDARD)) != 0) { 26927 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26928 "sr_atapi_change_speed: Mode Sense Failed\n"); 26929 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26930 return (rval); 26931 } 26932 26933 /* Check the block descriptor len to handle only 1 block descriptor */ 26934 sense_mhp = (struct mode_header_grp2 *)sense; 26935 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 26936 if (bd_len > MODE_BLK_DESC_LENGTH) { 26937 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26938 "sr_atapi_change_speed: Mode Sense returned invalid " 26939 "block descriptor length\n"); 26940 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26941 return (EIO); 26942 } 26943 26944 /* Calculate the current and maximum drive speeds */ 26945 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26946 current_speed = (sense_page[14] << 8) | sense_page[15]; 26947 max_speed = (sense_page[8] << 8) | sense_page[9]; 26948 26949 /* Process the command */ 26950 switch (cmd) { 26951 case CDROMGDRVSPEED: 26952 current_speed /= SD_SPEED_1X; 26953 if
(ddi_copyout(&current_speed, (void *)data, 26954 sizeof (int), flag) != 0) 26955 rval = EFAULT; 26956 break; 26957 case CDROMSDRVSPEED: 26958 /* Convert the speed code to KB/sec */ 26959 switch ((uchar_t)data) { 26960 case CDROM_NORMAL_SPEED: 26961 current_speed = SD_SPEED_1X; 26962 break; 26963 case CDROM_DOUBLE_SPEED: 26964 current_speed = 2 * SD_SPEED_1X; 26965 break; 26966 case CDROM_QUAD_SPEED: 26967 current_speed = 4 * SD_SPEED_1X; 26968 break; 26969 case CDROM_TWELVE_SPEED: 26970 current_speed = 12 * SD_SPEED_1X; 26971 break; 26972 case CDROM_MAXIMUM_SPEED: 26973 current_speed = 0xffff; 26974 break; 26975 default: 26976 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26977 "sr_atapi_change_speed: invalid drive speed %d\n", 26978 (uchar_t)data); 26979 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26980 return (EINVAL); 26981 } 26982 26983 /* Check the request against the drive's max speed. */ 26984 if (current_speed != 0xffff) { 26985 if (current_speed > max_speed) { 26986 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26987 return (EINVAL); 26988 } 26989 } 26990 26991 /* 26992 * Build and send the SET SPEED command 26993 * 26994 * Note: The SET SPEED (0xBB) command used in this routine is 26995 * obsolete per the SCSI MMC spec but still supported in the 26996 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI; 26997 * therefore the command is still implemented in this routine. 26998 */ 26999 bzero(cdb, sizeof (cdb)); 27000 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 27001 cdb[2] = (uchar_t)(current_speed >> 8); 27002 cdb[3] = (uchar_t)current_speed; 27003 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27004 com->uscsi_cdb = (caddr_t)cdb; 27005 com->uscsi_cdblen = CDB_GROUP5; 27006 com->uscsi_bufaddr = NULL; 27007 com->uscsi_buflen = 0; 27008 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27009 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, 0, 27010 UIO_SYSSPACE, SD_PATH_STANDARD); 27011 break; 27012 default: 27013 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27014 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 27015 rval = EINVAL; 27016 } 27017 27018 if (sense) { 27019 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 27020 } 27021 if (com) { 27022 kmem_free(com, sizeof (*com)); 27023 } 27024 return (rval); 27025 } 27026 27027 27028 /* 27029 * Function: sr_pause_resume() 27030 * 27031 * Description: This routine is the driver entry point for handling CD-ROM 27032 * pause/resume ioctl requests. This only affects the audio play 27033 * operation. 27034 * 27035 * Arguments: dev - the device 'dev_t' 27036 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 27037 * for setting the resume bit of the cdb.
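*
* (Byte 8, bit 0 of the PAUSE/RESUME cdb is the resume bit: it is
* set to 1 to resume play and 0 to pause it, as done in the
* switch statement below.)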
27038 * 27039 * Return Code: the code returned by sd_send_scsi_cmd() 27040 * EINVAL if invalid mode specified 27041 * 27042 */ 27043 27044 static int 27045 sr_pause_resume(dev_t dev, int cmd) 27046 { 27047 struct sd_lun *un; 27048 struct uscsi_cmd *com; 27049 char cdb[CDB_GROUP1]; 27050 int rval; 27051 27052 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27053 return (ENXIO); 27054 } 27055 27056 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27057 bzero(cdb, CDB_GROUP1); 27058 cdb[0] = SCMD_PAUSE_RESUME; 27059 switch (cmd) { 27060 case CDROMRESUME: 27061 cdb[8] = 1; 27062 break; 27063 case CDROMPAUSE: 27064 cdb[8] = 0; 27065 break; 27066 default: 27067 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 27068 " Command '%x' Not Supported\n", cmd); 27069 rval = EINVAL; 27070 goto done; 27071 } 27072 27073 com->uscsi_cdb = cdb; 27074 com->uscsi_cdblen = CDB_GROUP1; 27075 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27076 27077 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27078 UIO_SYSSPACE, SD_PATH_STANDARD); 27079 27080 done: 27081 kmem_free(com, sizeof (*com)); 27082 return (rval); 27083 } 27084 27085 27086 /* 27087 * Function: sr_play_msf() 27088 * 27089 * Description: This routine is the driver entry point for handling CD-ROM 27090 * ioctl requests to output the audio signals at the specified 27091 * starting address and continue the audio play until the specified 27092 * ending address (CDROMPLAYMSF) The address is in Minute Second 27093 * Frame (MSF) format. 27094 * 27095 * Arguments: dev - the device 'dev_t' 27096 * data - pointer to user provided audio msf structure, 27097 * specifying start/end addresses. 27098 * flag - this argument is a pass through to ddi_copyxxx() 27099 * directly from the mode argument of ioctl(). 
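* Note: for drives flagged with un_f_cfg_playmsf_bcd the MSF
* fields are BCD encoded before being placed in the cdb, so e.g.
* a minute value of 12 is sent as 0x12.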
27100 * 27101 * Return Code: the code returned by sd_send_scsi_cmd() 27102 * EFAULT if ddi_copyxxx() fails 27103 * ENXIO if fail ddi_get_soft_state 27104 * EINVAL if data pointer is NULL 27105 */ 27106 27107 static int 27108 sr_play_msf(dev_t dev, caddr_t data, int flag) 27109 { 27110 struct sd_lun *un; 27111 struct uscsi_cmd *com; 27112 struct cdrom_msf msf_struct; 27113 struct cdrom_msf *msf = &msf_struct; 27114 char cdb[CDB_GROUP1]; 27115 int rval; 27116 27117 if (data == NULL) { 27118 return (EINVAL); 27119 } 27120 27121 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27122 return (ENXIO); 27123 } 27124 27125 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 27126 return (EFAULT); 27127 } 27128 27129 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27130 bzero(cdb, CDB_GROUP1); 27131 cdb[0] = SCMD_PLAYAUDIO_MSF; 27132 if (un->un_f_cfg_playmsf_bcd == TRUE) { 27133 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 27134 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 27135 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 27136 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 27137 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 27138 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 27139 } else { 27140 cdb[3] = msf->cdmsf_min0; 27141 cdb[4] = msf->cdmsf_sec0; 27142 cdb[5] = msf->cdmsf_frame0; 27143 cdb[6] = msf->cdmsf_min1; 27144 cdb[7] = msf->cdmsf_sec1; 27145 cdb[8] = msf->cdmsf_frame1; 27146 } 27147 com->uscsi_cdb = cdb; 27148 com->uscsi_cdblen = CDB_GROUP1; 27149 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27150 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27151 UIO_SYSSPACE, SD_PATH_STANDARD); 27152 kmem_free(com, sizeof (*com)); 27153 return (rval); 27154 } 27155 27156 27157 /* 27158 * Function: sr_play_trkind() 27159 * 27160 * Description: This routine is the driver entry point for handling CD-ROM 27161 * ioctl requests to output the audio signals at the specified 27162 * starting address and continue the audio play until the specified 27163 * ending address (CDROMPLAYTRKIND). The address is in Track Index 27164 * format. 27165 * 27166 * Arguments: dev - the device 'dev_t' 27167 * data - pointer to user provided audio track/index structure, 27168 * specifying start/end addresses. 27169 * flag - this argument is a pass through to ddi_copyxxx() 27170 * directly from the mode argument of ioctl(). 
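* (The start and end positions are passed as track/index pairs in
* bytes 4-5 and 7-8 of the PLAY AUDIO TRACK/INDEX
* (SCMD_PLAYAUDIO_TI) cdb built below.)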
27171 * 27172 * Return Code: the code returned by sd_send_scsi_cmd() 27173 * EFAULT if ddi_copyxxx() fails 27174 * ENXIO if fail ddi_get_soft_state 27175 * EINVAL if data pointer is NULL 27176 */ 27177 27178 static int 27179 sr_play_trkind(dev_t dev, caddr_t data, int flag) 27180 { 27181 struct cdrom_ti ti_struct; 27182 struct cdrom_ti *ti = &ti_struct; 27183 struct uscsi_cmd *com = NULL; 27184 char cdb[CDB_GROUP1]; 27185 int rval; 27186 27187 if (data == NULL) { 27188 return (EINVAL); 27189 } 27190 27191 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 27192 return (EFAULT); 27193 } 27194 27195 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27196 bzero(cdb, CDB_GROUP1); 27197 cdb[0] = SCMD_PLAYAUDIO_TI; 27198 cdb[4] = ti->cdti_trk0; 27199 cdb[5] = ti->cdti_ind0; 27200 cdb[7] = ti->cdti_trk1; 27201 cdb[8] = ti->cdti_ind1; 27202 com->uscsi_cdb = cdb; 27203 com->uscsi_cdblen = CDB_GROUP1; 27204 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27205 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27206 UIO_SYSSPACE, SD_PATH_STANDARD); 27207 kmem_free(com, sizeof (*com)); 27208 return (rval); 27209 } 27210 27211 27212 /* 27213 * Function: sr_read_all_subcodes() 27214 * 27215 * Description: This routine is the driver entry point for handling CD-ROM 27216 * ioctl requests to return raw subcode data while the target is 27217 * playing audio (CDROMSUBCODE). 27218 * 27219 * Arguments: dev - the device 'dev_t' 27220 * data - pointer to user provided cdrom subcode structure, 27221 * specifying the transfer length and address. 27222 * flag - this argument is a pass through to ddi_copyxxx() 27223 * directly from the mode argument of ioctl(). 27224 * 27225 * Return Code: the code returned by sd_send_scsi_cmd() 27226 * EFAULT if ddi_copyxxx() fails 27227 * ENXIO if fail ddi_get_soft_state 27228 * EINVAL if data pointer is NULL 27229 */ 27230 27231 static int 27232 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 27233 { 27234 struct sd_lun *un = NULL; 27235 struct uscsi_cmd *com = NULL; 27236 struct cdrom_subcode *subcode = NULL; 27237 int rval; 27238 size_t buflen; 27239 char cdb[CDB_GROUP5]; 27240 27241 #ifdef _MULTI_DATAMODEL 27242 /* To support ILP32 applications in an LP64 world */ 27243 struct cdrom_subcode32 cdrom_subcode32; 27244 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 27245 #endif 27246 if (data == NULL) { 27247 return (EINVAL); 27248 } 27249 27250 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27251 return (ENXIO); 27252 } 27253 27254 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 27255 27256 #ifdef _MULTI_DATAMODEL 27257 switch (ddi_model_convert_from(flag & FMODELS)) { 27258 case DDI_MODEL_ILP32: 27259 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 27260 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27261 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27262 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27263 return (EFAULT); 27264 } 27265 /* Convert the ILP32 uscsi data from the application to LP64 */ 27266 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 27267 break; 27268 case DDI_MODEL_NONE: 27269 if (ddi_copyin(data, subcode, 27270 sizeof (struct cdrom_subcode), flag)) { 27271 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27272 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27273 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27274 return (EFAULT); 27275 } 27276 break; 27277 } 27278 #else /* ! 
_MULTI_DATAMODEL */ 27279 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 27280 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27281 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27282 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27283 return (EFAULT); 27284 } 27285 #endif /* _MULTI_DATAMODEL */ 27286 27287 /* 27288 * Since MMC-2 expects max 3 bytes for length, check if the 27289 * length input is greater than 3 bytes 27290 */ 27291 if ((subcode->cdsc_length & 0xFF000000) != 0) { 27292 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27293 "sr_read_all_subcodes: " 27294 "cdrom transfer length too large: %d (limit %d)\n", 27295 subcode->cdsc_length, 0xFFFFFF); 27296 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27297 return (EINVAL); 27298 } 27299 27300 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 27301 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27302 bzero(cdb, CDB_GROUP5); 27303 27304 if (un->un_f_mmc_cap == TRUE) { 27305 cdb[0] = (char)SCMD_READ_CD; 27306 cdb[2] = (char)0xff; 27307 cdb[3] = (char)0xff; 27308 cdb[4] = (char)0xff; 27309 cdb[5] = (char)0xff; 27310 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27311 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27312 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 27313 cdb[10] = 1; 27314 } else { 27315 /* 27316 * Note: A vendor specific command (0xDF) is being used here to 27317 * request a read of all subcodes. 27318 */ 27319 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 27320 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 27321 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27322 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27323 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 27324 } 27325 com->uscsi_cdb = cdb; 27326 com->uscsi_cdblen = CDB_GROUP5; 27327 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 27328 com->uscsi_buflen = buflen; 27329 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27330 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 27331 UIO_SYSSPACE, SD_PATH_STANDARD); 27332 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27333 kmem_free(com, sizeof (*com)); 27334 return (rval); 27335 } 27336 27337 27338 /* 27339 * Function: sr_read_subchannel() 27340 * 27341 * Description: This routine is the driver entry point for handling CD-ROM 27342 * ioctl requests to return the Q sub-channel data of the CD 27343 * current position block (CDROMSUBCHNL). The data includes the 27344 * track number, index number, absolute CD-ROM address (LBA or MSF 27345 * format per the user), track relative CD-ROM address (LBA or MSF 27346 * format per the user), control data and audio status. 27347 * 27348 * Arguments: dev - the device 'dev_t' 27349 * data - pointer to user provided cdrom sub-channel structure 27350 * flag - this argument is a pass through to ddi_copyxxx() 27351 * directly from the mode argument of ioctl().
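*
* (The 16 byte response parsed below carries the audio status in
* byte 1, the ADR and control nibbles in byte 5, the track and
* index numbers in bytes 6-7, and the absolute and relative
* addresses in bytes 8-11 and 12-15 respectively.)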
27352 * 27353 * Return Code: the code returned by sd_send_scsi_cmd() 27354 * EFAULT if ddi_copyxxx() fails 27355 * ENXIO if fail ddi_get_soft_state 27356 * EINVAL if data pointer is NULL 27357 */ 27358 27359 static int 27360 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 27361 { 27362 struct sd_lun *un; 27363 struct uscsi_cmd *com; 27364 struct cdrom_subchnl subchanel; 27365 struct cdrom_subchnl *subchnl = &subchanel; 27366 char cdb[CDB_GROUP1]; 27367 caddr_t buffer; 27368 int rval; 27369 27370 if (data == NULL) { 27371 return (EINVAL); 27372 } 27373 27374 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27375 (un->un_state == SD_STATE_OFFLINE)) { 27376 return (ENXIO); 27377 } 27378 27379 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 27380 return (EFAULT); 27381 } 27382 27383 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 27384 bzero(cdb, CDB_GROUP1); 27385 cdb[0] = SCMD_READ_SUBCHANNEL; 27386 /* Set the MSF bit based on the user requested address format */ 27387 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 27388 /* 27389 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 27390 * returned 27391 */ 27392 cdb[2] = 0x40; 27393 /* 27394 * Set byte 3 to specify the return data format. A value of 0x01 27395 * indicates that the CD-ROM current position should be returned. 27396 */ 27397 cdb[3] = 0x01; 27398 cdb[8] = 0x10; 27399 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27400 com->uscsi_cdb = cdb; 27401 com->uscsi_cdblen = CDB_GROUP1; 27402 com->uscsi_bufaddr = buffer; 27403 com->uscsi_buflen = 16; 27404 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27405 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27406 UIO_SYSSPACE, SD_PATH_STANDARD); 27407 if (rval != 0) { 27408 kmem_free(buffer, 16); 27409 kmem_free(com, sizeof (*com)); 27410 return (rval); 27411 } 27412 27413 /* Process the returned Q sub-channel data */ 27414 subchnl->cdsc_audiostatus = buffer[1]; 27415 subchnl->cdsc_adr = (buffer[5] & 0xF0); 27416 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 27417 subchnl->cdsc_trk = buffer[6]; 27418 subchnl->cdsc_ind = buffer[7]; 27419 if (subchnl->cdsc_format & CDROM_LBA) { 27420 subchnl->cdsc_absaddr.lba = 27421 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27422 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27423 subchnl->cdsc_reladdr.lba = 27424 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 27425 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 27426 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 27427 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 27428 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 27429 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 27430 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 27431 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 27432 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 27433 } else { 27434 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 27435 subchnl->cdsc_absaddr.msf.second = buffer[10]; 27436 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 27437 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 27438 subchnl->cdsc_reladdr.msf.second = buffer[14]; 27439 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 27440 } 27441 kmem_free(buffer, 16); 27442 kmem_free(com, sizeof (*com)); 27443 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 27444 != 0) { 27445 return (EFAULT); 27446 } 27447 return (rval); 27448 } 27449 27450 27451 /* 27452 * Function: 
sr_read_tocentry() 27453 * 27454 * Description: This routine is the driver entry point for handling CD-ROM 27455 * ioctl requests to read from the Table of Contents (TOC) 27456 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 27457 * fields, the starting address (LBA or MSF format per the user) 27458 * and the data mode if the user specified track is a data track. 27459 * 27460 * Note: The READ HEADER (0x44) command used in this routine is 27461 * obsolete per the SCSI MMC spec but still supported in the 27462 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI; 27463 * therefore the command is still implemented in this routine. 27464 * 27465 * Arguments: dev - the device 'dev_t' 27466 * data - pointer to user provided toc entry structure, 27467 * specifying the track # and the address format 27468 * (LBA or MSF). 27469 * flag - this argument is a pass through to ddi_copyxxx() 27470 * directly from the mode argument of ioctl(). 27471 * 27472 * Return Code: the code returned by sd_send_scsi_cmd() 27473 * EFAULT if ddi_copyxxx() fails 27474 * ENXIO if fail ddi_get_soft_state 27475 * EINVAL if data pointer is NULL 27476 */ 27477 27478 static int 27479 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 27480 { 27481 struct sd_lun *un = NULL; 27482 struct uscsi_cmd *com; 27483 struct cdrom_tocentry toc_entry; 27484 struct cdrom_tocentry *entry = &toc_entry; 27485 caddr_t buffer; 27486 int rval; 27487 char cdb[CDB_GROUP1]; 27488 27489 if (data == NULL) { 27490 return (EINVAL); 27491 } 27492 27493 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27494 (un->un_state == SD_STATE_OFFLINE)) { 27495 return (ENXIO); 27496 } 27497 27498 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 27499 return (EFAULT); 27500 } 27501 27502 /* Validate the requested track and address format */ 27503 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 27504 return (EINVAL); 27505 } 27506 27507 if (entry->cdte_track == 0) { 27508 return (EINVAL); 27509 } 27510 27511 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 27512 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27513 bzero(cdb, CDB_GROUP1); 27514 27515 cdb[0] = SCMD_READ_TOC; 27516 /* Set the MSF bit based on the user requested address format */ 27517 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 27518 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 27519 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 27520 } else { 27521 cdb[6] = entry->cdte_track; 27522 } 27523 27524 /* 27525 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
27526 * (4 byte TOC response header + 8 byte track descriptor) 27527 */ 27528 cdb[8] = 12; 27529 com->uscsi_cdb = cdb; 27530 com->uscsi_cdblen = CDB_GROUP1; 27531 com->uscsi_bufaddr = buffer; 27532 com->uscsi_buflen = 0x0C; 27533 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 27534 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27535 UIO_SYSSPACE, SD_PATH_STANDARD); 27536 if (rval != 0) { 27537 kmem_free(buffer, 12); 27538 kmem_free(com, sizeof (*com)); 27539 return (rval); 27540 } 27541 27542 /* Process the toc entry */ 27543 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 27544 entry->cdte_ctrl = (buffer[5] & 0x0F); 27545 if (entry->cdte_format & CDROM_LBA) { 27546 entry->cdte_addr.lba = 27547 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27548 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27549 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 27550 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 27551 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 27552 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 27553 /* 27554 * Send a READ TOC command using the LBA address format to get 27555 * the LBA for the track requested so it can be used in the 27556 * READ HEADER request 27557 * 27558 * Note: The MSF bit of the READ HEADER command specifies the 27559 * output format. The block address specified in that command 27560 * must be in LBA format. 27561 */ 27562 cdb[1] = 0; 27563 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27564 UIO_SYSSPACE, SD_PATH_STANDARD); 27565 if (rval != 0) { 27566 kmem_free(buffer, 12); 27567 kmem_free(com, sizeof (*com)); 27568 return (rval); 27569 } 27570 } else { 27571 entry->cdte_addr.msf.minute = buffer[9]; 27572 entry->cdte_addr.msf.second = buffer[10]; 27573 entry->cdte_addr.msf.frame = buffer[11]; 27574 /* 27575 * Send a READ TOC command using the LBA address format to get 27576 * the LBA for the track requested so it can be used in the 27577 * READ HEADER request 27578 * 27579 * Note: The MSF bit of the READ HEADER command specifies the 27580 * output format. The block address specified in that command 27581 * must be in LBA format. 27582 */ 27583 cdb[1] = 0; 27584 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27585 UIO_SYSSPACE, SD_PATH_STANDARD); 27586 if (rval != 0) { 27587 kmem_free(buffer, 12); 27588 kmem_free(com, sizeof (*com)); 27589 return (rval); 27590 } 27591 } 27592 27593 /* 27594 * Build and send the READ HEADER command to determine the data mode of 27595 * the user-specified track. 27596 */ 27597 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 27598 (entry->cdte_track != CDROM_LEADOUT)) { 27599 bzero(cdb, CDB_GROUP1); 27600 cdb[0] = SCMD_READ_HEADER; 27601 cdb[2] = buffer[8]; 27602 cdb[3] = buffer[9]; 27603 cdb[4] = buffer[10]; 27604 cdb[5] = buffer[11]; 27605 cdb[8] = 0x08; 27606 com->uscsi_buflen = 0x08; 27607 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27608 UIO_SYSSPACE, SD_PATH_STANDARD); 27609 if (rval == 0) { 27610 entry->cdte_datamode = buffer[0]; 27611 } else { 27612 /* 27613 * The READ HEADER command failed; since the command is 27614 * obsoleted in one spec, it's better to return 27615 * -1 for an invalid track so that we can still 27616 * receive the rest of the TOC data.
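 * (The same (uchar_t)-1 value is used below for non-data tracks and
 * for the leadout, so callers treat -1 as 'data mode unknown'.)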
27617 */ 27618 entry->cdte_datamode = (uchar_t)-1; 27619 } 27620 } else { 27621 entry->cdte_datamode = (uchar_t)-1; 27622 } 27623 27624 kmem_free(buffer, 12); 27625 kmem_free(com, sizeof (*com)); 27626 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 27627 return (EFAULT); 27628 27629 return (rval); 27630 } 27631 27632 27633 /* 27634 * Function: sr_read_tochdr() 27635 * 27636 * Description: This routine is the driver entry point for handling CD-ROM 27637 * ioctl requests to read the Table of Contents (TOC) header 27638 * (CDROMREADTOCHDR). The TOC header consists of the disk starting 27639 * and ending track numbers. 27640 * 27641 * Arguments: dev - the device 'dev_t' 27642 * data - pointer to user provided toc header structure, 27643 * specifying the starting and ending track numbers. 27644 * flag - this argument is a pass through to ddi_copyxxx() 27645 * directly from the mode argument of ioctl(). 27646 * 27647 * Return Code: the code returned by sd_send_scsi_cmd() 27648 * EFAULT if ddi_copyxxx() fails 27649 * ENXIO if fail ddi_get_soft_state 27650 * EINVAL if data pointer is NULL 27651 */ 27652 27653 static int 27654 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 27655 { 27656 struct sd_lun *un; 27657 struct uscsi_cmd *com; 27658 struct cdrom_tochdr toc_header; 27659 struct cdrom_tochdr *hdr = &toc_header; 27660 char cdb[CDB_GROUP1]; 27661 int rval; 27662 caddr_t buffer; 27663 27664 if (data == NULL) { 27665 return (EINVAL); 27666 } 27667 27668 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27669 (un->un_state == SD_STATE_OFFLINE)) { 27670 return (ENXIO); 27671 } 27672 27673 buffer = kmem_zalloc(4, KM_SLEEP); 27674 bzero(cdb, CDB_GROUP1); 27675 cdb[0] = SCMD_READ_TOC; 27676 /* 27677 * Specifying a track number of 0x00 in the READ TOC command indicates 27678 * that the TOC header should be returned 27679 */ 27680 cdb[6] = 0x00; 27681 /* 27682 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 27683 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 27684 */ 27685 cdb[8] = 0x04; 27686 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27687 com->uscsi_cdb = cdb; 27688 com->uscsi_cdblen = CDB_GROUP1; 27689 com->uscsi_bufaddr = buffer; 27690 com->uscsi_buflen = 0x04; 27691 com->uscsi_timeout = 300; 27692 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27693 27694 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27695 UIO_SYSSPACE, SD_PATH_STANDARD); 27696 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 27697 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 27698 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 27699 } else { 27700 hdr->cdth_trk0 = buffer[2]; 27701 hdr->cdth_trk1 = buffer[3]; 27702 } 27703 kmem_free(buffer, 4); 27704 kmem_free(com, sizeof (*com)); 27705 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 27706 return (EFAULT); 27707 } 27708 return (rval); 27709 } 27710 27711 27712 /* 27713 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 27714 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for 27715 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 27716 * digital audio, and extended architecture digital audio. These modes are 27717 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 27718 * MMC specs.
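 * (For reference below: mode 1 user data is 2048 bytes per sector and
 * mode 2 user data is 2336 bytes per sector, which is why those sizes
 * appear in the transfer length calculations in these routines.)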
27719 * 27720 * In addition to support for the various data formats, these routines also 27721 * include support for devices that implement only the direct access READ 27722 * commands (0x08, 0x28), devices that implement the READ_CD commands 27723 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 27724 * READ CDXA commands (0xD8, 0xDB). 27725 */ 27726 27727 /* 27728 * Function: sr_read_mode1() 27729 * 27730 * Description: This routine is the driver entry point for handling CD-ROM 27731 * ioctl read mode1 requests (CDROMREADMODE1). 27732 * 27733 * Arguments: dev - the device 'dev_t' 27734 * data - pointer to user provided cd read structure specifying 27735 * the lba buffer address and length. 27736 * flag - this argument is a pass through to ddi_copyxxx() 27737 * directly from the mode argument of ioctl(). 27738 * 27739 * Return Code: the code returned by sd_send_scsi_cmd() 27740 * EFAULT if ddi_copyxxx() fails 27741 * ENXIO if fail ddi_get_soft_state 27742 * EINVAL if data pointer is NULL 27743 */ 27744 27745 static int 27746 sr_read_mode1(dev_t dev, caddr_t data, int flag) 27747 { 27748 struct sd_lun *un; 27749 struct cdrom_read mode1_struct; 27750 struct cdrom_read *mode1 = &mode1_struct; 27751 int rval; 27752 #ifdef _MULTI_DATAMODEL 27753 /* To support ILP32 applications in an LP64 world */ 27754 struct cdrom_read32 cdrom_read32; 27755 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27756 #endif /* _MULTI_DATAMODEL */ 27757 27758 if (data == NULL) { 27759 return (EINVAL); 27760 } 27761 27762 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27763 (un->un_state == SD_STATE_OFFLINE)) { 27764 return (ENXIO); 27765 } 27766 27767 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27768 "sr_read_mode1: entry: un:0x%p\n", un); 27769 27770 #ifdef _MULTI_DATAMODEL 27771 switch (ddi_model_convert_from(flag & FMODELS)) { 27772 case DDI_MODEL_ILP32: 27773 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27774 return (EFAULT); 27775 } 27776 /* Convert the ILP32 uscsi data from the application to LP64 */ 27777 cdrom_read32tocdrom_read(cdrd32, mode1); 27778 break; 27779 case DDI_MODEL_NONE: 27780 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27781 return (EFAULT); 27782 } 27783 } 27784 #else /* ! _MULTI_DATAMODEL */ 27785 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27786 return (EFAULT); 27787 } 27788 #endif /* _MULTI_DATAMODEL */ 27789 27790 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 27791 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 27792 27793 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27794 "sr_read_mode1: exit: un:0x%p\n", un); 27795 27796 return (rval); 27797 } 27798 27799 27800 /* 27801 * Function: sr_read_cd_mode2() 27802 * 27803 * Description: This routine is the driver entry point for handling CD-ROM 27804 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27805 * support the READ CD (0xBE) command or the 1st generation 27806 * READ CD (0xD4) command. 27807 * 27808 * Arguments: dev - the device 'dev_t' 27809 * data - pointer to user provided cd read structure specifying 27810 * the lba buffer address and length. 27811 * flag - this argument is a pass through to ddi_copyxxx() 27812 * directly from the mode argument of ioctl().
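 * Note: the user's read buffer is handed to sd_send_scsi_cmd() as a
 * UIO_USERSPACE data address, so no kernel bounce buffer is allocated
 * by this routine.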
27813 * 27814 * Return Code: the code returned by sd_send_scsi_cmd() 27815 * EFAULT if ddi_copyxxx() fails 27816 * ENXIO if fail ddi_get_soft_state 27817 * EINVAL if data pointer is NULL 27818 */ 27819 27820 static int 27821 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 27822 { 27823 struct sd_lun *un; 27824 struct uscsi_cmd *com; 27825 struct cdrom_read mode2_struct; 27826 struct cdrom_read *mode2 = &mode2_struct; 27827 uchar_t cdb[CDB_GROUP5]; 27828 int nblocks; 27829 int rval; 27830 #ifdef _MULTI_DATAMODEL 27831 /* To support ILP32 applications in an LP64 world */ 27832 struct cdrom_read32 cdrom_read32; 27833 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27834 #endif /* _MULTI_DATAMODEL */ 27835 27836 if (data == NULL) { 27837 return (EINVAL); 27838 } 27839 27840 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27841 (un->un_state == SD_STATE_OFFLINE)) { 27842 return (ENXIO); 27843 } 27844 27845 #ifdef _MULTI_DATAMODEL 27846 switch (ddi_model_convert_from(flag & FMODELS)) { 27847 case DDI_MODEL_ILP32: 27848 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27849 return (EFAULT); 27850 } 27851 /* Convert the ILP32 uscsi data from the application to LP64 */ 27852 cdrom_read32tocdrom_read(cdrd32, mode2); 27853 break; 27854 case DDI_MODEL_NONE: 27855 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27856 return (EFAULT); 27857 } 27858 break; 27859 } 27860 27861 #else /* ! _MULTI_DATAMODEL */ 27862 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27863 return (EFAULT); 27864 } 27865 #endif /* _MULTI_DATAMODEL */ 27866 27867 bzero(cdb, sizeof (cdb)); 27868 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 27869 /* Read command supported by 1st generation atapi drives */ 27870 cdb[0] = SCMD_READ_CDD4; 27871 } else { 27872 /* Universal CD Access Command */ 27873 cdb[0] = SCMD_READ_CD; 27874 } 27875 27876 /* 27877 * Set the expected sector type to: 2336 byte, Mode 2 Yellow Book 27878 */ 27879 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 27880 27881 /* set the start address */ 27882 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0xFF); 27883 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0xFF); 27884 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 27885 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 27886 27887 /* set the transfer length */ 27888 nblocks = mode2->cdread_buflen / 2336; 27889 cdb[6] = (uchar_t)(nblocks >> 16); 27890 cdb[7] = (uchar_t)(nblocks >> 8); 27891 cdb[8] = (uchar_t)nblocks; 27892 27893 /* set the filter bits */ 27894 cdb[9] = CDROM_READ_CD_USERDATA; 27895 27896 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27897 com->uscsi_cdb = (caddr_t)cdb; 27898 com->uscsi_cdblen = sizeof (cdb); 27899 com->uscsi_bufaddr = mode2->cdread_bufaddr; 27900 com->uscsi_buflen = mode2->cdread_buflen; 27901 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27902 27903 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 27904 UIO_SYSSPACE, SD_PATH_STANDARD); 27905 kmem_free(com, sizeof (*com)); 27906 return (rval); 27907 } 27908 27909 27910 /* 27911 * Function: sr_read_mode2() 27912 * 27913 * Description: This routine is the driver entry point for handling CD-ROM 27914 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27915 * do not support the READ CD (0xBE) command. 27916 * 27917 * Arguments: dev - the device 'dev_t' 27918 * data - pointer to user provided cd read structure specifying 27919 * the lba buffer address and length.
flag - this argument is a pass through to ddi_copyxxx() 27921 * directly from the mode argument of ioctl(). 27922 * 27923 * Return Code: the code returned by sd_send_scsi_cmd() 27924 * EFAULT if ddi_copyxxx() fails 27925 * ENXIO if fail ddi_get_soft_state 27926 * EINVAL if data pointer is NULL 27927 * EIO if fail to reset block size 27928 * EAGAIN if commands are in progress in the driver 27929 */ 27930 27931 static int 27932 sr_read_mode2(dev_t dev, caddr_t data, int flag) 27933 { 27934 struct sd_lun *un; 27935 struct cdrom_read mode2_struct; 27936 struct cdrom_read *mode2 = &mode2_struct; 27937 int rval; 27938 uint32_t restore_blksize; 27939 struct uscsi_cmd *com; 27940 uchar_t cdb[CDB_GROUP0]; 27941 int nblocks; 27942 27943 #ifdef _MULTI_DATAMODEL 27944 /* To support ILP32 applications in an LP64 world */ 27945 struct cdrom_read32 cdrom_read32; 27946 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27947 #endif /* _MULTI_DATAMODEL */ 27948 27949 if (data == NULL) { 27950 return (EINVAL); 27951 } 27952 27953 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27954 (un->un_state == SD_STATE_OFFLINE)) { 27955 return (ENXIO); 27956 } 27957 27958 /* 27959 * Because this routine will update the device and driver block size 27960 * being used, we want to make sure there are no commands in progress. 27961 * If commands are in progress, the user will have to try again. 27962 * 27963 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 27964 * in sdioctl to protect commands from sdioctl through to the top of 27965 * sd_uscsi_strategy. See sdioctl for details. 27966 */ 27967 mutex_enter(SD_MUTEX(un)); 27968 if (un->un_ncmds_in_driver != 1) { 27969 mutex_exit(SD_MUTEX(un)); 27970 return (EAGAIN); 27971 } 27972 mutex_exit(SD_MUTEX(un)); 27973 27974 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27975 "sr_read_mode2: entry: un:0x%p\n", un); 27976 27977 #ifdef _MULTI_DATAMODEL 27978 switch (ddi_model_convert_from(flag & FMODELS)) { 27979 case DDI_MODEL_ILP32: 27980 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27981 return (EFAULT); 27982 } 27983 /* Convert the ILP32 uscsi data from the application to LP64 */ 27984 cdrom_read32tocdrom_read(cdrd32, mode2); 27985 break; 27986 case DDI_MODEL_NONE: 27987 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27988 return (EFAULT); 27989 } 27990 break; 27991 } 27992 #else /* !
_MULTI_DATAMODEL */ 27993 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 27994 return (EFAULT); 27995 } 27996 #endif /* _MULTI_DATAMODEL */ 27997 27998 /* Store the current target block size for restoration later */ 27999 restore_blksize = un->un_tgt_blocksize; 28000 28001 /* Change the device and soft state target block size to 2336 */ 28002 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 28003 rval = EIO; 28004 goto done; 28005 } 28006 28007 28008 bzero(cdb, sizeof (cdb)); 28009 28010 /* set READ operation */ 28011 cdb[0] = SCMD_READ; 28012 28013 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 28014 mode2->cdread_lba >>= 2; 28015 28016 /* set the start address */ 28017 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0x1F); 28018 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 28019 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 28020 28021 /* set the transfer length */ 28022 nblocks = mode2->cdread_buflen / 2336; 28023 cdb[4] = (uchar_t)nblocks & 0xFF; 28024 28025 /* build command */ 28026 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28027 com->uscsi_cdb = (caddr_t)cdb; 28028 com->uscsi_cdblen = sizeof (cdb); 28029 com->uscsi_bufaddr = mode2->cdread_bufaddr; 28030 com->uscsi_buflen = mode2->cdread_buflen; 28031 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28032 28033 /* 28034 * Issue SCSI command with user space address for read buffer. 28035 * 28036 * This sends the command through the main channel in the driver. 28037 * 28038 * Since this is accessed via an IOCTL call, we go through the 28039 * standard path, so that if the device was powered down, then 28040 * it would be 'awakened' to handle the command. 28041 */ 28042 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 28043 UIO_SYSSPACE, SD_PATH_STANDARD); 28044 28045 kmem_free(com, sizeof (*com)); 28046 28047 /* Restore the device and soft state target block size */ 28048 if (sr_sector_mode(dev, restore_blksize) != 0) { 28049 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28050 "can't do switch back to mode 1\n"); 28051 /* 28052 * If the read command above succeeded, we still need to 28053 * report an error because we failed to reset the block size. 28054 */ 28055 if (rval == 0) { 28056 rval = EIO; 28057 } 28058 } 28059 28060 done: 28061 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 28062 "sr_read_mode2: exit: un:0x%p\n", un); 28063 28064 return (rval); 28065 } 28066 28067 28068 /* 28069 * Function: sr_sector_mode() 28070 * 28071 * Description: This utility function is used by sr_read_mode2 to set the target 28072 * block size based on the user-specified size. This is a legacy 28073 * implementation based upon a vendor specific mode page 28074 * 28075 * Arguments: dev - the device 'dev_t' 28076 * blksize - flag indicating if the block size is being set to 2336 28077 * or 512.
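 * (SD_MODE2_BLKSIZE selects the 2336 byte mode 2 sector size; the
 * requested size is programmed into the mode select data below.)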
28078 * 28079 * Return Code: the code returned by sd_send_scsi_MODE_SENSE() or 28080 * sd_send_scsi_MODE_SELECT() 28081 * ENXIO if fail ddi_get_soft_state 28082 * 28083 */ 28084 28085 static int 28086 sr_sector_mode(dev_t dev, uint32_t blksize) 28087 { 28088 struct sd_lun *un; 28089 uchar_t *sense; 28090 uchar_t *select; 28091 int rval; 28092 28093 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28094 (un->un_state == SD_STATE_OFFLINE)) { 28095 return (ENXIO); 28096 } 28097 28098 sense = kmem_zalloc(20, KM_SLEEP); 28099 28100 /* Note: This is a vendor specific mode page (0x81) */ 28101 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 28102 SD_PATH_STANDARD)) != 0) { 28103 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28104 "sr_sector_mode: Mode Sense failed\n"); 28105 kmem_free(sense, 20); 28106 return (rval); 28107 } 28108 select = kmem_zalloc(20, KM_SLEEP); 28109 select[3] = 0x08; 28110 select[10] = ((blksize >> 8) & 0xff); 28111 select[11] = (blksize & 0xff); 28112 select[12] = 0x01; 28113 select[13] = 0x06; 28114 select[14] = sense[14]; 28115 select[15] = sense[15]; 28116 if (blksize == SD_MODE2_BLKSIZE) { 28117 select[14] |= 0x01; 28118 } 28119 28120 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 28121 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 28122 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28123 "sr_sector_mode: Mode Select failed\n"); 28124 } else { 28125 /* 28126 * Only update the softstate block size if we successfully 28127 * changed the device block mode. 28128 */ 28129 mutex_enter(SD_MUTEX(un)); 28130 sd_update_block_info(un, blksize, 0); 28131 mutex_exit(SD_MUTEX(un)); 28132 } 28133 kmem_free(sense, 20); 28134 kmem_free(select, 20); 28135 return (rval); 28136 } 28137 28138 28139 /* 28140 * Function: sr_read_cdda() 28141 * 28142 * Description: This routine is the driver entry point for handling CD-ROM 28143 * ioctl requests to return CD-DA or subcode data (CDROMCDDA). If 28144 * the target supports CDDA, these requests are handled via a vendor 28145 * specific command (0xD8). If the target does not support CDDA, 28146 * these requests are handled via the READ CD command (0xBE). 28147 * 28148 * Arguments: dev - the device 'dev_t' 28149 * data - pointer to user provided CD-DA structure specifying 28150 * the track starting address, transfer length, and 28151 * subcode options. 28152 * flag - this argument is a pass through to ddi_copyxxx() 28153 * directly from the mode argument of ioctl().
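 * Note: cdda_length is a sector count; the caller's buffer must be
 * cdda_length times the per-sector size implied by cdda_subcode, as
 * computed in the switch statement below.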
28154 * 28155 * Return Code: the code returned by sd_send_scsi_cmd() 28156 * EFAULT if ddi_copyxxx() fails 28157 * ENXIO if fail ddi_get_soft_state 28158 * EINVAL if invalid arguments are provided 28159 * ENOTTY 28160 */ 28161 28162 static int 28163 sr_read_cdda(dev_t dev, caddr_t data, int flag) 28164 { 28165 struct sd_lun *un; 28166 struct uscsi_cmd *com; 28167 struct cdrom_cdda *cdda; 28168 int rval; 28169 size_t buflen; 28170 char cdb[CDB_GROUP5]; 28171 28172 #ifdef _MULTI_DATAMODEL 28173 /* To support ILP32 applications in an LP64 world */ 28174 struct cdrom_cdda32 cdrom_cdda32; 28175 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 28176 #endif /* _MULTI_DATAMODEL */ 28177 28178 if (data == NULL) { 28179 return (EINVAL); 28180 } 28181 28182 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28183 return (ENXIO); 28184 } 28185 28186 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 28187 28188 #ifdef _MULTI_DATAMODEL 28189 switch (ddi_model_convert_from(flag & FMODELS)) { 28190 case DDI_MODEL_ILP32: 28191 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 28192 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28193 "sr_read_cdda: ddi_copyin Failed\n"); 28194 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28195 return (EFAULT); 28196 } 28197 /* Convert the ILP32 uscsi data from the application to LP64 */ 28198 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 28199 break; 28200 case DDI_MODEL_NONE: 28201 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28202 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28203 "sr_read_cdda: ddi_copyin Failed\n"); 28204 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28205 return (EFAULT); 28206 } 28207 break; 28208 } 28209 #else /* ! _MULTI_DATAMODEL */ 28210 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28211 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28212 "sr_read_cdda: ddi_copyin Failed\n"); 28213 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28214 return (EFAULT); 28215 } 28216 #endif /* _MULTI_DATAMODEL */ 28217 28218 /* 28219 * Since MMC-2 expects max 3 bytes for length, check if the 28220 * length input is greater than 3 bytes 28221 */ 28222 if ((cdda->cdda_length & 0xFF000000) != 0) { 28223 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 28224 "cdrom transfer length too large: %d (limit %d)\n", 28225 cdda->cdda_length, 0xFFFFFF); 28226 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28227 return (EINVAL); 28228 } 28229 28230 switch (cdda->cdda_subcode) { 28231 case CDROM_DA_NO_SUBCODE: 28232 buflen = CDROM_BLK_2352 * cdda->cdda_length; 28233 break; 28234 case CDROM_DA_SUBQ: 28235 buflen = CDROM_BLK_2368 * cdda->cdda_length; 28236 break; 28237 case CDROM_DA_ALL_SUBCODE: 28238 buflen = CDROM_BLK_2448 * cdda->cdda_length; 28239 break; 28240 case CDROM_DA_SUBCODE_ONLY: 28241 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 28242 break; 28243 default: 28244 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28245 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 28246 cdda->cdda_subcode); 28247 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28248 return (EINVAL); 28249 } 28250 28251 /* Build and send the command */ 28252 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28253 bzero(cdb, CDB_GROUP5); 28254 28255 if (un->un_f_cfg_cdda == TRUE) { 28256 cdb[0] = (char)SCMD_READ_CD; 28257 cdb[1] = 0x04; 28258 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28259 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28260 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28261 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28262 
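/* set the transfer length in blocks (3 bytes, MSB first) */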
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28263 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28264 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 28265 cdb[9] = 0x10; 28266 switch (cdda->cdda_subcode) { 28267 case CDROM_DA_NO_SUBCODE : 28268 cdb[10] = 0x0; 28269 break; 28270 case CDROM_DA_SUBQ : 28271 cdb[10] = 0x2; 28272 break; 28273 case CDROM_DA_ALL_SUBCODE : 28274 cdb[10] = 0x1; 28275 break; 28276 case CDROM_DA_SUBCODE_ONLY : 28277 /* FALLTHROUGH */ 28278 default : 28279 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28280 kmem_free(com, sizeof (*com)); 28281 return (ENOTTY); 28282 } 28283 } else { 28284 cdb[0] = (char)SCMD_READ_CDDA; 28285 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28286 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28287 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28288 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28289 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 28290 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28291 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28292 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 28293 cdb[10] = cdda->cdda_subcode; 28294 } 28295 28296 com->uscsi_cdb = cdb; 28297 com->uscsi_cdblen = CDB_GROUP5; 28298 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 28299 com->uscsi_buflen = buflen; 28300 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28301 28302 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 28303 UIO_SYSSPACE, SD_PATH_STANDARD); 28304 28305 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28306 kmem_free(com, sizeof (*com)); 28307 return (rval); 28308 } 28309 28310 28311 /* 28312 * Function: sr_read_cdxa() 28313 * 28314 * Description: This routine is the driver entry point for handling CD-ROM 28315 * ioctl requests to return CD-XA (Extended Architecture) data. 28316 * (CDROMCDXA). 28317 * 28318 * Arguments: dev - the device 'dev_t' 28319 * data - pointer to user provided CD-XA structure specifying 28320 * the data starting address, transfer length, and format 28321 * flag - this argument is a pass through to ddi_copyxxx() 28322 * directly from the mode argument of ioctl(). 28323 * 28324 * Return Code: the code returned by sd_send_scsi_cmd() 28325 * EFAULT if ddi_copyxxx() fails 28326 * ENXIO if fail ddi_get_soft_state 28327 * EINVAL if data pointer is NULL 28328 */ 28329 28330 static int 28331 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 28332 { 28333 struct sd_lun *un; 28334 struct uscsi_cmd *com; 28335 struct cdrom_cdxa *cdxa; 28336 int rval; 28337 size_t buflen; 28338 char cdb[CDB_GROUP5]; 28339 uchar_t read_flags; 28340 28341 #ifdef _MULTI_DATAMODEL 28342 /* To support ILP32 applications in an LP64 world */ 28343 struct cdrom_cdxa32 cdrom_cdxa32; 28344 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 28345 #endif /* _MULTI_DATAMODEL */ 28346 28347 if (data == NULL) { 28348 return (EINVAL); 28349 } 28350 28351 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28352 return (ENXIO); 28353 } 28354 28355 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 28356 28357 #ifdef _MULTI_DATAMODEL 28358 switch (ddi_model_convert_from(flag & FMODELS)) { 28359 case DDI_MODEL_ILP32: 28360 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 28361 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28362 return (EFAULT); 28363 } 28364 /* 28365 * Convert the ILP32 uscsi data from the 28366 * application to LP64 for internal use. 
28367 */ 28368 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 28369 break; 28370 case DDI_MODEL_NONE: 28371 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 28372 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28373 return (EFAULT); 28374 } 28375 break; 28376 } 28377 #else /* ! _MULTI_DATAMODEL */ 28378 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 28379 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28380 return (EFAULT); 28381 } 28382 #endif /* _MULTI_DATAMODEL */ 28383 28384 /* 28385 * Since MMC-2 expects max 3 bytes for length, check if the 28386 * length input is greater than 3 bytes 28387 */ 28388 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 28389 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 28390 "cdrom transfer length too large: %d (limit %d)\n", 28391 cdxa->cdxa_length, 0xFFFFFF); 28392 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28393 return (EINVAL); 28394 } 28395 28396 switch (cdxa->cdxa_format) { 28397 case CDROM_XA_DATA: 28398 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 28399 read_flags = 0x10; 28400 break; 28401 case CDROM_XA_SECTOR_DATA: 28402 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 28403 read_flags = 0xf8; 28404 break; 28405 case CDROM_XA_DATA_W_ERROR: 28406 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 28407 read_flags = 0xfc; 28408 break; 28409 default: 28410 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28411 "sr_read_cdxa: Format '0x%x' Not Supported\n", 28412 cdxa->cdxa_format); 28413 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28414 return (EINVAL); 28415 } 28416 28417 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28418 bzero(cdb, CDB_GROUP5); 28419 if (un->un_f_mmc_cap == TRUE) { 28420 cdb[0] = (char)SCMD_READ_CD; 28421 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28422 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28423 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28424 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28425 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28426 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28427 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 28428 cdb[9] = (char)read_flags; 28429 } else { 28430 /* 28431 * Note: A vendor specific command (0xDB) is being used here to 28432 * request a read of all subcodes.
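 * No capability check is made at this point; non-MMC devices are
 * simply assumed to accept SCMD_READ_CDXA.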
28433 */ 28434 cdb[0] = (char)SCMD_READ_CDXA; 28435 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28436 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28437 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28438 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28439 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 28440 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28441 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28442 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 28443 cdb[10] = cdxa->cdxa_format; 28444 } 28445 com->uscsi_cdb = cdb; 28446 com->uscsi_cdblen = CDB_GROUP5; 28447 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 28448 com->uscsi_buflen = buflen; 28449 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28450 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 28451 UIO_SYSSPACE, SD_PATH_STANDARD); 28452 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28453 kmem_free(com, sizeof (*com)); 28454 return (rval); 28455 } 28456 28457 28458 /* 28459 * Function: sr_eject() 28460 * 28461 * Description: This routine is the driver entry point for handling CD-ROM 28462 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 28463 * 28464 * Arguments: dev - the device 'dev_t' 28465 * 28466 * Return Code: the code returned by sd_send_scsi_cmd() 28467 */ 28468 28469 static int 28470 sr_eject(dev_t dev) 28471 { 28472 struct sd_lun *un; 28473 int rval; 28474 28475 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28476 (un->un_state == SD_STATE_OFFLINE)) { 28477 return (ENXIO); 28478 } 28479 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 28480 SD_PATH_STANDARD)) != 0) { 28481 return (rval); 28482 } 28483 28484 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 28485 SD_PATH_STANDARD); 28486 28487 if (rval == 0) { 28488 mutex_enter(SD_MUTEX(un)); 28489 sr_ejected(un); 28490 un->un_mediastate = DKIO_EJECTED; 28491 cv_broadcast(&un->un_state_cv); 28492 mutex_exit(SD_MUTEX(un)); 28493 } 28494 return (rval); 28495 } 28496 28497 28498 /* 28499 * Function: sr_ejected() 28500 * 28501 * Description: This routine updates the soft state structure to invalidate the 28502 * geometry information after the media has been ejected or a 28503 * media eject has been detected. 28504 * 28505 * Arguments: un - driver soft state (unit) structure 28506 */ 28507 28508 static void 28509 sr_ejected(struct sd_lun *un) 28510 { 28511 struct sd_errstats *stp; 28512 28513 ASSERT(un != NULL); 28514 ASSERT(mutex_owned(SD_MUTEX(un))); 28515 28516 un->un_f_blockcount_is_valid = FALSE; 28517 un->un_f_tgt_blocksize_is_valid = FALSE; 28518 un->un_f_geometry_is_valid = FALSE; 28519 28520 if (un->un_errstats != NULL) { 28521 stp = (struct sd_errstats *)un->un_errstats->ks_data; 28522 stp->sd_capacity.value.ui64 = 0; 28523 } 28524 } 28525 28526 28527 /* 28528 * Function: sr_check_wp() 28529 * 28530 * Description: This routine checks the write protection of a removable media 28531 * disk via the write protect bit of the Mode Page Header device 28532 * specific field. This routine has been implemented to use the 28533 * error recovery mode page for all device types. 28534 * Note: In the future use a sd_send_scsi_MODE_SENSE() routine 28535 * 28536 * Arguments: dev - the device 'dev_t' 28537 * 28538 * Return Code: int indicating if the device is write protected (1) or not (0) 28539 * 28540 * Context: Kernel thread. 
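 * Note: a NULL softstate also returns FALSE (not an errno); see the
 * note in the function body about reworking these return codes.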
28541 * 28542 */ 28543 28544 static int 28545 sr_check_wp(dev_t dev) 28546 { 28547 struct sd_lun *un; 28548 uchar_t device_specific; 28549 uchar_t *sense; 28550 int hdrlen; 28551 int rval; 28552 int retry_flag = FALSE; 28553 28554 /* 28555 * Note: The return codes for this routine should be reworked to 28556 * properly handle the case of a NULL softstate. 28557 */ 28558 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28559 return (FALSE); 28560 } 28561 28562 if (un->un_f_cfg_is_atapi == TRUE) { 28563 retry_flag = TRUE; 28564 } 28565 28566 retry: 28567 if (un->un_f_cfg_is_atapi == TRUE) { 28568 /* 28569 * The mode page contents are not required; set the allocation 28570 * length for the mode page header only 28571 */ 28572 hdrlen = MODE_HEADER_LENGTH_GRP2; 28573 sense = kmem_zalloc(hdrlen, KM_SLEEP); 28574 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen, 28575 MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 28576 device_specific = 28577 ((struct mode_header_grp2 *)sense)->device_specific; 28578 } else { 28579 hdrlen = MODE_HEADER_LENGTH; 28580 sense = kmem_zalloc(hdrlen, KM_SLEEP); 28581 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen, 28582 MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 28583 device_specific = 28584 ((struct mode_header *)sense)->device_specific; 28585 } 28586 28587 if (rval != 0) { 28588 if ((un->un_f_cfg_is_atapi == TRUE) && (retry_flag)) { 28589 /* 28590 * An ATAPI Zip drive has been observed to report a 28591 * check condition on the first attempt, with sense 28592 * data indicating a power on or bus device reset. 28593 * Hence, in case of failure, retry at least once 28594 * for ATAPI devices. 28595 */ 28596 retry_flag = FALSE; 28597 kmem_free(sense, hdrlen); 28598 goto retry; 28599 } else { 28600 /* 28601 * Write protect mode sense failed; not all disks 28602 * understand this query. Return FALSE assuming that 28603 * these devices are not writable. 28604 */ 28605 rval = FALSE; 28606 } 28607 } else { 28608 if (device_specific & WRITE_PROTECT) { 28609 rval = TRUE; 28610 } else { 28611 rval = FALSE; 28612 } 28613 } 28614 kmem_free(sense, hdrlen); 28615 return (rval); 28616 } 28617 28618 28619 /* 28620 * Function: sr_volume_ctrl() 28621 * 28622 * Description: This routine is the driver entry point for handling CD-ROM 28623 * audio output volume ioctl requests (CDROMVOLCTRL). 28624 * 28625 * Arguments: dev - the device 'dev_t' 28626 * data - pointer to user audio volume control structure 28627 * flag - this argument is a pass through to ddi_copyxxx() 28628 * directly from the mode argument of ioctl().
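 * Note: only the channel 0 and channel 1 volume levels are programmed;
 * channels 2 and 3 retain the values returned by the preceding mode
 * sense (see the select data construction below).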
28629 * 28630 * Return Code: the code returned by sd_send_scsi_cmd() 28631 * EFAULT if ddi_copyxxx() fails 28632 * ENXIO if fail ddi_get_soft_state 28633 * EINVAL if data pointer is NULL 28634 * 28635 */ 28636 28637 static int 28638 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 28639 { 28640 struct sd_lun *un; 28641 struct cdrom_volctrl volume; 28642 struct cdrom_volctrl *vol = &volume; 28643 uchar_t *sense_page; 28644 uchar_t *select_page; 28645 uchar_t *sense; 28646 uchar_t *select; 28647 int sense_buflen; 28648 int select_buflen; 28649 int rval; 28650 28651 if (data == NULL) { 28652 return (EINVAL); 28653 } 28654 28655 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28656 (un->un_state == SD_STATE_OFFLINE)) { 28657 return (ENXIO); 28658 } 28659 28660 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 28661 return (EFAULT); 28662 } 28663 28664 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28665 struct mode_header_grp2 *sense_mhp; 28666 struct mode_header_grp2 *select_mhp; 28667 int bd_len; 28668 28669 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 28670 select_buflen = MODE_HEADER_LENGTH_GRP2 + 28671 MODEPAGE_AUDIO_CTRL_LEN; 28672 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28673 select = kmem_zalloc(select_buflen, KM_SLEEP); 28674 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 28675 sense_buflen, MODEPAGE_AUDIO_CTRL, 28676 SD_PATH_STANDARD)) != 0) { 28677 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28678 "sr_volume_ctrl: Mode Sense Failed\n"); 28679 kmem_free(sense, sense_buflen); 28680 kmem_free(select, select_buflen); 28681 return (rval); 28682 } 28683 sense_mhp = (struct mode_header_grp2 *)sense; 28684 select_mhp = (struct mode_header_grp2 *)select; 28685 bd_len = (sense_mhp->bdesc_length_hi << 8) | 28686 sense_mhp->bdesc_length_lo; 28687 if (bd_len > MODE_BLK_DESC_LENGTH) { 28688 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28689 "sr_volume_ctrl: Mode Sense returned invalid " 28690 "block descriptor length\n"); 28691 kmem_free(sense, sense_buflen); 28692 kmem_free(select, select_buflen); 28693 return (EIO); 28694 } 28695 sense_page = (uchar_t *) 28696 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 28697 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 28698 select_mhp->length_msb = 0; 28699 select_mhp->length_lsb = 0; 28700 select_mhp->bdesc_length_hi = 0; 28701 select_mhp->bdesc_length_lo = 0; 28702 } else { 28703 struct mode_header *sense_mhp, *select_mhp; 28704 28705 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28706 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28707 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28708 select = kmem_zalloc(select_buflen, KM_SLEEP); 28709 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 28710 sense_buflen, MODEPAGE_AUDIO_CTRL, 28711 SD_PATH_STANDARD)) != 0) { 28712 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28713 "sr_volume_ctrl: Mode Sense Failed\n"); 28714 kmem_free(sense, sense_buflen); 28715 kmem_free(select, select_buflen); 28716 return (rval); 28717 } 28718 sense_mhp = (struct mode_header *)sense; 28719 select_mhp = (struct mode_header *)select; 28720 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 28721 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28722 "sr_volume_ctrl: Mode Sense returned invalid " 28723 "block descriptor length\n"); 28724 kmem_free(sense, sense_buflen); 28725 kmem_free(select, select_buflen); 28726 return (EIO); 28727 } 28728 sense_page = (uchar_t *) 28729 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 28730 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 28731 select_mhp->length = 0; 28732 select_mhp->bdesc_length = 0; 28733 } 28734 /* 28735 * Note: An audio control data structure could be created and overlaid 28736 * on the following in place of the array indexing method implemented. 28737 */ 28738 28739 /* Build the select data for the user volume data */ 28740 select_page[0] = MODEPAGE_AUDIO_CTRL; 28741 select_page[1] = 0xE; 28742 /* Set the immediate bit */ 28743 select_page[2] = 0x04; 28744 /* Zero out reserved fields */ 28745 select_page[3] = 0x00; 28746 select_page[4] = 0x00; 28747 /* Return sense data for fields not to be modified */ 28748 select_page[5] = sense_page[5]; 28749 select_page[6] = sense_page[6]; 28750 select_page[7] = sense_page[7]; 28751 /* Set the user specified volume levels for channel 0 and 1 */ 28752 select_page[8] = 0x01; 28753 select_page[9] = vol->channel0; 28754 select_page[10] = 0x02; 28755 select_page[11] = vol->channel1; 28756 /* Channels 2 and 3 are currently unsupported, so return the sense data */ 28757 select_page[12] = sense_page[12]; 28758 select_page[13] = sense_page[13]; 28759 select_page[14] = sense_page[14]; 28760 select_page[15] = sense_page[15]; 28761 28762 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28763 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 28764 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28765 } else { 28766 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 28767 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28768 } 28769 28770 kmem_free(sense, sense_buflen); 28771 kmem_free(select, select_buflen); 28772 return (rval); 28773 } 28774 28775 28776 /* 28777 * Function: sr_read_sony_session_offset() 28778 * 28779 * Description: This routine is the driver entry point for handling CD-ROM 28780 * ioctl requests for session offset information (CDROMREADOFFSET). 28781 * The address of the first track in the last session of a 28782 * multi-session CD-ROM is returned. 28783 * 28784 * Note: This routine uses a vendor specific key value in the 28785 * command control field without implementing any vendor check here 28786 * or in the ioctl routine. 28787 * 28788 * Arguments: dev - the device 'dev_t' 28789 * data - pointer to an int to hold the requested address 28790 * flag - this argument is a pass through to ddi_copyxxx() 28791 * directly from the mode argument of ioctl(). 28792 * 28793 * Return Code: the code returned by sd_send_scsi_cmd() 28794 * EFAULT if ddi_copyxxx() fails 28795 * ENXIO if fail ddi_get_soft_state 28796 * EINVAL if data pointer is NULL 28797 */ 28798 28799 static int 28800 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 28801 { 28802 struct sd_lun *un; 28803 struct uscsi_cmd *com; 28804 caddr_t buffer; 28805 char cdb[CDB_GROUP1]; 28806 int session_offset = 0; 28807 int rval; 28808 28809 if (data == NULL) { 28810 return (EINVAL); 28811 } 28812 28813 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28814 (un->un_state == SD_STATE_OFFLINE)) { 28815 return (ENXIO); 28816 } 28817 28818 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 28819 bzero(cdb, CDB_GROUP1); 28820 cdb[0] = SCMD_READ_TOC; 28821 /* 28822 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 28823 * (4 byte TOC response header + 8 byte response data) 28824 */ 28825 cdb[8] = SONY_SESSION_OFFSET_LEN; 28826 /* Byte 9 is the control byte.
A vendor specific value is used */ 28827 cdb[9] = SONY_SESSION_OFFSET_KEY; 28828 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28829 com->uscsi_cdb = cdb; 28830 com->uscsi_cdblen = CDB_GROUP1; 28831 com->uscsi_bufaddr = buffer; 28832 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN; 28833 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28834 28835 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 28836 UIO_SYSSPACE, SD_PATH_STANDARD); 28837 if (rval != 0) { 28838 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 28839 kmem_free(com, sizeof (*com)); 28840 return (rval); 28841 } 28842 if (buffer[1] == SONY_SESSION_OFFSET_VALID) { 28843 session_offset = 28844 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 28845 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 28846 /* 28847 * The offset is returned in current lbasize blocks. Convert it 28848 * to 2K blocks before returning it to the user. 28849 */ 28850 if (un->un_tgt_blocksize == CDROM_BLK_512) { 28851 session_offset >>= 2; 28852 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) { 28853 session_offset >>= 1; 28854 } 28855 } 28856 28857 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) { 28858 rval = EFAULT; 28859 } 28860 28861 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 28862 kmem_free(com, sizeof (*com)); 28863 return (rval); 28864 } 28865 28866 28867 /* 28868 * Function: sd_wm_cache_constructor() 28869 * 28870 * Description: Cache Constructor for the wmap cache for the read/modify/write 28871 * devices. 28872 * 28873 * Arguments: wm - A pointer to the sd_w_map to be initialized. 28874 * un - sd_lun structure for the device. 28875 * flag - the km flags passed to constructor 28876 * 28877 * Return Code: 0 on success. 28878 * -1 on failure. 28879 */ 28880 28881 /*ARGSUSED*/ 28882 static int 28883 sd_wm_cache_constructor(void *wm, void *un, int flags) 28884 { 28885 bzero(wm, sizeof (struct sd_w_map)); 28886 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL); 28887 return (0); 28888 } 28889 28890 28891 /* 28892 * Function: sd_wm_cache_destructor() 28893 * 28894 * Description: Cache destructor for the wmap cache for the read/modify/write 28895 * devices. 28896 * 28897 * Arguments: wm - A pointer to the sd_w_map to be destroyed. 28898 * un - sd_lun structure for the device. 28899 */ 28900 /*ARGSUSED*/ 28901 static void 28902 sd_wm_cache_destructor(void *wm, void *un) 28903 { 28904 cv_destroy(&((struct sd_w_map *)wm)->wm_avail); 28905 } 28906 28907 28908 /* 28909 * Function: sd_range_lock() 28910 * 28911 * Description: Lock the range of blocks specified as parameter to ensure 28912 * that a read-modify-write is atomic and no other I/O writes 28913 * to the same location. The range is specified in terms 28914 * of start and end blocks. Block numbers are the actual 28915 * media block numbers and not system block numbers. 28916 * 28917 * Arguments: un - sd_lun structure for the device. 28918 * startb - The starting block number 28919 * endb - The end block number 28920 * typ - type of i/o - simple/read_modify_write 28921 * 28922 * Return Code: wm - pointer to the wmap structure. 28923 * 28924 * Context: This routine can sleep.
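 * (It may block in cv_wait() while an overlapping range is busy, or
 * in kmem_cache_alloc(KM_SLEEP) when no wmap is immediately available.)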
28925 */ 28926 28927 static struct sd_w_map * 28928 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 28929 { 28930 struct sd_w_map *wmp = NULL; 28931 struct sd_w_map *sl_wmp = NULL; 28932 struct sd_w_map *tmp_wmp; 28933 wm_state state = SD_WM_CHK_LIST; 28934 28935 28936 ASSERT(un != NULL); 28937 ASSERT(!mutex_owned(SD_MUTEX(un))); 28938 28939 mutex_enter(SD_MUTEX(un)); 28940 28941 while (state != SD_WM_DONE) { 28942 28943 switch (state) { 28944 case SD_WM_CHK_LIST: 28945 /* 28946 * This is the starting state. Check the wmap list 28947 * to see if the range is currently available. 28948 */ 28949 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 28950 /* 28951 * If this is a simple write and no rmw 28952 * i/o is pending, then try to lock the 28953 * range as the range should be available. 28954 */ 28955 state = SD_WM_LOCK_RANGE; 28956 } else { 28957 tmp_wmp = sd_get_range(un, startb, endb); 28958 if (tmp_wmp != NULL) { 28959 if ((wmp != NULL) && ONLIST(un, wmp)) { 28960 /* 28961 * Should not keep onlist wmps 28962 * while waiting; this macro 28963 * will also do wmp = NULL. 28964 */ 28965 FREE_ONLIST_WMAP(un, wmp); 28966 } 28967 /* 28968 * sl_wmp is the wmap on which the wait 28969 * is done. Since tmp_wmp points 28970 * to the in-use wmap, set sl_wmp to 28971 * tmp_wmp and change the state to wait. 28972 */ 28973 sl_wmp = tmp_wmp; 28974 state = SD_WM_WAIT_MAP; 28975 } else { 28976 state = SD_WM_LOCK_RANGE; 28977 } 28978 28979 } 28980 break; 28981 28982 case SD_WM_LOCK_RANGE: 28983 ASSERT(un->un_wm_cache); 28984 /* 28985 * The range needs to be locked, so try to get a wmap. 28986 * First attempt it with KM_NOSLEEP; we want to avoid a 28987 * sleep if possible, as we would have to release the 28988 * sd mutex in order to sleep. 28989 */ 28990 if (wmp == NULL) 28991 wmp = kmem_cache_alloc(un->un_wm_cache, 28992 KM_NOSLEEP); 28993 if (wmp == NULL) { 28994 mutex_exit(SD_MUTEX(un)); 28995 _NOTE(DATA_READABLE_WITHOUT_LOCK 28996 (sd_lun::un_wm_cache)) 28997 wmp = kmem_cache_alloc(un->un_wm_cache, 28998 KM_SLEEP); 28999 mutex_enter(SD_MUTEX(un)); 29000 /* 29001 * We released the mutex, so recheck and go back 29002 * to the check-list state. 29003 */ 29004 state = SD_WM_CHK_LIST; 29005 } else { 29006 /* 29007 * We exit the state machine since we 29008 * have the wmap. Do the housekeeping first: 29009 * place the wmap on the wmap list if it is not 29010 * on it already, and then set the state to done. 29011 */ 29012 wmp->wm_start = startb; 29013 wmp->wm_end = endb; 29014 wmp->wm_flags = typ | SD_WM_BUSY; 29015 if (typ & SD_WTYPE_RMW) { 29016 un->un_rmw_count++; 29017 } 29018 /* 29019 * If not already on the list, then link it. 29020 */ 29021 if (!ONLIST(un, wmp)) { 29022 wmp->wm_next = un->un_wm; 29023 wmp->wm_prev = NULL; 29024 if (wmp->wm_next) 29025 wmp->wm_next->wm_prev = wmp; 29026 un->un_wm = wmp; 29027 } 29028 state = SD_WM_DONE; 29029 } 29030 break; 29031 29032 case SD_WM_WAIT_MAP: 29033 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 29034 /* 29035 * Wait is done on sl_wmp, which is set in the 29036 * check_list state. 29037 */ 29038 sl_wmp->wm_wanted_count++; 29039 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 29040 sl_wmp->wm_wanted_count--; 29041 if (!(sl_wmp->wm_flags & SD_WM_BUSY)) { 29042 if (wmp != NULL) 29043 CHK_N_FREEWMP(un, wmp); 29044 wmp = sl_wmp; 29045 } 29046 sl_wmp = NULL; 29047 /* 29048 * After waking up, we need to recheck the availability 29049 * of the range.
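 * (Another waiter may have locked an overlapping range while this
 * thread was blocked in cv_wait().)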
29050 */ 29051 state = SD_WM_CHK_LIST; 29052 break; 29053 29054 default: 29055 panic("sd_range_lock: " 29056 "Unknown state %d in sd_range_lock", state); 29057 /*NOTREACHED*/ 29058 } /* switch(state) */ 29059 29060 } /* while(state != SD_WM_DONE) */ 29061 29062 mutex_exit(SD_MUTEX(un)); 29063 29064 ASSERT(wmp != NULL); 29065 29066 return (wmp); 29067 } 29068 29069 29070 /* 29071 * Function: sd_get_range() 29072 * 29073 * Description: Find if there is any I/O overlapping this one. 29074 * Returns the write-map of the 1st such I/O, NULL otherwise. 29075 * 29076 * Arguments: un - sd_lun structure for the device. 29077 * startb - The starting block number 29078 * endb - The end block number 29079 * 29080 * Return Code: wm - pointer to the wmap structure. 29081 */ 29082 29083 static struct sd_w_map * 29084 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 29085 { 29086 struct sd_w_map *wmp; 29087 29088 ASSERT(un != NULL); 29089 29090 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 29091 if (!(wmp->wm_flags & SD_WM_BUSY)) { 29092 continue; 29093 } 29094 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 29095 break; 29096 } 29097 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 29098 break; 29099 } 29100 } 29101 29102 return (wmp); 29103 } 29104 29105 29106 /* 29107 * Function: sd_free_inlist_wmap() 29108 * 29109 * Description: Unlink and free a write map struct. 29110 * 29111 * Arguments: un - sd_lun structure for the device. 29112 * wmp - sd_w_map which needs to be unlinked. 29113 */ 29114 29115 static void 29116 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 29117 { 29118 ASSERT(un != NULL); 29119 29120 if (un->un_wm == wmp) { 29121 un->un_wm = wmp->wm_next; 29122 } else { 29123 wmp->wm_prev->wm_next = wmp->wm_next; 29124 } 29125 29126 if (wmp->wm_next) { 29127 wmp->wm_next->wm_prev = wmp->wm_prev; 29128 } 29129 29130 wmp->wm_next = wmp->wm_prev = NULL; 29131 29132 kmem_cache_free(un->un_wm_cache, wmp); 29133 } 29134 29135 29136 /* 29137 * Function: sd_range_unlock() 29138 * 29139 * Description: Unlock the range locked by wm. 29140 * Free the write map if nobody else is waiting on it. 29141 * 29142 * Arguments: un - sd_lun structure for the device. 29143 * wm - sd_w_map which needs to be unlocked. 29144 */ 29145 29146 static void 29147 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 29148 { 29149 ASSERT(un != NULL); 29150 ASSERT(wm != NULL); 29151 ASSERT(!mutex_owned(SD_MUTEX(un))); 29152 29153 mutex_enter(SD_MUTEX(un)); 29154 29155 if (wm->wm_flags & SD_WTYPE_RMW) { 29156 un->un_rmw_count--; 29157 } 29158 29159 if (wm->wm_wanted_count) { 29160 wm->wm_flags = 0; 29161 /* 29162 * Broadcast that the wmap is available now. 29163 */ 29164 cv_broadcast(&wm->wm_avail); 29165 } else { 29166 /* 29167 * If no one is waiting on the map, it should be freed. 29168 */ 29169 sd_free_inlist_wmap(un, wm); 29170 } 29171 29172 mutex_exit(SD_MUTEX(un)); 29173 } 29174 29175 29176 /* 29177 * Function: sd_read_modify_write_task 29178 * 29179 * Description: Called from a taskq thread to initiate the write phase of 29180 * a read-modify-write request. This is used for targets where 29181 * un->un_sys_blocksize != un->un_tgt_blocksize. 29182 * 29183 * Arguments: arg - a pointer to the buf(9S) struct for the write command. 29184 * 29185 * Context: Called under taskq thread context.
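 * (The layer index saved in the xbuf's private data tells
 * SD_NEXT_IOSTART() where to resume in the iostart chain; see the
 * comment in the function body.)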
29186 */ 29187 29188 static void 29189 sd_read_modify_write_task(void *arg) 29190 { 29191 struct sd_mapblocksize_info *bsp; 29192 struct buf *bp; 29193 struct sd_xbuf *xp; 29194 struct sd_lun *un; 29195 29196 bp = arg; /* The bp is given in arg */ 29197 ASSERT(bp != NULL); 29198 29199 /* Get the pointer to the layer-private data struct */ 29200 xp = SD_GET_XBUF(bp); 29201 ASSERT(xp != NULL); 29202 bsp = xp->xb_private; 29203 ASSERT(bsp != NULL); 29204 29205 un = SD_GET_UN(bp); 29206 ASSERT(un != NULL); 29207 ASSERT(!mutex_owned(SD_MUTEX(un))); 29208 29209 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 29210 "sd_read_modify_write_task: entry: buf:0x%p\n", bp); 29211 29212 /* 29213 * This is the write phase of a read-modify-write request, called 29214 * under the context of a taskq thread in response to the read 29215 * portion of the rmw request completing under interrupt 29216 * context. The write request must be sent from here down the iostart 29217 * chain as if it were being sent from sd_mapblocksize_iostart(), so 29218 * we use the layer index saved in the layer-private data area. 29219 */ 29220 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp); 29221 29222 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 29223 "sd_read_modify_write_task: exit: buf:0x%p\n", bp); 29224 } 29225 29226 29227 /* 29228 * Function: sddump_do_read_of_rmw() 29229 * 29230 * Description: This routine will be called from sddump. If sddump is called 29231 * with an I/O which is not aligned on a device blocksize boundary, 29232 * then the write has to be converted to a read-modify-write. 29233 * Do the read part here in order to keep sddump simple. 29234 * Note that the sd_mutex is held across the call to this 29235 * routine. 29236 * 29237 * Arguments: un - sd_lun 29238 * blkno - block number in terms of media block size. 29239 * nblk - number of blocks. 29240 * bpp - pointer to pointer to the buf structure. On return 29241 * from this function, *bpp points to the valid buffer 29242 * to which the write has to be done. 29243 * 29244 * Return Code: 0 for success or errno-type return code 29245 */ 29246 29247 static int 29248 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 29249 struct buf **bpp) 29250 { 29251 int err; 29252 int i; 29253 int rval; 29254 struct buf *bp; 29255 struct scsi_pkt *pkt = NULL; 29256 uint32_t target_blocksize; 29257 29258 ASSERT(un != NULL); 29259 ASSERT(mutex_owned(SD_MUTEX(un))); 29260 29261 target_blocksize = un->un_tgt_blocksize; 29262 29263 mutex_exit(SD_MUTEX(un)); 29264 29265 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL, 29266 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL); 29267 if (bp == NULL) { 29268 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 29269 "no resources for dumping; giving up"); 29270 err = ENOMEM; 29271 goto done; 29272 } 29273 29274 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL, 29275 blkno, nblk); 29276 if (rval != 0) { 29277 scsi_free_consistent_buf(bp); 29278 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 29279 "no resources for dumping; giving up"); 29280 err = ENOMEM; 29281 goto done; 29282 } 29283 29284 pkt->pkt_flags |= FLAG_NOINTR; 29285 29286 err = EIO; 29287 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 29288 29289 /* 29290 * Scsi_poll returns 0 (success) if the command completes and 29291 * the status block is STATUS_GOOD. We should only check 29292 * errors if this condition is not true.
Even then we should 29293 * send our own request sense packet only if we have a check 29294 * condition and auto request sense has not been performed by 29295 * the hba. 29296 */ 29297 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 29298 29299 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 29300 err = 0; 29301 break; 29302 } 29303 29304 /* 29305 * Check CMD_DEV_GONE 1st, give up if device is gone, 29306 * no need to read RQS data. 29307 */ 29308 if (pkt->pkt_reason == CMD_DEV_GONE) { 29309 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 29310 "Device is gone\n"); 29311 break; 29312 } 29313 29314 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 29315 SD_INFO(SD_LOG_DUMP, un, 29316 "sddump: read failed with CHECK, try # %d\n", i); 29317 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 29318 (void) sd_send_polled_RQS(un); 29319 } 29320 29321 continue; 29322 } 29323 29324 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 29325 int reset_retval = 0; 29326 29327 SD_INFO(SD_LOG_DUMP, un, 29328 "sddump: read failed with BUSY, try # %d\n", i); 29329 29330 if (un->un_f_lun_reset_enabled == TRUE) { 29331 reset_retval = scsi_reset(SD_ADDRESS(un), 29332 RESET_LUN); 29333 } 29334 if (reset_retval == 0) { 29335 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 29336 } 29337 (void) sd_send_polled_RQS(un); 29338 29339 } else { 29340 SD_INFO(SD_LOG_DUMP, un, 29341 "sddump: read failed with 0x%x, try # %d\n", 29342 SD_GET_PKT_STATUS(pkt), i); 29343 mutex_enter(SD_MUTEX(un)); 29344 sd_reset_target(un, pkt); 29345 mutex_exit(SD_MUTEX(un)); 29346 } 29347 29348 /* 29349 * If we are not getting anywhere with lun/target resets, 29350 * let's reset the bus. 29351 */ 29352 if (i > SD_NDUMP_RETRIES/2) { 29353 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 29354 (void) sd_send_polled_RQS(un); 29355 } 29356 29357 } 29358 scsi_destroy_pkt(pkt); 29359 29360 if (err != 0) { 29361 scsi_free_consistent_buf(bp); 29362 *bpp = NULL; 29363 } else { 29364 *bpp = bp; 29365 } 29366 29367 done: 29368 mutex_enter(SD_MUTEX(un)); 29369 return (err); 29370 } 29371 29372 29373 /* 29374 * Function: sd_failfast_flushq 29375 * 29376 * Description: Take all bp's on the wait queue that have B_FAILFAST set 29377 * in b_flags and move them onto the failfast queue, then kick 29378 * off a thread to return all bp's on the failfast queue to 29379 * their owners with an error set. 29380 * 29381 * Arguments: un - pointer to the soft state struct for the instance. 29382 * 29383 * Context: may execute in interrupt context. 29384 */ 29385 29386 static void 29387 sd_failfast_flushq(struct sd_lun *un) 29388 { 29389 struct buf *bp; 29390 struct buf *next_waitq_bp; 29391 struct buf *prev_waitq_bp = NULL; 29392 29393 ASSERT(un != NULL); 29394 ASSERT(mutex_owned(SD_MUTEX(un))); 29395 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 29396 ASSERT(un->un_failfast_bp == NULL); 29397 29398 SD_TRACE(SD_LOG_IO_FAILFAST, un, 29399 "sd_failfast_flushq: entry: un:0x%p\n", un); 29400 29401 /* 29402 * Check if we should flush all bufs when entering failfast state, or 29403 * just those with B_FAILFAST set. 29404 */ 29405 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 29406 /* 29407 * Move *all* bp's on the wait queue to the failfast flush 29408 * queue, including those that do NOT have B_FAILFAST set. 
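 * (Both queues are singly linked through av_forw, so the whole waitq
 * can be appended to the failfast queue in constant time.)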


/*
 * Function: sd_failfast_flushq
 *
 * Description: Take all bp's on the wait queue that have B_FAILFAST set
 *		in b_flags and move them onto the failfast queue, then
 *		return all bp's on the failfast queue to their owners
 *		with an error set.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

static void
sd_failfast_flushq(struct sd_lun *un)
{
	struct buf *bp;
	struct buf *next_waitq_bp;
	struct buf *prev_waitq_bp = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
	ASSERT(un->un_failfast_bp == NULL);

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: entry: un:0x%p\n", un);

	/*
	 * Check if we should flush all bufs when entering failfast state,
	 * or just those with B_FAILFAST set.
	 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
		/*
		 * Move *all* bp's on the wait queue to the failfast flush
		 * queue, including those that do NOT have B_FAILFAST set.
		 */
		if (un->un_failfast_headp == NULL) {
			ASSERT(un->un_failfast_tailp == NULL);
			un->un_failfast_headp = un->un_waitq_headp;
		} else {
			ASSERT(un->un_failfast_tailp != NULL);
			un->un_failfast_tailp->av_forw = un->un_waitq_headp;
		}

		un->un_failfast_tailp = un->un_waitq_tailp;

		/* update kstat for each bp moved out of the waitq */
		for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) {
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
		}

		/* empty the waitq */
		un->un_waitq_headp = un->un_waitq_tailp = NULL;

	} else {
		/*
		 * Go through the wait queue, pick off all entries with
		 * B_FAILFAST set, and move these onto the failfast queue.
		 */
		for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
			/*
			 * Save the pointer to the next bp on the wait queue,
			 * so we get to it on the next iteration of this loop.
			 */
			next_waitq_bp = bp->av_forw;

			/*
			 * If this bp from the wait queue does NOT have
			 * B_FAILFAST set, just move on to the next element
			 * in the wait queue. Note, this is the only place
			 * where it is correct to set prev_waitq_bp.
			 */
			if ((bp->b_flags & B_FAILFAST) == 0) {
				prev_waitq_bp = bp;
				continue;
			}

			/*
			 * Remove the bp from the wait queue.
			 */
			if (bp == un->un_waitq_headp) {
				/* The bp is the first element of the waitq. */
				un->un_waitq_headp = next_waitq_bp;
				if (un->un_waitq_headp == NULL) {
					/* The wait queue is now empty */
					un->un_waitq_tailp = NULL;
				}
			} else {
				/*
				 * The bp is either somewhere in the middle
				 * or at the end of the wait queue.
				 */
				ASSERT(un->un_waitq_headp != NULL);
				ASSERT(prev_waitq_bp != NULL);
				ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
				    == 0);
				if (bp == un->un_waitq_tailp) {
					/* bp is the last entry on the waitq. */
					ASSERT(next_waitq_bp == NULL);
					un->un_waitq_tailp = prev_waitq_bp;
				}
				prev_waitq_bp->av_forw = next_waitq_bp;
			}
			bp->av_forw = NULL;

			/*
			 * Update kstat since the bp is moved out of
			 * the waitq.
			 */
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);

			/*
			 * Now put the bp onto the failfast queue.
			 */
			if (un->un_failfast_headp == NULL) {
				/* failfast queue is currently empty */
				ASSERT(un->un_failfast_tailp == NULL);
				un->un_failfast_headp =
				    un->un_failfast_tailp = bp;
			} else {
				/* Add the bp to the end of the failfast q */
				ASSERT(un->un_failfast_tailp != NULL);
				ASSERT(un->un_failfast_tailp->b_flags &
				    B_FAILFAST);
				un->un_failfast_tailp->av_forw = bp;
				un->un_failfast_tailp = bp;
			}
		}
	}

	/*
	 * Now return all bp's on the failfast queue to their owners.
	 */
	while ((bp = un->un_failfast_headp) != NULL) {

		un->un_failfast_headp = bp->av_forw;
		if (un->un_failfast_headp == NULL) {
			un->un_failfast_tailp = NULL;
		}

		/*
		 * We want to return the bp with a failure error code, but
		 * we do not want a call to sd_start_cmds() to occur here,
		 * so use sd_return_failed_command_no_restart() instead of
		 * sd_return_failed_command().
		 */
		sd_return_failed_command_no_restart(un, bp, EIO);
	}

	/* Flush the xbuf queues if required. */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}
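
/*
 * Illustrative sketch (not compiled): how a consumer above the driver
 * might mark an I/O for failfast treatment. B_FAILFAST is a standard
 * b_flags bit; everything else here (the function name, the target dev_t)
 * is a placeholder for illustration.
 */
#if 0
static void
example_issue_failfast_read(dev_t dev, struct buf *bp)
{
	bp->b_edev = dev;
	bp->b_flags |= B_READ | B_FAILFAST;	/* fail promptly on error */
	(void) bdev_strategy(bp);
}
#endif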


/*
 * Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *		flushing. Used with ddi_xbuf_flushq(9F).
 *
 * Arguments: bp - ptr to buf struct to be examined.
 *
 * Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
	/*
	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
	 * state is entered; OR (2) the given bp has B_FAILFAST set.
	 */
	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}



#if defined(__i386) || defined(__amd64)
/*
 * Function: sd_setup_next_xfer
 *
 * Description: Prepare the next I/O operation using DMA_PARTIAL.
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t	num_blks_not_xfered;
	daddr_t	strt_blk_num;
	ssize_t	bytes_not_xfered;
	int	rval;

	ASSERT(pkt->pkt_resid == 0);

	/*
	 * Calculate the next block number and amount to be transferred.
	 *
	 * How much data has NOT been transferred to the HBA yet.
	 */
	bytes_not_xfered = xp->xb_dma_resid;

	/*
	 * Figure out how many blocks have NOT been transferred to the
	 * HBA yet.
	 */
	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/*
	 * Set the starting block number to the end of what WAS transferred.
	 */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	/*
	 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
	 * will call scsi_init_pkt with NULL_FUNC so we do not have to
	 * release the disk mutex here.
	 */
	rval = sd_setup_next_rw_pkt(un, pkt, bp,
	    strt_blk_num, num_blks_not_xfered);

	if (rval == 0) {

		/*
		 * Success.
		 *
		 * Adjust things if there are still more blocks to be
		 * transferred.
		 */
		xp->xb_dma_resid = pkt->pkt_resid;
		pkt->pkt_resid = 0;

		return (1);
	}

	/*
	 * There's really only one possible failure return value from
	 * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt
	 * returns NULL.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	bp->b_resid = bp->b_bcount;
	bp->b_flags |= B_ERROR;

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "Error setting up next portion of DMA transfer\n");

	return (0);
}
#endif
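
/*
 * Illustrative sketch (not compiled): how a command-completion handler
 * might continue a DMA_PARTIAL transfer with sd_setup_next_xfer(). The
 * function name and control flow here are assumptions for illustration;
 * the driver's real continuation logic lives in its completion path.
 */
#if 0
static void
example_continue_partial_dma(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	if (xp->xb_dma_resid != 0 && pkt->pkt_resid == 0) {
		/* More of the buf remains; set up and send the next chunk */
		if (sd_setup_next_xfer(un, bp, pkt, xp) != 0) {
			(void) scsi_transport(pkt);
			return;
		}
		/* Setup failed; bp was marked B_ERROR by the callee */
	}
	biodone(bp);
}
#endif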

/*
 * Note: The following sd_faultinjection_ioctl() routines implement
 * driver support for fault injection, used for error analysis by
 * injecting faults into multiple layers of the driver.
 */

#ifdef SD_FAULT_INJECTION
static uint_t   sd_fault_injection_on = 0;

/*
 * Function: sd_faultinjection_ioctl()
 *
 * Description: This routine is the driver entry point for handling
 *		fault-injection ioctls that inject errors into the
 *		layer model.
 *
 * Arguments: cmd	- the ioctl cmd received
 *	arg	- the arguments from the user, also used to return data
 */

static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
{
	uint_t i;
	uint_t rval;

	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

	mutex_enter(SD_MUTEX(un));

	switch (cmd) {
	case SDIOCRUN:
		/* Allow pushed faults to be injected */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Run\n");

		sd_fault_injection_on = 1;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: run finished\n");
		break;

	case SDIOCSTART:
		/* Start Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Start\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0xFFFFFFFF;
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		mutex_enter(&(un->un_fi_mutex));
		un->sd_fi_log[0] = '\0';
		un->sd_fi_buf_len = 0;
		mutex_exit(&(un->un_fi_mutex));

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: start finished\n");
		break;

	case SDIOCSTOP:
		/* Stop Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0x0;

		/* Empty stray or unused structs from fifo */
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			if (un->sd_fi_fifo_pkt[i] != NULL) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
			}
			if (un->sd_fi_fifo_xb[i] != NULL) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
			}
			if (un->sd_fi_fifo_un[i] != NULL) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
			}
			if (un->sd_fi_fifo_arq[i] != NULL) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
			}
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: stop finished\n");
		break;

	case SDIOCINSERTPKT:
		/* Store a packet struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		/* No more than SD_FI_MAX_ERROR allowed in Queue */
		if (un->sd_fi_fifo_pkt[i] != NULL) {
			kmem_free(un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt));
		}
		if (arg != NULL) {
			un->sd_fi_fifo_pkt[i] =
			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
			if (un->sd_fi_fifo_pkt[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
				un->sd_fi_fifo_pkt[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: pkt null\n");
		}
		break;

	case SDIOCINSERTXB:
		/* Store an xb struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_xb[i] != NULL) {
			kmem_free(un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb));
			un->sd_fi_fifo_xb[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_xb[i] =
			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
			if (un->sd_fi_fifo_xb[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb), 0);

			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
				un->sd_fi_fifo_xb[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: xb null\n");
		}
		break;

	case SDIOCINSERTUN:
		/* Store a un struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_un[i] != NULL) {
			kmem_free(un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un));
			un->sd_fi_fifo_un[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_un[i] =
			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
			if (un->sd_fi_fifo_un[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
				un->sd_fi_fifo_un[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: un null\n");
		}

		break;

	case SDIOCINSERTARQ:
		/* Store an arq struct to be pushed onto fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_arq[i] != NULL) {
			kmem_free(un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq));
			un->sd_fi_fifo_arq[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_arq[i] =
			    kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/* Push stored xb, pkt, un, and arq onto fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			if (un->sd_fi_fifo_end + 1 < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return buffer of log from Injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len+1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid; set it to NULL
			 * for return.
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n");
}


/*
 * Function: sd_injection_log()
 *
 * Description: This routine appends buf to the existing injection log
 *		so it can later be retrieved via sd_faultinjection_ioctl()
 *		for use in fault detection and recovery.
 *
 * Arguments: buf - the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	/* Add logged value to Injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t	offset = strlen((char *)un->sd_fi_log);
		char *destp = (char *)un->sd_fi_log + offset;
		int i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}
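
/*
 * Illustrative sketch (not compiled): the sequence a user-level test
 * harness might use to drive the SDIOC* fault-injection ioctls. The
 * device path and the particular sd_fi_pkt contents are placeholders;
 * the command ordering (start, insert, push, run, retrieve) follows the
 * case handling above.
 */
#if 0
	int fd = open("/dev/rdsk/c0t0d0s0", O_RDWR);
	struct sd_fi_pkt fi_pkt = { 0 };
	uint_t npush = 1;
	char log[SD_FI_MAX_BUF];

	fi_pkt.pkt_reason = CMD_TRAN_ERR;	/* example injected error */

	(void) ioctl(fd, SDIOCSTART, NULL);		/* reset session */
	(void) ioctl(fd, SDIOCINSERTPKT, &fi_pkt);	/* stage a pkt fault */
	(void) ioctl(fd, SDIOCPUSH, &npush);		/* push onto fifo */
	(void) ioctl(fd, SDIOCRUN, NULL);		/* arm injection */
	/* ... perform I/O that triggers sdintr, then collect the log ... */
	(void) ioctl(fd, SDIOCRETRIEVE, log);
#endif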


/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
 *
 * Arguments: pktp - packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* pull bp, xb and un from pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* if injection is off return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}


	/* take next set off fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];


	/* set variables accordingly */
	/* set pkt if it was on fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}

	/* set xb if it was on fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* copy in block data from sense */
		if (fi_xb->xb_sense_data[0] != -1) {
			bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
			    SENSE_LENGTH);
		}

		/* copy in extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code,
		    "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key,
		    "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code,
		    "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb,
		    es_qual_code, "es_qual_code");
	}

	/* set un if it was on fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_geometry_is_valid,
		    "un_f_geometry_is_valid");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* copy in auto request sense if it was on fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* free structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free() does not guarantee that the pointers are set to
	 * NULL. Since we use these pointers to determine whether values
	 * were injected, make sure they are always NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}
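
/*
 * Illustrative sketch (not compiled): how the interrupt handler might
 * hook sd_faultinjection() into command completion when the driver is
 * built with SD_FAULT_INJECTION. The function name "example_intr_hook"
 * and its placement are assumptions for illustration.
 */
#if 0
static void
example_intr_hook(struct scsi_pkt *pktp)
{
	/* Mutate the completed pkt per the staged fault, if armed */
	if (sd_fault_injection_on) {
		sd_faultinjection(pktp);
	}
	/* ... normal completion processing follows ... */
}
#endif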

#endif /* SD_FAULT_INJECTION */