/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SCSI disk target driver.
 */

#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>
#if (defined(__fibre))
/* Note: is there a leadville version of the following? */
#include <sys/fc4/fcal_linkapp.h>
#endif
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>

#include "sd_xbuf.h"

#include <sys/scsi/targets/sddef.h>


/*
 * Loadable module info.
 */
#if (defined(__fibre))
#define	SD_MODULE_NAME	"SCSI SSA/FCAL Disk Driver %I%"
char _depends_on[]	= "misc/scsi drv/fcp";
#else
#define	SD_MODULE_NAME	"SCSI Disk Driver %I%"
char _depends_on[]	= "misc/scsi";
#endif

/*
 * Define the interconnect type, to allow the driver to distinguish
 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
 *
 * This is really for backward compatibility. In the future, the driver
 * should actually check the "interconnect-type" property as reported by
 * the HBA; however at present this property is not defined by all HBAs,
 * so we will use this #define (1) to permit the driver to run in
 * backward-compatibility mode; and (2) to print a notification message
 * if an FC HBA does not support the "interconnect-type" property. The
 * behavior of the driver will be to assume parallel SCSI behaviors unless
 * the "interconnect-type" property is defined by the HBA **AND** has a
 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
 * Channel behaviors (as per the old ssd). (Note that the
 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
 * will result in the driver assuming parallel SCSI behaviors.)
 *
 * (see common/sys/scsi/impl/services.h)
 *
 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
 * since some FC HBAs may already support that, and there is some code in
 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
 * default would confuse that code, and besides things should work fine
 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
 * "interconnect_type" property.
 */
#if (defined(__fibre))
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_FIBRE
#else
#define	SD_DEFAULT_INTERCONNECT_TYPE	SD_INTERCONNECT_PARALLEL
#endif
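
/*
 * Illustrative sketch only (not part of this driver): how the
 * "interconnect-type" property described above might be consulted,
 * assuming it were exported as a devinfo property readable with
 * ddi_prop_get_int(9F). A failed lookup (-1) would fall back to the
 * backward-compatibility default defined above:
 *
 *	int itype = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
 *	    DDI_PROP_DONTPASS, "interconnect-type", -1);
 *	if (itype == -1)
 *		itype = SD_DEFAULT_INTERCONNECT_TYPE;
 */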

/*
 * The name of the driver, established from the module name in _init.
 */
static	char *sd_label			= NULL;

/*
 * Driver name is unfortunately prefixed on some driver.conf properties.
 */
#if (defined(__fibre))
#define	sd_max_xfer_size		ssd_max_xfer_size
#define	sd_config_list			ssd_config_list
static	char *sd_max_xfer_size		= "ssd_max_xfer_size";
static	char *sd_config_list		= "ssd-config-list";
#else
static	char *sd_max_xfer_size		= "sd_max_xfer_size";
static	char *sd_config_list		= "sd-config-list";
#endif

/*
 * Driver global variables
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All global variables need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this namespace issue should go away.
 */
#define	sd_state			ssd_state
#define	sd_io_time			ssd_io_time
#define	sd_failfast_enable		ssd_failfast_enable
#define	sd_ua_retry_count		ssd_ua_retry_count
#define	sd_report_pfa			ssd_report_pfa
#define	sd_max_throttle			ssd_max_throttle
#define	sd_min_throttle			ssd_min_throttle
#define	sd_rot_delay			ssd_rot_delay

#define	sd_retry_on_reservation_conflict	\
					ssd_retry_on_reservation_conflict
#define	sd_reinstate_resv_delay		ssd_reinstate_resv_delay
#define	sd_resv_conflict_name		ssd_resv_conflict_name

#define	sd_component_mask		ssd_component_mask
#define	sd_level_mask			ssd_level_mask
#define	sd_debug_un			ssd_debug_un
#define	sd_error_level			ssd_error_level

#define	sd_xbuf_active_limit		ssd_xbuf_active_limit
#define	sd_xbuf_reserve_limit		ssd_xbuf_reserve_limit

#define	sd_tr				ssd_tr
#define	sd_reset_throttle_timeout	ssd_reset_throttle_timeout
#define	sd_check_media_time		ssd_check_media_time
#define	sd_wait_cmds_complete		ssd_wait_cmds_complete
#define	sd_label_mutex			ssd_label_mutex
#define	sd_detach_mutex			ssd_detach_mutex
#define	sd_log_buf			ssd_log_buf
#define	sd_log_mutex			ssd_log_mutex

#define	sd_disk_table			ssd_disk_table
#define	sd_disk_table_size		ssd_disk_table_size
#define	sd_sense_mutex			ssd_sense_mutex
#define	sd_cdbtab			ssd_cdbtab

#define	sd_cb_ops			ssd_cb_ops
#define	sd_ops				ssd_ops
#define	sd_additional_codes		ssd_additional_codes

#define	sd_minor_data			ssd_minor_data
#define	sd_minor_data_efi		ssd_minor_data_efi

#define	sd_tq				ssd_tq
#define	sd_wmr_tq			ssd_wmr_tq
#define	sd_taskq_name			ssd_taskq_name
#define	sd_wmr_taskq_name		ssd_wmr_taskq_name
#define	sd_taskq_minalloc		ssd_taskq_minalloc
#define	sd_taskq_maxalloc		ssd_taskq_maxalloc

#define	sd_dump_format_string		ssd_dump_format_string

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain

#define	sd_pm_idletime			ssd_pm_idletime

#define	sd_force_pm_supported		ssd_force_pm_supported

#define	sd_dtype_optical_bind		ssd_dtype_optical_bind
#endif


#ifdef	SDDEBUG
int	sd_force_pm_supported		= 0;
#endif	/* SDDEBUG */

void	*sd_state			= NULL;
int	sd_io_time			= SD_IO_TIME;
int	sd_failfast_enable		= 1;
int	sd_ua_retry_count		= SD_UA_RETRY_COUNT;
int	sd_report_pfa			= 1;
int	sd_max_throttle			= SD_MAX_THROTTLE;
int	sd_min_throttle			= SD_MIN_THROTTLE;
int	sd_rot_delay			= 4; /* Default 4ms Rotation delay */

int	sd_retry_on_reservation_conflict	= 1;
int	sd_reinstate_resv_delay		= SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))

static int sd_dtype_optical_bind	= -1;

/* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
static	char *sd_resv_conflict_name	= "sd_retry_on_reservation_conflict";

/*
 * Global data for debug logging. To enable debug printing, sd_component_mask
 * and sd_level_mask should be set to the desired bit patterns as outlined in
 * sddef.h.
 */
uint_t	sd_component_mask		= 0x0;
uint_t	sd_level_mask			= 0x0;
struct	sd_lun *sd_debug_un		= NULL;
uint_t	sd_error_level			= SCSI_ERR_RETRYABLE;
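
/*
 * Illustrative example only (not part of this driver): the debug masks
 * above would typically be tuned from a debugger on a live kernel, e.g.
 * with mdb -kw (the mask values shown are placeholders; see sddef.h for
 * the real bit definitions):
 *
 *	> sd_component_mask/W 0xffffffff
 *	> sd_level_mask/W 0x7
 */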

/* Note: these may go away in the future... */
static uint32_t	sd_xbuf_active_limit	= 512;
static uint32_t sd_xbuf_reserve_limit	= 16;

static struct sd_resv_reclaim_request	sd_tr = { NULL, NULL, NULL, 0, 0, 0 };

/*
 * Timer value used to reset the throttle after it has been reduced
 * (typically in response to TRAN_BUSY)
 */
static int sd_reset_throttle_timeout	= SD_RESET_THROTTLE_TIMEOUT;

/*
 * Interval value associated with the media change scsi watch.
 */
static int sd_check_media_time		= 3000000;

/*
 * Wait value used for in progress operations during a DDI_SUSPEND
 */
static int sd_wait_cmds_complete	= SD_WAIT_CMDS_COMPLETE;

/*
 * sd_label_mutex protects a static buffer used in the disk label
 * component of the driver
 */
static kmutex_t sd_label_mutex;

/*
 * sd_detach_mutex protects un_layer_count, un_detach_count, and
 * un_opens_in_progress in the sd_lun structure.
 */
static kmutex_t sd_detach_mutex;

_NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
	sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))

/*
 * Global buffer and mutex for debug logging
 */
static char	sd_log_buf[1024];
static kmutex_t	sd_log_mutex;


/*
 * "Smart" Probe Caching structs, globals, #defines, etc.
 * For parallel scsi and non-self-identify device only.
 */

/*
 * The following resources and routines are implemented to support
 * "smart" probing, which caches the scsi_probe() results in an array,
 * in order to help avoid long probe times.
 */
struct sd_scsi_probe_cache {
	struct	sd_scsi_probe_cache	*next;
	dev_info_t	*pdip;
	int		cache[NTARGETS_WIDE];
};

static kmutex_t	sd_scsi_probe_cache_mutex;
static struct	sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;

/*
 * Really we only need protection on the head of the linked list, but
 * better safe than sorry.
 */
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
	sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))

_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
	sd_scsi_probe_cache_head))
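
/*
 * Illustrative sketch only (not driver code): how a cached probe result
 * for a given parent HBA node (pdip) and target number might be looked
 * up under the mutex above. Variable names are placeholders.
 *
 *	struct sd_scsi_probe_cache *cp;
 *
 *	mutex_enter(&sd_scsi_probe_cache_mutex);
 *	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
 *		if (cp->pdip == pdip)
 *			break;	(found the entry for this parent node)
 *	}
 *	(cp->cache[tgt] would then hold the saved scsi_probe() result)
 *	mutex_exit(&sd_scsi_probe_cache_mutex);
 */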


/*
 * Vendor specific data name property declarations
 */

#if defined(__fibre) || defined(__i386) || defined(__amd64)

/*
 * Editorial note, inferred from the SD_CONF_BSET_* flags used with these
 * tables below: the positional sd_tunables fields appear to be throttle,
 * ctype, not-ready retries, busy retries, reset retries, reserve-release
 * time, min throttle, disksort-disabled flag, and lun-reset-enabled flag.
 */
static sd_tunables seagate_properties = {
	SEAGATE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_properties = {
	0,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables lsi_oem_properties = {
	0,
	0,
	LSI_OEM_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables fujitsu_properties = {
	FUJITSU_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables ibm_properties = {
	IBM_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables purple_properties = {
	PURPLE_THROTTLE_VALUE,
	0,
	0,
	PURPLE_BUSY_RETRIES,
	PURPLE_RESET_RETRY_COUNT,
	PURPLE_RESERVE_RELEASE_TIME,
	0,
	0,
	0
};

static sd_tunables sve_properties = {
	SVE_THROTTLE_VALUE,
	0,
	0,
	SVE_BUSY_RETRIES,
	SVE_RESET_RETRY_COUNT,
	SVE_RESERVE_RELEASE_TIME,
	SVE_MIN_THROTTLE_VALUE,
	SVE_DISKSORT_DISABLED_FLAG,
	0
};

static sd_tunables maserati_properties = {
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	MASERATI_DISKSORT_DISABLED_FLAG,
	MASERATI_LUN_RESET_ENABLED_FLAG
};

static sd_tunables pirus_properties = {
	PIRUS_THROTTLE_VALUE,
	0,
	PIRUS_NRR_COUNT,
	PIRUS_BUSY_RETRIES,
	PIRUS_RESET_RETRY_COUNT,
	0,
	PIRUS_MIN_THROTTLE_VALUE,
	PIRUS_DISKSORT_DISABLED_FLAG,
	PIRUS_LUN_RESET_ENABLED_FLAG
};

#endif
#if (defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64))

static sd_tunables lsi_properties_scsi = {
	LSI_THROTTLE_VALUE,
	0,
	LSI_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables elite_properties = {
	ELITE_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

static sd_tunables st31200n_properties = {
	ST31200N_THROTTLE_VALUE,
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	0
};

#endif /* Fibre or not */

static sd_tunables symbios_properties = {
	SYMBIOS_THROTTLE_VALUE,
	0,
	SYMBIOS_NOTREADY_RETRIES,
	0,
	0,
	0,
	0,
	0,
	0
};




#if (defined(SD_PROP_TST))

#define	SD_TST_CTYPE_VAL	CTYPE_CDROM
#define	SD_TST_THROTTLE_VAL	16
#define	SD_TST_NOTREADY_VAL	12
#define	SD_TST_BUSY_VAL		60
#define	SD_TST_RST_RETRY_VAL	36
#define	SD_TST_RSV_REL_TIME	60

static sd_tunables tst_properties = {
	SD_TST_THROTTLE_VAL,
	SD_TST_CTYPE_VAL,
	SD_TST_NOTREADY_VAL,
	SD_TST_BUSY_VAL,
	SD_TST_RST_RETRY_VAL,
	SD_TST_RSV_REL_TIME,
	0,
	0,
	0
};
#endif

/* This is similar to the ANSI toupper implementation */
#define	SD_TOUPPER(C)	(((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))
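
/*
 * For example, SD_TOUPPER('c') evaluates to 'C', while characters outside
 * 'a'..'z' (e.g. '3' or 'F') pass through unchanged.
 */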

/*
 * Static Driver Configuration Table
 *
 * This is the table of disks which need throttle adjustment (or, perhaps
 * something else as defined by the flags at a future time.) device_id
 * is a string consisting of concatenated vid (vendor), pid (product/model)
 * and revision strings as defined in the scsi_inquiry structure. Offsets of
 * the parts of the string are as defined by the sizes in the scsi_inquiry
 * structure. Device type is searched as far as the device_id string is
 * defined. Flags defines which values are to be set in the driver from the
 * properties list.
 *
 * Entries below which begin and end with a "*" are a special case.
 * These do not have a specific vendor, and the string which follows
 * can appear anywhere in the 16 byte PID portion of the inquiry data.
 *
 * Entries below which begin and end with a " " (blank) are a special
 * case. The comparison function will treat multiple consecutive blanks
 * as equivalent to a single blank. For example, this causes a
 * sd_disk_table entry of " NEC CDROM " to match a device's id string
 * of "NEC CDROM".
 *
 * Note: The MD21 controller type has been obsoleted.
 *	 ST318202F is a Legacy device
 *	 MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
 *	 made with an FC connection. The entries here are a legacy.
 */
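
/*
 * Illustrative examples of the matching rules above (editorial, not
 * driver code): the "*CSM100_*" entry below matches any inquiry string
 * whose 16-byte PID field contains "CSM100_" anywhere within it, and
 * the " NEC CD-ROM DRIVE:260 " entry matches a device whose id string
 * reads "NEC CD-ROM DRIVE:260", since runs of blanks compare as one.
 */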
static sd_disk_config_t sd_disk_table[] = {
#if defined(__fibre) || defined(__i386) || defined(__amd64)
	{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
	{ "FUJITSU MAG3091F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAG3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAA3182F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAF3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3364F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAL3738F",  SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
	{ "IBM     DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
	{ "IBM     3526",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3542",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     3552",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1722",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1742",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     1815",      SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "IBM     FAStT",     SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI     INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "ENGENIO INF",       SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     TP",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SGI     IS",        SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM100_*",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "*CSM200_*",         SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "LSI",               SD_CONF_BSET_NRR_COUNT, &lsi_properties },
	{ "SUN     T3", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SESS01", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_RSV_REL_TIME|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED,
		&sve_properties },
	{ "SUN     T4", SD_CONF_BSET_THROTTLE |
			SD_CONF_BSET_BSY_RETRY_COUNT|
			SD_CONF_BSET_RST_RETRIES|
			SD_CONF_BSET_RSV_REL_TIME,
		&purple_properties },
	{ "SUN     SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&maserati_properties },
	{ "SUN     SE6920", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     PSX1000", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "SUN     SE6330", SD_CONF_BSET_THROTTLE |
		SD_CONF_BSET_NRR_COUNT|
		SD_CONF_BSET_BSY_RETRY_COUNT|
		SD_CONF_BSET_RST_RETRIES|
		SD_CONF_BSET_MIN_THROTTLE|
		SD_CONF_BSET_DISKSORT_DISABLED|
		SD_CONF_BSET_LUN_RESET_ENABLED,
		&pirus_properties },
	{ "STK     OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     BladeCtlr",   SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "STK     FLEXLINE",    SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
	{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
#endif /* fibre or NON-sparc platforms */
#if ((defined(__sparc) && !defined(__fibre)) || \
	(defined(__i386) || defined(__amd64)))
	{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
	{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
	{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
	{ "CONNER  CP30540",  SD_CONF_BSET_NOCACHE,  NULL },
	{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
	{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
	    &symbios_properties },
	{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
	    &lsi_properties_scsi },
#if defined(__i386) || defined(__amd64)
	{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4), NULL },

	{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4), NULL },
#endif /* __i386 || __amd64 */
#endif /* sparc NON-fibre or NON-sparc platforms */

#if (defined(SD_PROP_TST))
	{ "VENDOR  PRODUCT ", (SD_CONF_BSET_THROTTLE
				| SD_CONF_BSET_CTYPE
				| SD_CONF_BSET_NRR_COUNT
				| SD_CONF_BSET_FAB_DEVID
				| SD_CONF_BSET_NOCACHE
				| SD_CONF_BSET_BSY_RETRY_COUNT
				| SD_CONF_BSET_PLAYMSF_BCD
				| SD_CONF_BSET_READSUB_BCD
				| SD_CONF_BSET_READ_TOC_TRK_BCD
				| SD_CONF_BSET_READ_TOC_ADDR_BCD
				| SD_CONF_BSET_NO_READ_HEADER
				| SD_CONF_BSET_READ_CD_XD4
				| SD_CONF_BSET_RST_RETRIES
				| SD_CONF_BSET_RSV_REL_TIME
				| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};

static const int sd_disk_table_size =
	sizeof (sd_disk_table)/ sizeof (sd_disk_config_t);


/*
 * Return codes of sd_uselabel().
 */
#define	SD_LABEL_IS_VALID		0
#define	SD_LABEL_IS_INVALID		1

#define	SD_INTERCONNECT_PARALLEL	0
#define	SD_INTERCONNECT_FABRIC		1
#define	SD_INTERCONNECT_FIBRE		2
#define	SD_INTERCONNECT_SSA		3
#define	SD_IS_PARALLEL_SCSI(un)		\
	((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)

/*
 * Definitions used by device id registration routines
 */
#define	VPD_HEAD_OFFSET		3	/* size of head for vpd page */
#define	VPD_PAGE_LENGTH		3	/* offset for page length data */
#define	VPD_MODE_PAGE		1	/* offset into vpd pg for "page code" */
#define	WD_NODE			7	/* the whole disk minor */

static kmutex_t sd_sense_mutex = {0};

/*
 * Macros for updates of the driver state
 */
#define	New_state(un, s)	\
	(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define	Restore_state(un)	\
	{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
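
/*
 * Illustrative example only (not driver code): a transition such as
 *
 *	New_state(un, SD_STATE_SUSPENDED);
 *
 * records the previous un_state in un_last_state, so a later
 * Restore_state(un) returns the instance to the state it was in before
 * the transition (the SD_STATE_* values are assumed from sddef.h).
 */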

static struct sd_cdbinfo sd_cdbtab[] = {
	{ CDB_GROUP0, 0x00,	   0x1FFFFF,   0xFF,	    },
	{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF,	    },
	{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF,  },
	{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};

/*
 * Specifies the number of seconds that must have elapsed since the last
 * cmd. has completed for a device to be declared idle to the PM framework.
 */
static int sd_pm_idletime = 1;

/*
 * Internal function prototypes
 */

#if (defined(__fibre))
/*
 * These #defines are to avoid namespace collisions that occur because this
 * code is currently used to compile two separate driver modules: sd and ssd.
 * All function names need to be treated this way (even if declared static)
 * in order to allow the debugger to resolve the names properly.
 * It is anticipated that in the near future the ssd module will be obsoleted,
 * at which time this ugliness should go away.
 */
#define	sd_log_trace			ssd_log_trace
#define	sd_log_info			ssd_log_info
#define	sd_log_err			ssd_log_err
#define	sdprobe				ssdprobe
#define	sdinfo				ssdinfo
#define	sd_prop_op			ssd_prop_op
#define	sd_scsi_probe_cache_init	ssd_scsi_probe_cache_init
#define	sd_scsi_probe_cache_fini	ssd_scsi_probe_cache_fini
#define	sd_scsi_clear_probe_cache	ssd_scsi_clear_probe_cache
#define	sd_scsi_probe_with_cache	ssd_scsi_probe_with_cache
#define	sd_spin_up_unit			ssd_spin_up_unit
#define	sd_enable_descr_sense		ssd_enable_descr_sense
#define	sd_set_mmc_caps			ssd_set_mmc_caps
#define	sd_read_unit_properties		ssd_read_unit_properties
#define	sd_process_sdconf_file		ssd_process_sdconf_file
#define	sd_process_sdconf_table		ssd_process_sdconf_table
#define	sd_sdconf_id_match		ssd_sdconf_id_match
#define	sd_blank_cmp			ssd_blank_cmp
#define	sd_chk_vers1_data		ssd_chk_vers1_data
#define	sd_set_vers1_properties		ssd_set_vers1_properties
#define	sd_validate_geometry		ssd_validate_geometry

#if defined(_SUNOS_VTOC_16)
#define	sd_convert_geometry		ssd_convert_geometry
#endif

#define	sd_resync_geom_caches		ssd_resync_geom_caches
#define	sd_read_fdisk			ssd_read_fdisk
#define	sd_get_physical_geometry	ssd_get_physical_geometry
#define	sd_get_virtual_geometry		ssd_get_virtual_geometry
#define	sd_update_block_info		ssd_update_block_info
#define	sd_swap_efi_gpt			ssd_swap_efi_gpt
#define	sd_swap_efi_gpe			ssd_swap_efi_gpe
#define	sd_validate_efi			ssd_validate_efi
#define	sd_use_efi			ssd_use_efi
#define	sd_uselabel			ssd_uselabel
#define	sd_build_default_label		ssd_build_default_label
#define	sd_has_max_chs_vals		ssd_has_max_chs_vals
#define	sd_inq_fill			ssd_inq_fill
#define	sd_register_devid		ssd_register_devid
#define	sd_get_devid_block		ssd_get_devid_block
#define	sd_get_devid			ssd_get_devid
#define	sd_create_devid			ssd_create_devid
#define	sd_write_deviceid		ssd_write_deviceid
#define	sd_check_vpd_page_support	ssd_check_vpd_page_support
#define	sd_setup_pm			ssd_setup_pm
#define	sd_create_pm_components		ssd_create_pm_components
#define	sd_ddi_suspend			ssd_ddi_suspend
#define	sd_ddi_pm_suspend		ssd_ddi_pm_suspend
#define	sd_ddi_resume			ssd_ddi_resume
#define	sd_ddi_pm_resume		ssd_ddi_pm_resume
#define	sdpower				ssdpower
#define	sdattach			ssdattach
#define	sddetach			ssddetach
#define	sd_unit_attach			ssd_unit_attach
#define	sd_unit_detach			ssd_unit_detach
#define	sd_create_minor_nodes		ssd_create_minor_nodes
#define	sd_create_errstats		ssd_create_errstats
#define	sd_set_errstats			ssd_set_errstats
#define	sd_set_pstats			ssd_set_pstats
#define	sddump				ssddump
#define	sd_scsi_poll			ssd_scsi_poll
#define	sd_send_polled_RQS		ssd_send_polled_RQS
#define	sd_ddi_scsi_poll		ssd_ddi_scsi_poll
#define	sd_init_event_callbacks		ssd_init_event_callbacks
#define	sd_event_callback		ssd_event_callback
#define	sd_disable_caching		ssd_disable_caching
#define	sd_make_device			ssd_make_device
#define	sdopen				ssdopen
#define	sdclose				ssdclose
#define	sd_ready_and_valid		ssd_ready_and_valid
#define	sdmin				ssdmin
#define	sdread				ssdread
#define	sdwrite				ssdwrite
#define	sdaread				ssdaread
#define	sdawrite			ssdawrite
#define	sdstrategy			ssdstrategy
#define	sdioctl				ssdioctl
#define	sd_mapblockaddr_iostart		ssd_mapblockaddr_iostart
#define	sd_mapblocksize_iostart		ssd_mapblocksize_iostart
#define	sd_checksum_iostart		ssd_checksum_iostart
#define	sd_checksum_uscsi_iostart	ssd_checksum_uscsi_iostart
#define	sd_pm_iostart			ssd_pm_iostart
#define	sd_core_iostart			ssd_core_iostart
#define	sd_mapblockaddr_iodone		ssd_mapblockaddr_iodone
#define	sd_mapblocksize_iodone		ssd_mapblocksize_iodone
#define	sd_checksum_iodone		ssd_checksum_iodone
#define	sd_checksum_uscsi_iodone	ssd_checksum_uscsi_iodone
#define	sd_pm_iodone			ssd_pm_iodone
#define	sd_initpkt_for_buf		ssd_initpkt_for_buf
#define	sd_destroypkt_for_buf		ssd_destroypkt_for_buf
#define	sd_setup_rw_pkt			ssd_setup_rw_pkt
#define	sd_setup_next_rw_pkt		ssd_setup_next_rw_pkt
#define	sd_buf_iodone			ssd_buf_iodone
#define	sd_uscsi_strategy		ssd_uscsi_strategy
#define	sd_initpkt_for_uscsi		ssd_initpkt_for_uscsi
#define	sd_destroypkt_for_uscsi		ssd_destroypkt_for_uscsi
#define	sd_uscsi_iodone			ssd_uscsi_iodone
#define	sd_xbuf_strategy		ssd_xbuf_strategy
#define	sd_xbuf_init			ssd_xbuf_init
#define	sd_pm_entry			ssd_pm_entry
#define	sd_pm_exit			ssd_pm_exit

#define	sd_pm_idletimeout_handler	ssd_pm_idletimeout_handler
#define	sd_pm_timeout_handler		ssd_pm_timeout_handler

#define	sd_add_buf_to_waitq		ssd_add_buf_to_waitq
#define	sdintr				ssdintr
#define	sd_start_cmds			ssd_start_cmds
#define	sd_send_scsi_cmd		ssd_send_scsi_cmd
#define	sd_bioclone_alloc		ssd_bioclone_alloc
#define	sd_bioclone_free		ssd_bioclone_free
#define	sd_shadow_buf_alloc		ssd_shadow_buf_alloc
#define	sd_shadow_buf_free		ssd_shadow_buf_free
#define	sd_print_transport_rejected_message	\
					ssd_print_transport_rejected_message
#define	sd_retry_command		ssd_retry_command
#define	sd_set_retry_bp			ssd_set_retry_bp
#define	sd_send_request_sense_command	ssd_send_request_sense_command
#define	sd_start_retry_command		ssd_start_retry_command
#define	sd_start_direct_priority_command	\
					ssd_start_direct_priority_command
#define	sd_return_failed_command	ssd_return_failed_command
#define	sd_return_failed_command_no_restart	\
					ssd_return_failed_command_no_restart
#define	sd_return_command		ssd_return_command
#define	sd_sync_with_callback		ssd_sync_with_callback
#define	sdrunout			ssdrunout
#define	sd_mark_rqs_busy		ssd_mark_rqs_busy
#define	sd_mark_rqs_idle		ssd_mark_rqs_idle
#define	sd_reduce_throttle		ssd_reduce_throttle
#define	sd_restore_throttle		ssd_restore_throttle
#define	sd_print_incomplete_msg		ssd_print_incomplete_msg
#define	sd_init_cdb_limits		ssd_init_cdb_limits
#define	sd_pkt_status_good		ssd_pkt_status_good
#define	sd_pkt_status_check_condition	ssd_pkt_status_check_condition
#define	sd_pkt_status_busy		ssd_pkt_status_busy
#define	sd_pkt_status_reservation_conflict	\
					ssd_pkt_status_reservation_conflict
#define	sd_pkt_status_qfull		ssd_pkt_status_qfull
#define	sd_handle_request_sense		ssd_handle_request_sense
#define	sd_handle_auto_request_sense	ssd_handle_auto_request_sense
#define	sd_print_sense_failed_msg	ssd_print_sense_failed_msg
#define	sd_validate_sense_data		ssd_validate_sense_data
#define	sd_decode_sense			ssd_decode_sense
#define	sd_print_sense_msg		ssd_print_sense_msg
#define	sd_extract_sense_info_descr	ssd_extract_sense_info_descr
#define	sd_sense_key_no_sense		ssd_sense_key_no_sense
#define	sd_sense_key_recoverable_error	ssd_sense_key_recoverable_error
#define	sd_sense_key_not_ready		ssd_sense_key_not_ready
#define	sd_sense_key_medium_or_hardware_error	\
					ssd_sense_key_medium_or_hardware_error
#define	sd_sense_key_illegal_request	ssd_sense_key_illegal_request
#define	sd_sense_key_unit_attention	ssd_sense_key_unit_attention
#define	sd_sense_key_fail_command	ssd_sense_key_fail_command
#define	sd_sense_key_blank_check	ssd_sense_key_blank_check
#define	sd_sense_key_aborted_command	ssd_sense_key_aborted_command
#define	sd_sense_key_default		ssd_sense_key_default
#define	sd_print_retry_msg		ssd_print_retry_msg
#define	sd_print_cmd_incomplete_msg	ssd_print_cmd_incomplete_msg
#define	sd_pkt_reason_cmd_incomplete	ssd_pkt_reason_cmd_incomplete
#define	sd_pkt_reason_cmd_tran_err	ssd_pkt_reason_cmd_tran_err
#define	sd_pkt_reason_cmd_reset		ssd_pkt_reason_cmd_reset
#define	sd_pkt_reason_cmd_aborted	ssd_pkt_reason_cmd_aborted
#define	sd_pkt_reason_cmd_timeout	ssd_pkt_reason_cmd_timeout
#define	sd_pkt_reason_cmd_unx_bus_free	ssd_pkt_reason_cmd_unx_bus_free
#define	sd_pkt_reason_cmd_tag_reject	ssd_pkt_reason_cmd_tag_reject
#define	sd_pkt_reason_default		ssd_pkt_reason_default
#define	sd_reset_target			ssd_reset_target
#define	sd_start_stop_unit_callback	ssd_start_stop_unit_callback
#define	sd_start_stop_unit_task		ssd_start_stop_unit_task
#define	sd_taskq_create			ssd_taskq_create
#define	sd_taskq_delete			ssd_taskq_delete
#define	sd_media_change_task		ssd_media_change_task
#define	sd_handle_mchange		ssd_handle_mchange
#define	sd_send_scsi_DOORLOCK		ssd_send_scsi_DOORLOCK
#define	sd_send_scsi_READ_CAPACITY	ssd_send_scsi_READ_CAPACITY
#define	sd_send_scsi_READ_CAPACITY_16	ssd_send_scsi_READ_CAPACITY_16
#define	sd_send_scsi_GET_CONFIGURATION	ssd_send_scsi_GET_CONFIGURATION
#define	sd_send_scsi_feature_GET_CONFIGURATION	\
					ssd_send_scsi_feature_GET_CONFIGURATION
#define	sd_send_scsi_START_STOP_UNIT	ssd_send_scsi_START_STOP_UNIT
#define	sd_send_scsi_INQUIRY		ssd_send_scsi_INQUIRY
#define	sd_send_scsi_TEST_UNIT_READY	ssd_send_scsi_TEST_UNIT_READY
#define	sd_send_scsi_PERSISTENT_RESERVE_IN	\
					ssd_send_scsi_PERSISTENT_RESERVE_IN
#define	sd_send_scsi_PERSISTENT_RESERVE_OUT	\
					ssd_send_scsi_PERSISTENT_RESERVE_OUT
#define	sd_send_scsi_SYNCHRONIZE_CACHE	ssd_send_scsi_SYNCHRONIZE_CACHE
#define	sd_send_scsi_MODE_SENSE		ssd_send_scsi_MODE_SENSE
#define	sd_send_scsi_MODE_SELECT	ssd_send_scsi_MODE_SELECT
#define	sd_send_scsi_RDWR		ssd_send_scsi_RDWR
#define	sd_send_scsi_LOG_SENSE		ssd_send_scsi_LOG_SENSE
#define	sd_alloc_rqs			ssd_alloc_rqs
#define	sd_free_rqs			ssd_free_rqs
#define	sd_dump_memory			ssd_dump_memory
#define	sd_uscsi_ioctl			ssd_uscsi_ioctl
#define	sd_get_media_info		ssd_get_media_info
#define	sd_dkio_ctrl_info		ssd_dkio_ctrl_info
#define	sd_dkio_get_geometry		ssd_dkio_get_geometry
#define	sd_dkio_set_geometry		ssd_dkio_set_geometry
#define	sd_dkio_get_partition		ssd_dkio_get_partition
#define	sd_dkio_set_partition		ssd_dkio_set_partition
#define	sd_dkio_partition		ssd_dkio_partition
#define	sd_dkio_get_vtoc		ssd_dkio_get_vtoc
#define	sd_dkio_get_efi			ssd_dkio_get_efi
#define	sd_build_user_vtoc		ssd_build_user_vtoc
#define	sd_dkio_set_vtoc		ssd_dkio_set_vtoc
#define	sd_dkio_set_efi			ssd_dkio_set_efi
#define	sd_build_label_vtoc		ssd_build_label_vtoc
#define	sd_write_label			ssd_write_label
#define	sd_clear_vtoc			ssd_clear_vtoc
#define	sd_clear_efi			ssd_clear_efi
#define	sd_get_tunables_from_conf	ssd_get_tunables_from_conf
#define	sd_setup_next_xfer		ssd_setup_next_xfer
#define	sd_dkio_get_temp		ssd_dkio_get_temp
#define	sd_dkio_get_mboot		ssd_dkio_get_mboot
#define	sd_dkio_set_mboot		ssd_dkio_set_mboot
#define	sd_setup_default_geometry	ssd_setup_default_geometry
#define	sd_update_fdisk_and_vtoc	ssd_update_fdisk_and_vtoc
#define	sd_check_mhd			ssd_check_mhd
#define	sd_mhd_watch_cb			ssd_mhd_watch_cb
#define	sd_mhd_watch_incomplete		ssd_mhd_watch_incomplete
#define	sd_sname			ssd_sname
#define	sd_mhd_resvd_recover		ssd_mhd_resvd_recover
#define	sd_resv_reclaim_thread		ssd_resv_reclaim_thread
#define	sd_take_ownership		ssd_take_ownership
#define	sd_reserve_release		ssd_reserve_release
#define	sd_rmv_resv_reclaim_req		ssd_rmv_resv_reclaim_req
#define	sd_mhd_reset_notify_cb		ssd_mhd_reset_notify_cb
#define	sd_persistent_reservation_in_read_keys	\
					ssd_persistent_reservation_in_read_keys
#define	sd_persistent_reservation_in_read_resv	\
					ssd_persistent_reservation_in_read_resv
#define	sd_mhdioc_takeown		ssd_mhdioc_takeown
#define	sd_mhdioc_failfast		ssd_mhdioc_failfast
#define	sd_mhdioc_release		ssd_mhdioc_release
#define	sd_mhdioc_register_devid	ssd_mhdioc_register_devid
#define	sd_mhdioc_inkeys		ssd_mhdioc_inkeys
#define	sd_mhdioc_inresv		ssd_mhdioc_inresv
#define	sr_change_blkmode		ssr_change_blkmode
#define	sr_change_speed			ssr_change_speed
#define	sr_atapi_change_speed		ssr_atapi_change_speed
#define	sr_pause_resume			ssr_pause_resume
#define	sr_play_msf			ssr_play_msf
#define	sr_play_trkind			ssr_play_trkind
#define	sr_read_all_subcodes		ssr_read_all_subcodes
#define	sr_read_subchannel		ssr_read_subchannel
#define	sr_read_tocentry		ssr_read_tocentry
#define	sr_read_tochdr			ssr_read_tochdr
#define	sr_read_cdda			ssr_read_cdda
#define	sr_read_cdxa			ssr_read_cdxa
#define	sr_read_mode1			ssr_read_mode1
#define	sr_read_mode2			ssr_read_mode2
#define	sr_read_cd_mode2		ssr_read_cd_mode2
#define	sr_sector_mode			ssr_sector_mode
#define	sr_eject			ssr_eject
#define	sr_ejected			ssr_ejected
#define	sr_check_wp			ssr_check_wp
#define	sd_check_media			ssd_check_media
#define	sd_media_watch_cb		ssd_media_watch_cb
#define	sd_delayed_cv_broadcast		ssd_delayed_cv_broadcast
#define	sr_volume_ctrl			ssr_volume_ctrl
#define	sr_read_sony_session_offset	ssr_read_sony_session_offset
#define	sd_log_page_supported		ssd_log_page_supported
#define	sd_check_for_writable_cd	ssd_check_for_writable_cd
#define	sd_wm_cache_constructor		ssd_wm_cache_constructor
#define	sd_wm_cache_destructor		ssd_wm_cache_destructor
#define	sd_range_lock			ssd_range_lock
#define	sd_get_range			ssd_get_range
#define	sd_free_inlist_wmap		ssd_free_inlist_wmap
#define	sd_range_unlock			ssd_range_unlock
#define	sd_read_modify_write_task	ssd_read_modify_write_task
#define	sddump_do_read_of_rmw		ssddump_do_read_of_rmw

#define	sd_iostart_chain		ssd_iostart_chain
#define	sd_iodone_chain			ssd_iodone_chain
#define	sd_initpkt_map			ssd_initpkt_map
#define	sd_destroypkt_map		ssd_destroypkt_map
#define	sd_chain_type_map		ssd_chain_type_map
#define	sd_chain_index_map		ssd_chain_index_map

#define	sd_failfast_flushctl		ssd_failfast_flushctl
#define	sd_failfast_flushq		ssd_failfast_flushq
#define	sd_failfast_flushq_callback	ssd_failfast_flushq_callback

#endif	/* #if (defined(__fibre)) */

int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);

/*PRINTFLIKE3*/
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
/*PRINTFLIKE3*/
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);

static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int mod_flags, char *name, caddr_t valuep, int *lengthp);

/*
 * Smart probe for parallel scsi
 */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());

static int sd_spin_up_unit(struct sd_lun *un);
static void sd_enable_descr_sense(struct sd_lun *un);
static void sd_set_mmc_caps(struct sd_lun *un);

static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
    int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
    sd_tunables *prop_list);
static int sd_validate_geometry(struct sd_lun *un, int path_flag);

#if defined(_SUNOS_VTOC_16)
static void sd_convert_geometry(uint64_t capacity, struct dk_geom *un_g);
#endif

static void sd_resync_geom_caches(struct sd_lun *un, int capacity, int lbasize,
    int path_flag);
static int sd_read_fdisk(struct sd_lun *un, uint_t capacity, int lbasize,
    int path_flag);
static void sd_get_physical_geometry(struct sd_lun *un,
    struct geom_cache *pgeom_p, int capacity, int lbasize, int path_flag);
static void sd_get_virtual_geometry(struct sd_lun *un, int capacity,
    int lbasize);
static int sd_uselabel(struct sd_lun *un, struct dk_label *l, int path_flag);
static void sd_swap_efi_gpt(efi_gpt_t *);
static void sd_swap_efi_gpe(int nparts, efi_gpe_t *);
static int sd_validate_efi(efi_gpt_t *);
static int sd_use_efi(struct sd_lun *, int);
static void sd_build_default_label(struct sd_lun *un);

#if defined(_FIRMWARE_NEEDS_FDISK)
static int  sd_has_max_chs_vals(struct ipart *fdp);
#endif
static void sd_inq_fill(char *p, int l, char *s);


static void sd_register_devid(struct sd_lun *un, dev_info_t *devi,
    int reservation_flag);
static daddr_t sd_get_devid_block(struct sd_lun *un);
static int  sd_get_devid(struct sd_lun *un);
static int  sd_get_serialnum(struct sd_lun *un, uchar_t *wwn, int *len);
static ddi_devid_t sd_create_devid(struct sd_lun *un);
static int  sd_write_deviceid(struct sd_lun *un);
static int  sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
static int  sd_check_vpd_page_support(struct sd_lun *un);

static void sd_setup_pm(struct sd_lun *un, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);

static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_pm_suspend(struct sd_lun *un);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_ddi_pm_resume(struct sd_lun *un);
static int sdpower(dev_info_t *devi, int component, int level);

static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);

static int sd_create_minor_nodes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);

static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);

#if (defined(__fibre))
/*
 * Event callbacks (photon)
 */
static void sd_init_event_callbacks(struct sd_lun *un);
static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
#endif


static int sd_disable_caching(struct sd_lun *un);
static dev_t sd_make_device(dev_info_t *devi);

static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
    uint64_t capacity);

/*
 * Driver entry point functions.
 */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(struct sd_lun *un);

static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);

static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/*
 * Function prototypes for layering functions in the iostart chain.
 */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);

/*
 * Function prototypes for layering functions in the iodone chain.
 */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
    struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);

/*
 * Prototypes for functions to support buf(9S) based IO.
 */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
    struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, diskaddr_t lba, uint32_t blockcount);

/*
 * Prototypes for functions to support USCSI IO.
 */
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);

static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop);

static int  sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);

static void sd_pm_idletimeout_handler(void *arg);

/*
 * sd_core internal functions (used at the sd_core_io layer).
 */
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);

static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd,
    enum uio_seg cdbspace, enum uio_seg dataspace, enum uio_seg rqbufspace,
    int path_flag);

static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
    daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
    uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);

static void sd_print_transport_rejected_message(struct sd_lun *un,
    struct sd_xbuf *xp, int code);

static void sd_retry_command(struct sd_lun *un, struct buf *bp,
    int retry_check_flag,
    void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
	int c),
    void *user_arg, int failure_code, clock_t retry_delay,
    void (*statp)(kstat_io_t *));

static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
    clock_t retry_delay, void (*statp)(kstat_io_t *));

static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
    int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
    struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);

static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);

static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);

static void sd_init_cdb_limits(struct sd_lun *un);

static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

/*
 * Error handling functions
 */
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int code);
static diskaddr_t sd_extract_sense_info_descr(
    struct scsi_descr_sense_hdr *sdsp);

static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
    uint8_t asc,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
    uint8_t asc, uint8_t ascq,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
    int sense_key, uint8_t asc,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
    uint8_t asc,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
    int sense_key,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
    void *arg, int flag);

static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp);

static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);

static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);

static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_media_change_task(void *arg);

static int sd_handle_mchange(struct sd_lun *un);
static int sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
    uint32_t *lbap, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag,
    int path_flag);
static int sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr,
    size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un,
    uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un,
    uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un);
static int sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen);
static int sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
    struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
    uchar_t *bufaddr, uint_t buflen, char feature);
static int sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize,
    uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag);
#define	sd_send_scsi_READ(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_READ, bufaddr, buflen, start_block, \
	path_flag)
#define	sd_send_scsi_WRITE(un, bufaddr, buflen, start_block, path_flag)	\
	sd_send_scsi_RDWR(un, SCMD_WRITE, bufaddr, buflen, start_block,\
	path_flag)
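
/*
 * Illustrative example only (not driver code): a caller reading one
 * logical block through the convenience macro above, which simply
 * expands to sd_send_scsi_RDWR() with SCMD_READ. The path_flag value
 * shown (SD_PATH_STANDARD) is assumed to come from sddef.h.
 *
 *	rval = sd_send_scsi_READ(un, bufaddr, un->un_sys_blocksize,
 *	    start_block, SD_PATH_STANDARD);
 */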
defined(__i386) || defined(__amd64)
1424 static int sd_update_fdisk_and_vtoc(struct sd_lun *un);
1425 #endif
1426 
1427 /*
1428 * Multi-host Ioctl Prototypes
1429 */
1430 static int sd_check_mhd(dev_t dev, int interval);
1431 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
1432 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
1433 static char *sd_sname(uchar_t status);
1434 static void sd_mhd_resvd_recover(void *arg);
1435 static void sd_resv_reclaim_thread();
1436 static int sd_take_ownership(dev_t dev, struct mhioctkown *p);
1437 static int sd_reserve_release(dev_t dev, int cmd);
1438 static void sd_rmv_resv_reclaim_req(dev_t dev);
1439 static void sd_mhd_reset_notify_cb(caddr_t arg);
1440 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un,
1441 mhioc_inkeys_t *usrp, int flag);
1442 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un,
1443 mhioc_inresvs_t *usrp, int flag);
1444 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
1445 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
1446 static int sd_mhdioc_release(dev_t dev);
1447 static int sd_mhdioc_register_devid(dev_t dev);
1448 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag);
1449 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag);
1450 
1451 /*
1452 * SCSI removable prototypes
1453 */
1454 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag);
1455 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
1456 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
1457 static int sr_pause_resume(dev_t dev, int mode);
1458 static int sr_play_msf(dev_t dev, caddr_t data, int flag);
1459 static int sr_play_trkind(dev_t dev, caddr_t data, int flag);
1460 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag);
1461 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag);
1462 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag);
1463 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag);
1464 static int sr_read_cdda(dev_t dev, caddr_t data, int flag);
1465 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag);
1466 static int sr_read_mode1(dev_t dev, caddr_t data, int flag);
1467 static int sr_read_mode2(dev_t dev, caddr_t data, int flag);
1468 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag);
1469 static int sr_sector_mode(dev_t dev, uint32_t blksize);
1470 static int sr_eject(dev_t dev);
1471 static void sr_ejected(register struct sd_lun *un);
1472 static int sr_check_wp(dev_t dev);
1473 static int sd_check_media(dev_t dev, enum dkio_state state);
1474 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
1475 static void sd_delayed_cv_broadcast(void *arg);
1476 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag);
1477 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag);
1478 
1479 static int sd_log_page_supported(struct sd_lun *un, int log_page);
1480 
1481 /*
1482 * Function prototypes for the non-512 support (DVDRAM, MO, etc.) functions.
1483 */ 1484 static void sd_check_for_writable_cd(struct sd_lun *un); 1485 static int sd_wm_cache_constructor(void *wm, void *un, int flags); 1486 static void sd_wm_cache_destructor(void *wm, void *un); 1487 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb, 1488 daddr_t endb, ushort_t typ); 1489 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb, 1490 daddr_t endb); 1491 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp); 1492 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm); 1493 static void sd_read_modify_write_task(void * arg); 1494 static int 1495 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 1496 struct buf **bpp); 1497 1498 1499 /* 1500 * Function prototypes for failfast support. 1501 */ 1502 static void sd_failfast_flushq(struct sd_lun *un); 1503 static int sd_failfast_flushq_callback(struct buf *bp); 1504 1505 /* 1506 * Function prototypes for x86 support 1507 */ 1508 #if defined(__i386) || defined(__amd64) 1509 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp, 1510 struct scsi_pkt *pkt, struct sd_xbuf *xp); 1511 #endif 1512 1513 /* 1514 * Constants for failfast support: 1515 * 1516 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO 1517 * failfast processing being performed. 1518 * 1519 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing 1520 * failfast processing on all bufs with B_FAILFAST set. 1521 */ 1522 1523 #define SD_FAILFAST_INACTIVE 0 1524 #define SD_FAILFAST_ACTIVE 1 1525 1526 /* 1527 * Bitmask to control behavior of buf(9S) flushes when a transition to 1528 * the failfast state occurs. Optional bits include: 1529 * 1530 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that 1531 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will 1532 * be flushed. 1533 * 1534 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the 1535 * driver, in addition to the regular wait queue. This includes the xbuf 1536 * queues. When clear, only the driver's wait queue will be flushed. 1537 */ 1538 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01 1539 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02 1540 1541 /* 1542 * The default behavior is to only flush bufs that have B_FAILFAST set, but 1543 * to flush all queues within the driver. 
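 *
 * As an illustrative sketch only (assuming the standard /etc/system
 * "set <module>:<variable> = <value>" syntax), an administrator could
 * override this default at boot time with:
 *
 *	set sd:sd_failfast_flushctl = 0x3
 *
 * which would set both SD_FAILFAST_FLUSH_ALL_BUFS and
 * SD_FAILFAST_FLUSH_ALL_QUEUES (0x01 | 0x02).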
1544 */
1545 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES;
1546 
1547 
1548 /*
1549 * SD Testing Fault Injection
1550 */
1551 #ifdef SD_FAULT_INJECTION
1552 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
1553 static void sd_faultinjection(struct scsi_pkt *pktp);
1554 static void sd_injection_log(char *buf, struct sd_lun *un);
1555 #endif
1556 
1557 /*
1558 * Device driver ops vector
1559 */
1560 static struct cb_ops sd_cb_ops = {
1561 sdopen, /* open */
1562 sdclose, /* close */
1563 sdstrategy, /* strategy */
1564 nodev, /* print */
1565 sddump, /* dump */
1566 sdread, /* read */
1567 sdwrite, /* write */
1568 sdioctl, /* ioctl */
1569 nodev, /* devmap */
1570 nodev, /* mmap */
1571 nodev, /* segmap */
1572 nochpoll, /* poll */
1573 sd_prop_op, /* cb_prop_op */
1574 0, /* streamtab */
1575 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */
1576 CB_REV, /* cb_rev */
1577 sdaread, /* async I/O read entry point */
1578 sdawrite /* async I/O write entry point */
1579 };
1580 
1581 static struct dev_ops sd_ops = {
1582 DEVO_REV, /* devo_rev, */
1583 0, /* refcnt */
1584 sdinfo, /* info */
1585 nulldev, /* identify */
1586 sdprobe, /* probe */
1587 sdattach, /* attach */
1588 sddetach, /* detach */
1589 nodev, /* reset */
1590 &sd_cb_ops, /* driver operations */
1591 NULL, /* bus operations */
1592 sdpower /* power */
1593 };
1594 
1595 
1596 /*
1597 * This is the loadable module wrapper.
1598 */
1599 #include <sys/modctl.h>
1600 
1601 static struct modldrv modldrv = {
1602 &mod_driverops, /* Type of module. This one is a driver */
1603 SD_MODULE_NAME, /* Module name. */
1604 &sd_ops /* driver ops */
1605 };
1606 
1607 
1608 static struct modlinkage modlinkage = {
1609 MODREV_1,
1610 &modldrv,
1611 NULL
1612 };
1613 
1614 
1615 static struct scsi_asq_key_strings sd_additional_codes[] = {
1616 0x81, 0, "Logical Unit is Reserved",
1617 0x85, 0, "Audio Address Not Valid",
1618 0xb6, 0, "Media Load Mechanism Failed",
1619 0xB9, 0, "Audio Play Operation Aborted",
1620 0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1621 0x53, 2, "Medium removal prevented",
1622 0x6f, 0, "Authentication failed during key exchange",
1623 0x6f, 1, "Key not present",
1624 0x6f, 2, "Key not established",
1625 0x6f, 3, "Read without proper authentication",
1626 0x6f, 4, "Mismatched region to this logical unit",
1627 0x6f, 5, "Region reset count error",
1628 0xffff, 0x0, NULL
1629 };
1630 
1631 
1632 /*
1633 * Struct for passing printing information for sense data messages
1634 */
1635 struct sd_sense_info {
1636 int ssi_severity;
1637 int ssi_pfa_flag;
1638 };
1639 
1640 /*
1641 * Table of function pointers for iostart-side routines. Separate "chains"
1642 * of layered function calls are formed by placing the function pointers
1643 * sequentially in the desired order. Functions are called according to an
1644 * incrementing table index ordering. The last function in each chain must
1645 * be sd_core_iostart(). The corresponding iodone-side routines are expected
1646 * in the sd_iodone_chain[] array.
1647 *
1648 * Note: It may seem more natural to organize both the iostart and iodone
1649 * functions together, into an array of structures (or some similar
1650 * organization) with a common index, rather than two separate arrays which
1651 * must be maintained in synchronization. The purpose of this division is
1652 * to achieve improved performance: individual arrays allow for more
1653 * effective cache line utilization on certain platforms.
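 *
 * For example, reading directly from the table below: a buf issued to a
 * disk drive target with PM enabled enters the chain at Index 0 and is
 * passed along sd_mapblockaddr_iostart -> sd_pm_iostart ->
 * sd_core_iostart, each layer handing the buf to the next via an
 * incrementing index (see the SD_NEXT_IOSTART() macro defined later in
 * this file).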
1654 */ 1655 1656 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp); 1657 1658 1659 static sd_chain_t sd_iostart_chain[] = { 1660 1661 /* Chain for buf IO for disk drive targets (PM enabled) */ 1662 sd_mapblockaddr_iostart, /* Index: 0 */ 1663 sd_pm_iostart, /* Index: 1 */ 1664 sd_core_iostart, /* Index: 2 */ 1665 1666 /* Chain for buf IO for disk drive targets (PM disabled) */ 1667 sd_mapblockaddr_iostart, /* Index: 3 */ 1668 sd_core_iostart, /* Index: 4 */ 1669 1670 /* Chain for buf IO for removable-media targets (PM enabled) */ 1671 sd_mapblockaddr_iostart, /* Index: 5 */ 1672 sd_mapblocksize_iostart, /* Index: 6 */ 1673 sd_pm_iostart, /* Index: 7 */ 1674 sd_core_iostart, /* Index: 8 */ 1675 1676 /* Chain for buf IO for removable-media targets (PM disabled) */ 1677 sd_mapblockaddr_iostart, /* Index: 9 */ 1678 sd_mapblocksize_iostart, /* Index: 10 */ 1679 sd_core_iostart, /* Index: 11 */ 1680 1681 /* Chain for buf IO for disk drives with checksumming (PM enabled) */ 1682 sd_mapblockaddr_iostart, /* Index: 12 */ 1683 sd_checksum_iostart, /* Index: 13 */ 1684 sd_pm_iostart, /* Index: 14 */ 1685 sd_core_iostart, /* Index: 15 */ 1686 1687 /* Chain for buf IO for disk drives with checksumming (PM disabled) */ 1688 sd_mapblockaddr_iostart, /* Index: 16 */ 1689 sd_checksum_iostart, /* Index: 17 */ 1690 sd_core_iostart, /* Index: 18 */ 1691 1692 /* Chain for USCSI commands (all targets) */ 1693 sd_pm_iostart, /* Index: 19 */ 1694 sd_core_iostart, /* Index: 20 */ 1695 1696 /* Chain for checksumming USCSI commands (all targets) */ 1697 sd_checksum_uscsi_iostart, /* Index: 21 */ 1698 sd_pm_iostart, /* Index: 22 */ 1699 sd_core_iostart, /* Index: 23 */ 1700 1701 /* Chain for "direct" USCSI commands (all targets) */ 1702 sd_core_iostart, /* Index: 24 */ 1703 1704 /* Chain for "direct priority" USCSI commands (all targets) */ 1705 sd_core_iostart, /* Index: 25 */ 1706 }; 1707 1708 /* 1709 * Macros to locate the first function of each iostart chain in the 1710 * sd_iostart_chain[] array. These are located by the index in the array. 1711 */ 1712 #define SD_CHAIN_DISK_IOSTART 0 1713 #define SD_CHAIN_DISK_IOSTART_NO_PM 3 1714 #define SD_CHAIN_RMMEDIA_IOSTART 5 1715 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9 1716 #define SD_CHAIN_CHKSUM_IOSTART 12 1717 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16 1718 #define SD_CHAIN_USCSI_CMD_IOSTART 19 1719 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21 1720 #define SD_CHAIN_DIRECT_CMD_IOSTART 24 1721 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25 1722 1723 1724 /* 1725 * Table of function pointers for the iodone-side routines for the driver- 1726 * internal layering mechanism. The calling sequence for iodone routines 1727 * uses a decrementing table index, so the last routine called in a chain 1728 * must be at the lowest array index location for that chain. The last 1729 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs) 1730 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering 1731 * of the functions in an iodone side chain must correspond to the ordering 1732 * of the iostart routines for that chain. Note that there is no iodone 1733 * side routine that corresponds to sd_core_iostart(), so there is no 1734 * entry in the table for this. 
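 *
 * For example, the disk drive target (PM enabled) chain below unwinds in
 * the order sd_pm_iodone (Index: 2) -> sd_mapblockaddr_iodone (Index: 1)
 * -> sd_buf_iodone (Index: 0), mirroring the corresponding iostart chain
 * in reverse.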
1735 */
1736 
1737 static sd_chain_t sd_iodone_chain[] = {
1738 
1739 /* Chain for buf IO for disk drive targets (PM enabled) */
1740 sd_buf_iodone, /* Index: 0 */
1741 sd_mapblockaddr_iodone, /* Index: 1 */
1742 sd_pm_iodone, /* Index: 2 */
1743 
1744 /* Chain for buf IO for disk drive targets (PM disabled) */
1745 sd_buf_iodone, /* Index: 3 */
1746 sd_mapblockaddr_iodone, /* Index: 4 */
1747 
1748 /* Chain for buf IO for removable-media targets (PM enabled) */
1749 sd_buf_iodone, /* Index: 5 */
1750 sd_mapblockaddr_iodone, /* Index: 6 */
1751 sd_mapblocksize_iodone, /* Index: 7 */
1752 sd_pm_iodone, /* Index: 8 */
1753 
1754 /* Chain for buf IO for removable-media targets (PM disabled) */
1755 sd_buf_iodone, /* Index: 9 */
1756 sd_mapblockaddr_iodone, /* Index: 10 */
1757 sd_mapblocksize_iodone, /* Index: 11 */
1758 
1759 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1760 sd_buf_iodone, /* Index: 12 */
1761 sd_mapblockaddr_iodone, /* Index: 13 */
1762 sd_checksum_iodone, /* Index: 14 */
1763 sd_pm_iodone, /* Index: 15 */
1764 
1765 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1766 sd_buf_iodone, /* Index: 16 */
1767 sd_mapblockaddr_iodone, /* Index: 17 */
1768 sd_checksum_iodone, /* Index: 18 */
1769 
1770 /* Chain for USCSI commands (non-checksum targets) */
1771 sd_uscsi_iodone, /* Index: 19 */
1772 sd_pm_iodone, /* Index: 20 */
1773 
1774 /* Chain for USCSI commands (checksum targets) */
1775 sd_uscsi_iodone, /* Index: 21 */
1776 sd_checksum_uscsi_iodone, /* Index: 22 */
1777 sd_pm_iodone, /* Index: 23 */
1778 
1779 /* Chain for "direct" USCSI commands (all targets) */
1780 sd_uscsi_iodone, /* Index: 24 */
1781 
1782 /* Chain for "direct priority" USCSI commands (all targets) */
1783 sd_uscsi_iodone, /* Index: 25 */
1784 };
1785 
1786 
1787 /*
1788 * Macros to locate the "first" function in the sd_iodone_chain[] array for
1789 * each iodone-side chain. These are located by the array index, but as the
1790 * iodone side functions are called in a decrementing-index order, the
1791 * highest index number in each chain must be specified (as these correspond
1792 * to the first function in the iodone chain that will be called by the core
1793 * at IO completion time).
1794 */
1795 
1796 #define SD_CHAIN_DISK_IODONE 2
1797 #define SD_CHAIN_DISK_IODONE_NO_PM 4
1798 #define SD_CHAIN_RMMEDIA_IODONE 8
1799 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
1800 #define SD_CHAIN_CHKSUM_IODONE 15
1801 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
1802 #define SD_CHAIN_USCSI_CMD_IODONE 20
1803 #define SD_CHAIN_USCSI_CHKSUM_IODONE 23
1804 #define SD_CHAIN_DIRECT_CMD_IODONE 24
1805 #define SD_CHAIN_PRIORITY_CMD_IODONE 25
1806 
1807 
1808 
1809 
1810 /*
1811 * Array to map a layering chain index to the appropriate initpkt routine.
1812 * The redundant entries are present so that the index used for accessing
1813 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1814 * with this table as well.
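 *
 * An illustrative (not verbatim) use of this map, for an xbuf that
 * carries its iostart chain index in xb_chain_iostart:
 *
 *	rval = (*sd_initpkt_map[xp->xb_chain_iostart])(bp, &pktp);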
1815 */
1816 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
1817 
1818 static sd_initpkt_t sd_initpkt_map[] = {
1819 
1820 /* Chain for buf IO for disk drive targets (PM enabled) */
1821 sd_initpkt_for_buf, /* Index: 0 */
1822 sd_initpkt_for_buf, /* Index: 1 */
1823 sd_initpkt_for_buf, /* Index: 2 */
1824 
1825 /* Chain for buf IO for disk drive targets (PM disabled) */
1826 sd_initpkt_for_buf, /* Index: 3 */
1827 sd_initpkt_for_buf, /* Index: 4 */
1828 
1829 /* Chain for buf IO for removable-media targets (PM enabled) */
1830 sd_initpkt_for_buf, /* Index: 5 */
1831 sd_initpkt_for_buf, /* Index: 6 */
1832 sd_initpkt_for_buf, /* Index: 7 */
1833 sd_initpkt_for_buf, /* Index: 8 */
1834 
1835 /* Chain for buf IO for removable-media targets (PM disabled) */
1836 sd_initpkt_for_buf, /* Index: 9 */
1837 sd_initpkt_for_buf, /* Index: 10 */
1838 sd_initpkt_for_buf, /* Index: 11 */
1839 
1840 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1841 sd_initpkt_for_buf, /* Index: 12 */
1842 sd_initpkt_for_buf, /* Index: 13 */
1843 sd_initpkt_for_buf, /* Index: 14 */
1844 sd_initpkt_for_buf, /* Index: 15 */
1845 
1846 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1847 sd_initpkt_for_buf, /* Index: 16 */
1848 sd_initpkt_for_buf, /* Index: 17 */
1849 sd_initpkt_for_buf, /* Index: 18 */
1850 
1851 /* Chain for USCSI commands (non-checksum targets) */
1852 sd_initpkt_for_uscsi, /* Index: 19 */
1853 sd_initpkt_for_uscsi, /* Index: 20 */
1854 
1855 /* Chain for USCSI commands (checksum targets) */
1856 sd_initpkt_for_uscsi, /* Index: 21 */
1857 sd_initpkt_for_uscsi, /* Index: 22 */
1858 sd_initpkt_for_uscsi, /* Index: 23 */
1859 
1860 /* Chain for "direct" USCSI commands (all targets) */
1861 sd_initpkt_for_uscsi, /* Index: 24 */
1862 
1863 /* Chain for "direct priority" USCSI commands (all targets) */
1864 sd_initpkt_for_uscsi, /* Index: 25 */
1865 
1866 };
1867 
1868 
1869 /*
1870 * Array to map a layering chain index to the appropriate destroypkt routine.
1871 * The redundant entries are present so that the index used for accessing
1872 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1873 * with this table as well.
1874 */
1875 typedef void (*sd_destroypkt_t)(struct buf *);
1876 
1877 static sd_destroypkt_t sd_destroypkt_map[] = {
1878 
1879 /* Chain for buf IO for disk drive targets (PM enabled) */
1880 sd_destroypkt_for_buf, /* Index: 0 */
1881 sd_destroypkt_for_buf, /* Index: 1 */
1882 sd_destroypkt_for_buf, /* Index: 2 */
1883 
1884 /* Chain for buf IO for disk drive targets (PM disabled) */
1885 sd_destroypkt_for_buf, /* Index: 3 */
1886 sd_destroypkt_for_buf, /* Index: 4 */
1887 
1888 /* Chain for buf IO for removable-media targets (PM enabled) */
1889 sd_destroypkt_for_buf, /* Index: 5 */
1890 sd_destroypkt_for_buf, /* Index: 6 */
1891 sd_destroypkt_for_buf, /* Index: 7 */
1892 sd_destroypkt_for_buf, /* Index: 8 */
1893 
1894 /* Chain for buf IO for removable-media targets (PM disabled) */
1895 sd_destroypkt_for_buf, /* Index: 9 */
1896 sd_destroypkt_for_buf, /* Index: 10 */
1897 sd_destroypkt_for_buf, /* Index: 11 */
1898 
1899 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1900 sd_destroypkt_for_buf, /* Index: 12 */
1901 sd_destroypkt_for_buf, /* Index: 13 */
1902 sd_destroypkt_for_buf, /* Index: 14 */
1903 sd_destroypkt_for_buf, /* Index: 15 */
1904 
1905 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1906 sd_destroypkt_for_buf, /* Index: 16 */
1907 sd_destroypkt_for_buf, /* Index: 17 */
1908 sd_destroypkt_for_buf, /* Index: 18 */
1909 
1910 /* Chain for USCSI commands (non-checksum targets) */
1911 sd_destroypkt_for_uscsi, /* Index: 19 */
1912 sd_destroypkt_for_uscsi, /* Index: 20 */
1913 
1914 /* Chain for USCSI commands (checksum targets) */
1915 sd_destroypkt_for_uscsi, /* Index: 21 */
1916 sd_destroypkt_for_uscsi, /* Index: 22 */
1917 sd_destroypkt_for_uscsi, /* Index: 23 */
1918 
1919 /* Chain for "direct" USCSI commands (all targets) */
1920 sd_destroypkt_for_uscsi, /* Index: 24 */
1921 
1922 /* Chain for "direct priority" USCSI commands (all targets) */
1923 sd_destroypkt_for_uscsi, /* Index: 25 */
1924 
1925 };
1926 
1927 
1928 
1929 /*
1930 * Array to map a layering chain index to the appropriate chain "type".
1931 * The chain type indicates a specific property/usage of the chain.
1932 * The redundant entries are present so that the index used for accessing
1933 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
1934 * with this table as well.
1935 */
1936 
1937 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */
1938 #define SD_CHAIN_BUFIO 1 /* regular buf IO */
1939 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */
1940 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */
1941 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */
1942 /* (for error recovery) */
1943 
1944 static int sd_chain_type_map[] = {
1945 
1946 /* Chain for buf IO for disk drive targets (PM enabled) */
1947 SD_CHAIN_BUFIO, /* Index: 0 */
1948 SD_CHAIN_BUFIO, /* Index: 1 */
1949 SD_CHAIN_BUFIO, /* Index: 2 */
1950 
1951 /* Chain for buf IO for disk drive targets (PM disabled) */
1952 SD_CHAIN_BUFIO, /* Index: 3 */
1953 SD_CHAIN_BUFIO, /* Index: 4 */
1954 
1955 /* Chain for buf IO for removable-media targets (PM enabled) */
1956 SD_CHAIN_BUFIO, /* Index: 5 */
1957 SD_CHAIN_BUFIO, /* Index: 6 */
1958 SD_CHAIN_BUFIO, /* Index: 7 */
1959 SD_CHAIN_BUFIO, /* Index: 8 */
1960 
1961 /* Chain for buf IO for removable-media targets (PM disabled) */
1962 SD_CHAIN_BUFIO, /* Index: 9 */
1963 SD_CHAIN_BUFIO, /* Index: 10 */
1964 SD_CHAIN_BUFIO, /* Index: 11 */
1965 
1966 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1967 SD_CHAIN_BUFIO, /* Index: 12 */
1968 SD_CHAIN_BUFIO, /* Index: 13 */
1969 SD_CHAIN_BUFIO, /* Index: 14 */
1970 SD_CHAIN_BUFIO, /* Index: 15 */
1971 
1972 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1973 SD_CHAIN_BUFIO, /* Index: 16 */
1974 SD_CHAIN_BUFIO, /* Index: 17 */
1975 SD_CHAIN_BUFIO, /* Index: 18 */
1976 
1977 /* Chain for USCSI commands (non-checksum targets) */
1978 SD_CHAIN_USCSI, /* Index: 19 */
1979 SD_CHAIN_USCSI, /* Index: 20 */
1980 
1981 /* Chain for USCSI commands (checksum targets) */
1982 SD_CHAIN_USCSI, /* Index: 21 */
1983 SD_CHAIN_USCSI, /* Index: 22 */
1984 SD_CHAIN_USCSI, /* Index: 23 */
1985 
1986 /* Chain for "direct" USCSI commands (all targets) */
1987 SD_CHAIN_DIRECT, /* Index: 24 */
1988 
1989 /* Chain for "direct priority" USCSI commands (all targets) */
1990 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */
1991 };
1992 
1993 
1994 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
1995 #define SD_IS_BUFIO(xp) \
1996 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
1997 
1998 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
1999 #define SD_IS_DIRECT_PRIORITY(xp) \
2000 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2001 
2002 
2003 
2004 /*
2005 * Struct, array, and macros to map a specific chain to the appropriate
2006 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2007 *
2008 * The sd_chain_index_map[] array is used at attach time to set the various
2009 * un_xxx_chain type members of the sd_lun softstate to the specific layering
2010 * chain to be used with the instance. This allows different instances to use
2011 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2012 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2013 * values at sd_xbuf init time, this allows (1) layering chains to be changed
2014 * dynamically and without the use of locking; and (2) a layer to update the
2015 * xb_chain_io[start|done] member in a given xbuf with its current index value,
2016 * allowing deferred processing of an IO within the same chain from a
2017 * different execution context.
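 *
 * An illustrative sketch (not taken verbatim from the attach code): an
 * instance using the checksumming buf IO chain with PM enabled would set
 *
 *	un->un_buf_chain_type = SD_CHAIN_INFO_CHKSUM;
 *
 * and each sd_xbuf prepared for that instance would then be initialized
 * with
 *
 *	xp->xb_chain_iostart =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iostart_index;
 *	xp->xb_chain_iodone =
 *	    sd_chain_index_map[un->un_buf_chain_type].sci_iodone_index;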
2018 */ 2019 2020 struct sd_chain_index { 2021 int sci_iostart_index; 2022 int sci_iodone_index; 2023 }; 2024 2025 static struct sd_chain_index sd_chain_index_map[] = { 2026 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE }, 2027 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM }, 2028 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE }, 2029 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM }, 2030 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE }, 2031 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM }, 2032 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE }, 2033 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE }, 2034 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE }, 2035 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE }, 2036 }; 2037 2038 2039 /* 2040 * The following are indexes into the sd_chain_index_map[] array. 2041 */ 2042 2043 /* un->un_buf_chain_type must be set to one of these */ 2044 #define SD_CHAIN_INFO_DISK 0 2045 #define SD_CHAIN_INFO_DISK_NO_PM 1 2046 #define SD_CHAIN_INFO_RMMEDIA 2 2047 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3 2048 #define SD_CHAIN_INFO_CHKSUM 4 2049 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5 2050 2051 /* un->un_uscsi_chain_type must be set to one of these */ 2052 #define SD_CHAIN_INFO_USCSI_CMD 6 2053 /* USCSI with PM disabled is the same as DIRECT */ 2054 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8 2055 #define SD_CHAIN_INFO_USCSI_CHKSUM 7 2056 2057 /* un->un_direct_chain_type must be set to one of these */ 2058 #define SD_CHAIN_INFO_DIRECT_CMD 8 2059 2060 /* un->un_priority_chain_type must be set to one of these */ 2061 #define SD_CHAIN_INFO_PRIORITY_CMD 9 2062 2063 /* size for devid inquiries */ 2064 #define MAX_INQUIRY_SIZE 0xF0 2065 2066 /* 2067 * Macros used by functions to pass a given buf(9S) struct along to the 2068 * next function in the layering chain for further processing. 2069 * 2070 * In the following macros, passing more than three arguments to the called 2071 * routines causes the optimizer for the SPARC compiler to stop doing tail 2072 * call elimination which results in significant performance degradation. 2073 */ 2074 #define SD_BEGIN_IOSTART(index, un, bp) \ 2075 ((*(sd_iostart_chain[index]))(index, un, bp)) 2076 2077 #define SD_BEGIN_IODONE(index, un, bp) \ 2078 ((*(sd_iodone_chain[index]))(index, un, bp)) 2079 2080 #define SD_NEXT_IOSTART(index, un, bp) \ 2081 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp)) 2082 2083 #define SD_NEXT_IODONE(index, un, bp) \ 2084 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp)) 2085 2086 2087 /* 2088 * Function: _init 2089 * 2090 * Description: This is the driver _init(9E) entry point. 2091 * 2092 * Return Code: Returns the value from mod_install(9F) or 2093 * ddi_soft_state_init(9F) as appropriate. 2094 * 2095 * Context: Called when driver module loaded. 
2096 */
2097 
2098 int
2099 _init(void)
2100 {
2101 int err;
2102 
2103 /* establish driver name from module name */
2104 sd_label = mod_modname(&modlinkage);
2105 
2106 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
2107 SD_MAXUNIT);
2108 
2109 if (err != 0) {
2110 return (err);
2111 }
2112 
2113 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
2114 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
2115 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);
2116 
2117 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
2118 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
2119 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);
2120 
2121 /*
2122 * it's ok to init here even for fibre devices
2123 */
2124 sd_scsi_probe_cache_init();
2125 
2126 /*
2127 * Creating the taskq before mod_install ensures that all callers
2128 * (threads) that enter the module after a successful mod_install
2129 * encounter a valid taskq.
2130 */
2131 sd_taskq_create();
2132 
2133 err = mod_install(&modlinkage);
2134 if (err != 0) {
2135 /* delete taskq if install fails */
2136 sd_taskq_delete();
2137 
2138 mutex_destroy(&sd_detach_mutex);
2139 mutex_destroy(&sd_log_mutex);
2140 mutex_destroy(&sd_label_mutex);
2141 
2142 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2143 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2144 cv_destroy(&sd_tr.srq_inprocess_cv);
2145 
2146 sd_scsi_probe_cache_fini();
2147 
2148 ddi_soft_state_fini(&sd_state);
2149 return (err);
2150 }
2151 
2152 return (err);
2153 }
2154 
2155 
2156 /*
2157 * Function: _fini
2158 *
2159 * Description: This is the driver _fini(9E) entry point.
2160 *
2161 * Return Code: Returns the value from mod_remove(9F)
2162 *
2163 * Context: Called when driver module is unloaded.
2164 */
2165 
2166 int
2167 _fini(void)
2168 {
2169 int err;
2170 
2171 if ((err = mod_remove(&modlinkage)) != 0) {
2172 return (err);
2173 }
2174 
2175 sd_taskq_delete();
2176 
2177 mutex_destroy(&sd_detach_mutex);
2178 mutex_destroy(&sd_log_mutex);
2179 mutex_destroy(&sd_label_mutex);
2180 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2181 
2182 sd_scsi_probe_cache_fini();
2183 
2184 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2185 cv_destroy(&sd_tr.srq_inprocess_cv);
2186 
2187 ddi_soft_state_fini(&sd_state);
2188 
2189 return (err);
2190 }
2191 
2192 
2193 /*
2194 * Function: _info
2195 *
2196 * Description: This is the driver _info(9E) entry point.
2197 *
2198 * Arguments: modinfop - pointer to the driver modinfo structure
2199 *
2200 * Return Code: Returns the value from mod_info(9F).
2201 *
2202 * Context: Kernel thread context
2203 */
2204 
2205 int
2206 _info(struct modinfo *modinfop)
2207 {
2208 return (mod_info(&modlinkage, modinfop));
2209 }
2210 
2211 
2212 /*
2213 * The following routines implement the driver message logging facility.
2214 * They provide component- and level-based debug output filtering.
2215 * Output may also be restricted to messages for a single instance by
2216 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2217 * to NULL, then messages for all instances are printed.
2218 *
2219 * These routines have been cloned from each other due to the language
2220 * constraints of macros and variable argument list processing.
2221 */
2222 
2223 
2224 /*
2225 * Function: sd_log_err
2226 *
2227 * Description: This routine is called by the SD_ERROR macro for debug
2228 * logging of error conditions.
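 *		For example (illustrative only), output could be limited
 *		to a single instance by pointing sd_debug_un at that
 *		instance's soft state with a kernel debugger before
 *		exercising the error path.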
2229 *
2230 * Arguments: comp - driver component being logged
2231 * un - pointer to the driver soft state (unit) structure
2232 * fmt - error string and format to be logged
2233 */
2234 
2235 static void
2236 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2237 {
2238 va_list ap;
2239 dev_info_t *dev;
2240 
2241 ASSERT(un != NULL);
2242 dev = SD_DEVINFO(un);
2243 ASSERT(dev != NULL);
2244 
2245 /*
2246 * Filter messages based on the global component and level masks.
2247 * Also print if un matches the value of sd_debug_un, or if
2248 * sd_debug_un is set to NULL.
2249 */
2250 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2251 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2252 mutex_enter(&sd_log_mutex);
2253 va_start(ap, fmt);
2254 (void) vsprintf(sd_log_buf, fmt, ap);
2255 va_end(ap);
2256 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2257 mutex_exit(&sd_log_mutex);
2258 }
2259 #ifdef SD_FAULT_INJECTION
2260 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2261 if (un->sd_injection_mask & comp) {
2262 mutex_enter(&sd_log_mutex);
2263 va_start(ap, fmt);
2264 (void) vsprintf(sd_log_buf, fmt, ap);
2265 va_end(ap);
2266 sd_injection_log(sd_log_buf, un);
2267 mutex_exit(&sd_log_mutex);
2268 }
2269 #endif
2270 }
2271 
2272 
2273 /*
2274 * Function: sd_log_info
2275 *
2276 * Description: This routine is called by the SD_INFO macro for debug
2277 * logging of general purpose informational conditions.
2278 *
2279 * Arguments: component - driver component being logged
2280 * un - pointer to the driver soft state (unit) structure
2281 * fmt - info string and format to be logged
2282 */
2283 
2284 static void
2285 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2286 {
2287 va_list ap;
2288 dev_info_t *dev;
2289 
2290 ASSERT(un != NULL);
2291 dev = SD_DEVINFO(un);
2292 ASSERT(dev != NULL);
2293 
2294 /*
2295 * Filter messages based on the global component and level masks.
2296 * Also print if un matches the value of sd_debug_un, or if
2297 * sd_debug_un is set to NULL.
2298 */
2299 if ((sd_component_mask & component) &&
2300 (sd_level_mask & SD_LOGMASK_INFO) &&
2301 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2302 mutex_enter(&sd_log_mutex);
2303 va_start(ap, fmt);
2304 (void) vsprintf(sd_log_buf, fmt, ap);
2305 va_end(ap);
2306 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2307 mutex_exit(&sd_log_mutex);
2308 }
2309 #ifdef SD_FAULT_INJECTION
2310 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2311 if (un->sd_injection_mask & component) {
2312 mutex_enter(&sd_log_mutex);
2313 va_start(ap, fmt);
2314 (void) vsprintf(sd_log_buf, fmt, ap);
2315 va_end(ap);
2316 sd_injection_log(sd_log_buf, un);
2317 mutex_exit(&sd_log_mutex);
2318 }
2319 #endif
2320 }
2321 
2322 
2323 /*
2324 * Function: sd_log_trace
2325 *
2326 * Description: This routine is called by the SD_TRACE macro for debug
2327 * logging of trace conditions (i.e., function entry/exit).
2328 *
2329 * Arguments: component - driver component being logged
2330 * un - pointer to the driver soft state (unit) structure
2331 * fmt - trace string and format to be logged
2332 */
2333 
2334 static void
2335 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2336 {
2337 va_list ap;
2338 dev_info_t *dev;
2339 
2340 ASSERT(un != NULL);
2341 dev = SD_DEVINFO(un);
2342 ASSERT(dev != NULL);
2343 
2344 /*
2345 * Filter messages based on the global component and level masks.
2346 * Also print if un matches the value of sd_debug_un, or if
2347 * sd_debug_un is set to NULL.
2348 */
2349 if ((sd_component_mask & component) &&
2350 (sd_level_mask & SD_LOGMASK_TRACE) &&
2351 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2352 mutex_enter(&sd_log_mutex);
2353 va_start(ap, fmt);
2354 (void) vsprintf(sd_log_buf, fmt, ap);
2355 va_end(ap);
2356 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2357 mutex_exit(&sd_log_mutex);
2358 }
2359 #ifdef SD_FAULT_INJECTION
2360 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2361 if (un->sd_injection_mask & component) {
2362 mutex_enter(&sd_log_mutex);
2363 va_start(ap, fmt);
2364 (void) vsprintf(sd_log_buf, fmt, ap);
2365 va_end(ap);
2366 sd_injection_log(sd_log_buf, un);
2367 mutex_exit(&sd_log_mutex);
2368 }
2369 #endif
2370 }
2371 
2372 
2373 /*
2374 * Function: sdprobe
2375 *
2376 * Description: This is the driver probe(9e) entry point function.
2377 *
2378 * Arguments: devi - opaque device info handle
2379 *
2380 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
2381 * DDI_PROBE_FAILURE: If the probe failed.
2382 * DDI_PROBE_PARTIAL: If the instance is not present now,
2383 * but may be present in the future.
2384 */
2385 
2386 static int
2387 sdprobe(dev_info_t *devi)
2388 {
2389 struct scsi_device *devp;
2390 int rval;
2391 int instance;
2392 
2393 /*
2394 * if it wasn't for pln, sdprobe could actually be nulldev
2395 * in the "__fibre" case.
2396 */
2397 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
2398 return (DDI_PROBE_DONTCARE);
2399 }
2400 
2401 devp = ddi_get_driver_private(devi);
2402 
2403 if (devp == NULL) {
2404 /* Oops... nexus driver is mis-configured... */
2405 return (DDI_PROBE_FAILURE);
2406 }
2407 
2408 instance = ddi_get_instance(devi);
2409 
2410 if (ddi_get_soft_state(sd_state, instance) != NULL) {
2411 return (DDI_PROBE_PARTIAL);
2412 }
2413 
2414 /*
2415 * Call the SCSA utility probe routine to see if we actually
2416 * have a target at this SCSI nexus.
2417 */
2418 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) {
2419 case SCSIPROBE_EXISTS:
2420 switch (devp->sd_inq->inq_dtype) {
2421 case DTYPE_DIRECT:
2422 rval = DDI_PROBE_SUCCESS;
2423 break;
2424 case DTYPE_RODIRECT:
2425 /* CDs etc. Can be removable media */
2426 rval = DDI_PROBE_SUCCESS;
2427 break;
2428 case DTYPE_OPTICAL:
2429 /*
2430 * Rewritable optical drive HP115AA
2431 * Can also be removable media
2432 */
2433 
2434 /*
2435 * Do not attempt to bind to DTYPE_OPTICAL if
2436 * pre-Solaris 9 SPARC sd behavior is required
2437 *
2438 * If this is the first time through and
2439 * sd_dtype_optical_bind has not been set in
2440 * /etc/system, check properties
2441 */
2442 
2443 if (sd_dtype_optical_bind < 0) {
2444 sd_dtype_optical_bind = ddi_prop_get_int
2445 (DDI_DEV_T_ANY, devi, 0,
2446 "optical-device-bind", 1);
2447 }
2448 
2449 if (sd_dtype_optical_bind == 0) {
2450 rval = DDI_PROBE_FAILURE;
2451 } else {
2452 rval = DDI_PROBE_SUCCESS;
2453 }
2454 break;
2455 
2456 case DTYPE_NOTPRESENT:
2457 default:
2458 rval = DDI_PROBE_FAILURE;
2459 break;
2460 }
2461 break;
2462 default:
2463 rval = DDI_PROBE_PARTIAL;
2464 break;
2465 }
2466 
2467 /*
2468 * This routine checks for resource allocation prior to freeing,
2469 * so it will take care of the "smart probing" case where a
2470 * scsi_probe() may or may not have been issued and will *not*
2471 * free previously-freed resources.
2472 */
2473 scsi_unprobe(devp);
2474 return (rval);
2475 }
2476 
2477 
2478 /*
2479 * Function: sdinfo
2480 *
2481 * Description: This is the driver getinfo(9e) entry point function.
2481 * Given the device number, return the devinfo pointer from 2482 * the scsi_device structure or the instance number 2483 * associated with the dev_t. 2484 * 2485 * Arguments: dip - pointer to device info structure 2486 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO, 2487 * DDI_INFO_DEVT2INSTANCE) 2488 * arg - driver dev_t 2489 * resultp - user buffer for request response 2490 * 2491 * Return Code: DDI_SUCCESS 2492 * DDI_FAILURE 2493 */ 2494 /* ARGSUSED */ 2495 static int 2496 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 2497 { 2498 struct sd_lun *un; 2499 dev_t dev; 2500 int instance; 2501 int error; 2502 2503 switch (infocmd) { 2504 case DDI_INFO_DEVT2DEVINFO: 2505 dev = (dev_t)arg; 2506 instance = SDUNIT(dev); 2507 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 2508 return (DDI_FAILURE); 2509 } 2510 *result = (void *) SD_DEVINFO(un); 2511 error = DDI_SUCCESS; 2512 break; 2513 case DDI_INFO_DEVT2INSTANCE: 2514 dev = (dev_t)arg; 2515 instance = SDUNIT(dev); 2516 *result = (void *)(uintptr_t)instance; 2517 error = DDI_SUCCESS; 2518 break; 2519 default: 2520 error = DDI_FAILURE; 2521 } 2522 return (error); 2523 } 2524 2525 /* 2526 * Function: sd_prop_op 2527 * 2528 * Description: This is the driver prop_op(9e) entry point function. 2529 * Return the number of blocks for the partition in question 2530 * or forward the request to the property facilities. 2531 * 2532 * Arguments: dev - device number 2533 * dip - pointer to device info structure 2534 * prop_op - property operator 2535 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent 2536 * name - pointer to property name 2537 * valuep - pointer or address of the user buffer 2538 * lengthp - property length 2539 * 2540 * Return Code: DDI_PROP_SUCCESS 2541 * DDI_PROP_NOT_FOUND 2542 * DDI_PROP_UNDEFINED 2543 * DDI_PROP_NO_MEMORY 2544 * DDI_PROP_BUF_TOO_SMALL 2545 */ 2546 2547 static int 2548 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags, 2549 char *name, caddr_t valuep, int *lengthp) 2550 { 2551 int instance = ddi_get_instance(dip); 2552 struct sd_lun *un; 2553 uint64_t nblocks64; 2554 2555 /* 2556 * Our dynamic properties are all device specific and size oriented. 2557 * Requests issued under conditions where size is valid are passed 2558 * to ddi_prop_op_nblocks with the size information, otherwise the 2559 * request is passed to ddi_prop_op. Size depends on valid geometry. 2560 */ 2561 un = ddi_get_soft_state(sd_state, instance); 2562 if ((dev == DDI_DEV_T_ANY) || (un == NULL) || 2563 (un->un_f_geometry_is_valid == FALSE)) { 2564 return (ddi_prop_op(dev, dip, prop_op, mod_flags, 2565 name, valuep, lengthp)); 2566 } else { 2567 /* get nblocks value */ 2568 ASSERT(!mutex_owned(SD_MUTEX(un))); 2569 mutex_enter(SD_MUTEX(un)); 2570 nblocks64 = (ulong_t)un->un_map[SDPART(dev)].dkl_nblk; 2571 mutex_exit(SD_MUTEX(un)); 2572 2573 return (ddi_prop_op_nblocks(dev, dip, prop_op, mod_flags, 2574 name, valuep, lengthp, nblocks64)); 2575 } 2576 } 2577 2578 /* 2579 * The following functions are for smart probing: 2580 * sd_scsi_probe_cache_init() 2581 * sd_scsi_probe_cache_fini() 2582 * sd_scsi_clear_probe_cache() 2583 * sd_scsi_probe_with_cache() 2584 */ 2585 2586 /* 2587 * Function: sd_scsi_probe_cache_init 2588 * 2589 * Description: Initializes the probe response cache mutex and head pointer. 
2590 *
2591 * Context: Kernel thread context
2592 */
2593 
2594 static void
2595 sd_scsi_probe_cache_init(void)
2596 {
2597 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL);
2598 sd_scsi_probe_cache_head = NULL;
2599 }
2600 
2601 
2602 /*
2603 * Function: sd_scsi_probe_cache_fini
2604 *
2605 * Description: Frees all resources associated with the probe response cache.
2606 *
2607 * Context: Kernel thread context
2608 */
2609 
2610 static void
2611 sd_scsi_probe_cache_fini(void)
2612 {
2613 struct sd_scsi_probe_cache *cp;
2614 struct sd_scsi_probe_cache *ncp;
2615 
2616 /* Clean up our smart probing linked list */
2617 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) {
2618 ncp = cp->next;
2619 kmem_free(cp, sizeof (struct sd_scsi_probe_cache));
2620 }
2621 sd_scsi_probe_cache_head = NULL;
2622 mutex_destroy(&sd_scsi_probe_cache_mutex);
2623 }
2624 
2625 
2626 /*
2627 * Function: sd_scsi_clear_probe_cache
2628 *
2629 * Description: This routine clears the probe response cache. This is
2630 * done when open() returns ENXIO so that when deferred
2631 * attach is attempted (possibly after a device has been
2632 * turned on) we will retry the probe. Since we don't know
2633 * which target we failed to open, we just clear the
2634 * entire cache.
2635 *
2636 * Context: Kernel thread context
2637 */
2638 
2639 static void
2640 sd_scsi_clear_probe_cache(void)
2641 {
2642 struct sd_scsi_probe_cache *cp;
2643 int i;
2644 
2645 mutex_enter(&sd_scsi_probe_cache_mutex);
2646 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2647 /*
2648 * Reset all entries to SCSIPROBE_EXISTS. This will
2649 * force probing to be performed the next time
2650 * sd_scsi_probe_with_cache is called.
2651 */
2652 for (i = 0; i < NTARGETS_WIDE; i++) {
2653 cp->cache[i] = SCSIPROBE_EXISTS;
2654 }
2655 }
2656 mutex_exit(&sd_scsi_probe_cache_mutex);
2657 }
2658 
2659 
2660 /*
2661 * Function: sd_scsi_probe_with_cache
2662 *
2663 * Description: This routine implements support for a scsi device probe
2664 * with cache. The driver maintains a cache of the target
2665 * responses to scsi probes. If we get no response from a
2666 * target during a probe inquiry, we remember that, and we
2667 * avoid additional calls to scsi_probe on non-zero LUNs
2668 * on the same target until the cache is cleared. By doing
2669 * so we avoid the 1/4 sec selection timeout for nonzero
2670 * LUNs. LUN 0 of a target is always probed.
2671 *
2672 * Arguments: devp - Pointer to a scsi_device(9S) structure
2673 * waitfn - indicates what the allocator routines should
2674 * do when resources are not available. This value
2675 * is passed on to scsi_probe() when that routine
2676 * is called.
2677 *
2678 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache;
2679 * otherwise the value returned by scsi_probe(9F).
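 *
 *		For example, if the LUN 0 probe of a target gets no
 *		response, that result is cached, and later probes of
 *		nonzero LUNs on the same target return SCSIPROBE_NORESP
 *		immediately instead of incurring another 1/4 sec
 *		selection timeout on the bus.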
2680 *
2681 * Context: Kernel thread context
2682 */
2683 
2684 static int
2685 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
2686 {
2687 struct sd_scsi_probe_cache *cp;
2688 dev_info_t *pdip = ddi_get_parent(devp->sd_dev);
2689 int lun, tgt;
2690 
2691 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2692 SCSI_ADDR_PROP_LUN, 0);
2693 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2694 SCSI_ADDR_PROP_TARGET, -1);
2695 
2696 /* Make sure caching enabled and target in range */
2697 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
2698 /* do it the old way (no cache) */
2699 return (scsi_probe(devp, waitfn));
2700 }
2701 
2702 mutex_enter(&sd_scsi_probe_cache_mutex);
2703 
2704 /* Find the cache for this scsi bus instance */
2705 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2706 if (cp->pdip == pdip) {
2707 break;
2708 }
2709 }
2710 
2711 /* If we can't find a cache for this pdip, create one */
2712 if (cp == NULL) {
2713 int i;
2714 
2715 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
2716 KM_SLEEP);
2717 cp->pdip = pdip;
2718 cp->next = sd_scsi_probe_cache_head;
2719 sd_scsi_probe_cache_head = cp;
2720 for (i = 0; i < NTARGETS_WIDE; i++) {
2721 cp->cache[i] = SCSIPROBE_EXISTS;
2722 }
2723 }
2724 
2725 mutex_exit(&sd_scsi_probe_cache_mutex);
2726 
2727 /* Recompute the cache for this target if LUN zero */
2728 if (lun == 0) {
2729 cp->cache[tgt] = SCSIPROBE_EXISTS;
2730 }
2731 
2732 /* Don't probe if cache remembers a NORESP from a previous LUN. */
2733 if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
2734 return (SCSIPROBE_NORESP);
2735 }
2736 
2737 /* Do the actual probe; save & return the result */
2738 return (cp->cache[tgt] = scsi_probe(devp, waitfn));
2739 }
2740 
2741 
2742 /*
2743 * Function: sd_spin_up_unit
2744 *
2745 * Description: Issues the following commands to spin up the device:
2746 * START STOP UNIT and INQUIRY.
2747 *
2748 * Arguments: un - driver soft state (unit) structure
2749 *
2750 * Return Code: 0 - success
2751 * EIO - failure
2752 * EACCES - reservation conflict
2753 *
2754 * Context: Kernel thread context
2755 */
2756 
2757 static int
2758 sd_spin_up_unit(struct sd_lun *un)
2759 {
2760 size_t resid = 0;
2761 int has_conflict = FALSE;
2762 uchar_t *bufaddr;
2763 
2764 ASSERT(un != NULL);
2765 
2766 /*
2767 * Send a throwaway START UNIT command.
2768 *
2769 * If we fail on this, we don't care presently what precisely
2770 * is wrong. EMC's arrays will also fail this with a check
2771 * condition (0x2/0x4/0x3) if the device is "inactive," but
2772 * we don't want to fail the attach because it may become
2773 * "active" later.
2774 */
2775 if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, SD_PATH_DIRECT)
2776 == EACCES)
2777 has_conflict = TRUE;
2778 
2779 /*
2780 * Send another INQUIRY command to the target. This is necessary for
2781 * non-removable media direct access devices because their INQUIRY data
2782 * may not be fully qualified until they are spun up (perhaps via the
2783 * START command above). (Note: This seems to be needed for some
2784 * legacy devices only.) The INQUIRY command should succeed even if a
2785 * Reservation Conflict is present.
2786 */
2787 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
2788 if (sd_send_scsi_INQUIRY(un, bufaddr, SUN_INQSIZE, 0, 0, &resid) != 0) {
2789 kmem_free(bufaddr, SUN_INQSIZE);
2790 return (EIO);
2791 }
2792 
2793 /*
2794 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
2795 * Note that this routine does not return a failure here even if the 2796 * INQUIRY command did not return any data. This is a legacy behavior. 2797 */ 2798 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) { 2799 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE); 2800 } 2801 2802 kmem_free(bufaddr, SUN_INQSIZE); 2803 2804 /* If we hit a reservation conflict above, tell the caller. */ 2805 if (has_conflict == TRUE) { 2806 return (EACCES); 2807 } 2808 2809 return (0); 2810 } 2811 2812 /* 2813 * Function: sd_enable_descr_sense 2814 * 2815 * Description: This routine attempts to select descriptor sense format 2816 * using the Control mode page. Devices that support 64 bit 2817 * LBAs (for >2TB luns) should also implement descriptor 2818 * sense data so we will call this function whenever we see 2819 * a lun larger than 2TB. If for some reason the device 2820 * supports 64 bit LBAs but doesn't support descriptor sense 2821 * presumably the mode select will fail. Everything will 2822 * continue to work normally except that we will not get 2823 * complete sense data for commands that fail with an LBA 2824 * larger than 32 bits. 2825 * 2826 * Arguments: un - driver soft state (unit) structure 2827 * 2828 * Context: Kernel thread context only 2829 */ 2830 2831 static void 2832 sd_enable_descr_sense(struct sd_lun *un) 2833 { 2834 uchar_t *header; 2835 struct mode_control_scsi3 *ctrl_bufp; 2836 size_t buflen; 2837 size_t bd_len; 2838 2839 /* 2840 * Read MODE SENSE page 0xA, Control Mode Page 2841 */ 2842 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH + 2843 sizeof (struct mode_control_scsi3); 2844 header = kmem_zalloc(buflen, KM_SLEEP); 2845 if (sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 2846 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT) != 0) { 2847 SD_ERROR(SD_LOG_COMMON, un, 2848 "sd_enable_descr_sense: mode sense ctrl page failed\n"); 2849 goto eds_exit; 2850 } 2851 2852 /* 2853 * Determine size of Block Descriptors in order to locate 2854 * the mode page data. ATAPI devices return 0, SCSI devices 2855 * should return MODE_BLK_DESC_LENGTH. 2856 */ 2857 bd_len = ((struct mode_header *)header)->bdesc_length; 2858 2859 ctrl_bufp = (struct mode_control_scsi3 *) 2860 (header + MODE_HEADER_LENGTH + bd_len); 2861 2862 /* 2863 * Clear PS bit for MODE SELECT 2864 */ 2865 ctrl_bufp->mode_page.ps = 0; 2866 2867 /* 2868 * Set D_SENSE to enable descriptor sense format. 2869 */ 2870 ctrl_bufp->d_sense = 1; 2871 2872 /* 2873 * Use MODE SELECT to commit the change to the D_SENSE bit 2874 */ 2875 if (sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 2876 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT) != 0) { 2877 SD_INFO(SD_LOG_COMMON, un, 2878 "sd_enable_descr_sense: mode select ctrl page failed\n"); 2879 goto eds_exit; 2880 } 2881 2882 eds_exit: 2883 kmem_free(header, buflen); 2884 } 2885 2886 2887 /* 2888 * Function: sd_set_mmc_caps 2889 * 2890 * Description: This routine determines if the device is MMC compliant and if 2891 * the device supports CDDA via a mode sense of the CDVD 2892 * capabilities mode page. Also checks if the device is a 2893 * dvdram writable device. 
2894 *
2895 * Arguments: un - driver soft state (unit) structure
2896 *
2897 * Context: Kernel thread context only
2898 */
2899 
2900 static void
2901 sd_set_mmc_caps(struct sd_lun *un)
2902 {
2903 struct mode_header_grp2 *sense_mhp;
2904 uchar_t *sense_page;
2905 caddr_t buf;
2906 int bd_len;
2907 int status;
2908 struct uscsi_cmd com;
2909 int rtn;
2910 uchar_t *out_data_rw, *out_data_hd;
2911 uchar_t *rqbuf_rw, *rqbuf_hd;
2912 
2913 ASSERT(un != NULL);
2914 
2915 /*
2916 * The flags set in this function are: MMC compliant, DVDRAM
2917 * writable device, and CDDA support. Each is initialized to FALSE;
2918 * if a capability is detected, the corresponding flag is set to TRUE.
2919 */
2920 un->un_f_mmc_cap = FALSE;
2921 un->un_f_dvdram_writable_device = FALSE;
2922 un->un_f_cfg_cdda = FALSE;
2923 
2924 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
2925 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf,
2926 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);
2927 
2928 if (status != 0) {
2929 /* command failed; just return */
2930 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
2931 return;
2932 }
2933 /*
2934 * If the mode sense request for the CDROM CAPABILITIES
2935 * page (0x2A) succeeds, the device is assumed to be MMC.
2936 */
2937 un->un_f_mmc_cap = TRUE;
2938 
2939 /* Get to the page data */
2940 sense_mhp = (struct mode_header_grp2 *)buf;
2941 bd_len = (sense_mhp->bdesc_length_hi << 8) |
2942 sense_mhp->bdesc_length_lo;
2943 if (bd_len > MODE_BLK_DESC_LENGTH) {
2944 /*
2945 * We did not get back the expected block descriptor
2946 * length, so we cannot determine if the device supports
2947 * CDDA. However, we still indicate the device is MMC
2948 * according to the successful response to the page
2949 * 0x2A mode sense request.
2950 */
2951 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
2952 "sd_set_mmc_caps: Mode Sense returned "
2953 "invalid block descriptor length\n");
2954 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
2955 return;
2956 }
2957 
2958 /* See if read CDDA is supported */
2959 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 +
2960 bd_len);
2961 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE;
2962 
2963 /* See if writing DVD RAM is supported. */
2964 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE;
2965 if (un->un_f_dvdram_writable_device == TRUE) {
2966 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
2967 return;
2968 }
2969 
2970 /*
2971 * If the device presents DVD or CD capabilities in the mode
2972 * page, we can return here since an RRD will not have
2973 * these capabilities.
2974 */
2975 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
2976 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
2977 return;
2978 }
2979 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
2980 
2981 /*
2982 * If un->un_f_dvdram_writable_device is still FALSE,
2983 * check for a Removable Rigid Disk (RRD). An RRD
2984 * device is identified by the features RANDOM_WRITABLE and
2985 * HARDWARE_DEFECT_MANAGEMENT.
2986 */
2987 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
2988 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
2989 
2990 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw,
2991 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
2992 RANDOM_WRITABLE);
2993 if (rtn != 0) {
2994 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
2995 kmem_free(rqbuf_rw, SENSE_LENGTH);
2996 return;
2997 }
2998 
2999 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3000 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3001 
3002 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd,
3003 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3004 HARDWARE_DEFECT_MANAGEMENT);
3005 if (rtn == 0) {
3006 /*
3007 * We have good information, check for random writable
3008 * and hardware defect features.
3009 */
3010 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3011 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
3012 un->un_f_dvdram_writable_device = TRUE;
3013 }
3014 }
3015 
3016 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3017 kmem_free(rqbuf_rw, SENSE_LENGTH);
3018 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3019 kmem_free(rqbuf_hd, SENSE_LENGTH);
3020 }
3021 
3022 /*
3023 * Function: sd_check_for_writable_cd
3024 *
3025 * Description: This routine determines if the media in the device is
3026 * writable or not. It uses the GET CONFIGURATION command
3027 * (0x46) to determine if the media is writable.
3028 *
3029 * Arguments: un - driver soft state (unit) structure
3030 *
3031 * Context: Never called at interrupt context.
3032 */
3033 
3034 static void
3035 sd_check_for_writable_cd(struct sd_lun *un)
3036 {
3037 struct uscsi_cmd com;
3038 uchar_t *out_data;
3039 uchar_t *rqbuf;
3040 int rtn;
3041 uchar_t *out_data_rw, *out_data_hd;
3042 uchar_t *rqbuf_rw, *rqbuf_hd;
3043 struct mode_header_grp2 *sense_mhp;
3044 uchar_t *sense_page;
3045 caddr_t buf;
3046 int bd_len;
3047 int status;
3048 
3049 ASSERT(un != NULL);
3050 ASSERT(mutex_owned(SD_MUTEX(un)));
3051 
3052 /*
3053 * Initialize un_f_mmc_writable_media to FALSE; it is set to TRUE
3054 * only if the configuration info tells us the media is writable.
3055 */
3056 un->un_f_mmc_writable_media = FALSE;
3057 mutex_exit(SD_MUTEX(un));
3058 
3059 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
3060 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3061 
3062 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, SENSE_LENGTH,
3063 out_data, SD_PROFILE_HEADER_LEN);
3064 
3065 mutex_enter(SD_MUTEX(un));
3066 if (rtn == 0) {
3067 /*
3068 * We have good information, check for writable DVD.
3069 */
3070 if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
3071 un->un_f_mmc_writable_media = TRUE;
3072 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3073 kmem_free(rqbuf, SENSE_LENGTH);
3074 return;
3075 }
3076 }
3077 
3078 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3079 kmem_free(rqbuf, SENSE_LENGTH);
3080 
3081 /*
3082 * Determine if this is an RRD type device.
3083 */
3084 mutex_exit(SD_MUTEX(un));
3085 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3086 status = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, (uchar_t *)buf,
3087 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);
3088 mutex_enter(SD_MUTEX(un));
3089 if (status != 0) {
3090 /* command failed; just return */
3091 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3092 return;
3093 }
3094 
3095 /* Get to the page data */
3096 sense_mhp = (struct mode_header_grp2 *)buf;
3097 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
3098 if (bd_len > MODE_BLK_DESC_LENGTH) {
3099 /*
3100 * We did not get back the expected block descriptor length, so
3101 * we cannot check the mode page.
3102 */
3103 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3104 "sd_check_for_writable_cd: Mode Sense returned "
3105 "invalid block descriptor length\n");
3106 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3107 return;
3108 }
3109 
3110 /*
3111 * If the device presents DVD or CD capabilities in the mode
3112 * page, we can return here since an RRD device will not have
3113 * these capabilities.
3114 */
3115 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
3116 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3117 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3118 return;
3119 }
3120 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3121 
3122 /*
3123 * If un->un_f_mmc_writable_media is still FALSE,
3124 * check for RRD type media. An RRD device is identified
3125 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
3126 */
3127 mutex_exit(SD_MUTEX(un));
3128 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3129 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3130 
3131 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_rw,
3132 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3133 RANDOM_WRITABLE);
3134 if (rtn != 0) {
3135 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3136 kmem_free(rqbuf_rw, SENSE_LENGTH);
3137 mutex_enter(SD_MUTEX(un));
3138 return;
3139 }
3140 
3141 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3142 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3143 
3144 rtn = sd_send_scsi_feature_GET_CONFIGURATION(un, &com, rqbuf_hd,
3145 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3146 HARDWARE_DEFECT_MANAGEMENT);
3147 mutex_enter(SD_MUTEX(un));
3148 if (rtn == 0) {
3149 /*
3150 * We have good information, check for random writable
3151 * and hardware defect features as current.
3152 */
3153 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3154 (out_data_rw[10] & 0x1) &&
3155 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
3156 (out_data_hd[10] & 0x1)) {
3157 un->un_f_mmc_writable_media = TRUE;
3158 }
3159 }
3160 
3161 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3162 kmem_free(rqbuf_rw, SENSE_LENGTH);
3163 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3164 kmem_free(rqbuf_hd, SENSE_LENGTH);
3165 }
3166 
3167 /*
3168 * Function: sd_read_unit_properties
3169 *
3170 * Description: The following implements a property lookup mechanism.
3171 * Properties for particular disks (keyed on vendor, model
3172 * and rev numbers) are sought in the sd.conf file via
3173 * sd_process_sdconf_file(), and if not found there, are
3174 * looked for in a list hardcoded in this driver via
3175 * sd_process_sdconf_table(). Once located, the properties
3176 * are used to update the driver unit structure.
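 *
 *		For example (illustrative), adding the line
 *
 *			enable-partition-kstats=0;
 *
 *		to sd.conf would disable partition kstats for all
 *		instances, as consumed at the end of this routine.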
3177 * 3178 * Arguments: un - driver soft state (unit) structure 3179 */ 3180 3181 static void 3182 sd_read_unit_properties(struct sd_lun *un) 3183 { 3184 /* 3185 * sd_process_sdconf_file returns SD_FAILURE if it cannot find 3186 * the "sd-config-list" property (from the sd.conf file) or if 3187 * there was not a match for the inquiry vid/pid. If this event 3188 * occurs the static driver configuration table is searched for 3189 * a match. 3190 */ 3191 ASSERT(un != NULL); 3192 if (sd_process_sdconf_file(un) == SD_FAILURE) { 3193 sd_process_sdconf_table(un); 3194 } 3195 3196 /* 3197 * Set this in sd.conf to 0 in order to disable kstats. The default 3198 * is 1, so they are enabled by default. 3199 */ 3200 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY, 3201 SD_DEVINFO(un), DDI_PROP_DONTPASS, "enable-partition-kstats", 1)); 3202 } 3203 3204 3205 /* 3206 * Function: sd_process_sdconf_file 3207 * 3208 * Description: Use ddi_getlongprop to obtain the properties from the 3209 * driver's config file (ie, sd.conf) and update the driver 3210 * soft state structure accordingly. 3211 * 3212 * Arguments: un - driver soft state (unit) structure 3213 * 3214 * Return Code: SD_SUCCESS - The properties were successfully set according 3215 * to the driver configuration file. 3216 * SD_FAILURE - The driver config list was not obtained or 3217 * there was no vid/pid match. This indicates that 3218 * the static config table should be used. 3219 * 3220 * The config file has a property, "sd-config-list", which consists of 3221 * one or more duplets as follows: 3222 * 3223 * sd-config-list= 3224 * <duplet>, 3225 * [<duplet>,] 3226 * [<duplet>]; 3227 * 3228 * The structure of each duplet is as follows: 3229 * 3230 * <duplet>:= <vid+pid>,<data-property-name_list> 3231 * 3232 * The first entry of the duplet is the device ID string (the concatenated 3233 * vid & pid; not to be confused with a device_id). This is defined in 3234 * the same way as in the sd_disk_table. 3235 * 3236 * The second part of the duplet is a string that identifies a 3237 * data-property-name-list. The data-property-name-list is defined as 3238 * follows: 3239 * 3240 * <data-property-name-list>:=<data-property-name> [<data-property-name>] 3241 * 3242 * The syntax of <data-property-name> depends on the <version> field. 3243 * 3244 * If version = SD_CONF_VERSION_1 we have the following syntax: 3245 * 3246 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3247 * 3248 * where the prop0 value will be used to set prop0 if bit0 set in the 3249 * flags, prop1 if bit1 set, etc. 
and N = SD_CONF_MAX_ITEMS -1 3250 * 3251 * If version = SD_CONF_VERSION_10 we have the following syntax: 3252 * 3253 * <data-property-name>:=<version>,<prop0>,<prop1>,<prop2>,<prop3> 3254 */ 3255 3256 static int 3257 sd_process_sdconf_file(struct sd_lun *un) 3258 { 3259 char *config_list = NULL; 3260 int config_list_len; 3261 int len; 3262 int dupletlen = 0; 3263 char *vidptr; 3264 int vidlen; 3265 char *dnlist_ptr; 3266 char *dataname_ptr; 3267 int dnlist_len; 3268 int dataname_len; 3269 int *data_list; 3270 int data_list_len; 3271 int rval = SD_FAILURE; 3272 int i; 3273 3274 ASSERT(un != NULL); 3275 3276 /* Obtain the configuration list associated with the .conf file */ 3277 if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), DDI_PROP_DONTPASS, 3278 sd_config_list, (caddr_t)&config_list, &config_list_len) 3279 != DDI_PROP_SUCCESS) { 3280 return (SD_FAILURE); 3281 } 3282 3283 /* 3284 * Compare vids in each duplet to the inquiry vid - if a match is 3285 * made, get the data value and update the soft state structure 3286 * accordingly. 3287 * 3288 * Note: This algorithm is complex and difficult to maintain. It should 3289 * be replaced with a more robust implementation. 3290 */ 3291 for (len = config_list_len, vidptr = config_list; len > 0; 3292 vidptr += dupletlen, len -= dupletlen) { 3293 /* 3294 * Note: The assumption here is that each vid entry is on 3295 * a unique line from its associated duplet. 3296 */ 3297 vidlen = dupletlen = (int)strlen(vidptr); 3298 if ((vidlen == 0) || 3299 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) { 3300 dupletlen++; 3301 continue; 3302 } 3303 3304 /* 3305 * dnlist contains 1 or more blank separated 3306 * data-property-name entries 3307 */ 3308 dnlist_ptr = vidptr + vidlen + 1; 3309 dnlist_len = (int)strlen(dnlist_ptr); 3310 dupletlen += dnlist_len + 2; 3311 3312 /* 3313 * Set a pointer for the first data-property-name 3314 * entry in the list 3315 */ 3316 dataname_ptr = dnlist_ptr; 3317 dataname_len = 0; 3318 3319 /* 3320 * Loop through all data-property-name entries in the 3321 * data-property-name-list setting the properties for each. 3322 */ 3323 while (dataname_len < dnlist_len) { 3324 int version; 3325 3326 /* 3327 * Determine the length of the current 3328 * data-property-name entry by indexing until a 3329 * blank or NULL is encountered. When the space is 3330 * encountered reset it to a NULL for compliance 3331 * with ddi_getlongprop(). 
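			 *
			 * For example (a hypothetical sd.conf entry):
			 *
			 *   sd-config-list =
			 *       "SEAGATE ST39103LC", "seagate-data";
			 *   seagate-data = 1,0x1,10;
			 *
			 * Here the dnlist is just "seagate-data"; an entry
			 * naming several properties, e.g. "seagate-data
			 * purple-data", would be walked one name at a time.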
3332 			 */
3333 			for (i = 0; ((dataname_ptr[i] != ' ') &&
3334 			    (dataname_ptr[i] != '\0')); i++) {
3335 				;
3336 			}
3337 
3338 			dataname_len += i;
3339 			/* If not null terminated, make it so */
3340 			if (dataname_ptr[i] == ' ') {
3341 				dataname_ptr[i] = '\0';
3342 			}
3343 			dataname_len++;
3344 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
3345 			    "sd_process_sdconf_file: disk:%s, data:%s\n",
3346 			    vidptr, dataname_ptr);
3347 
3348 			/* Get the data list */
3349 			if (ddi_getlongprop(DDI_DEV_T_ANY, SD_DEVINFO(un), 0,
3350 			    dataname_ptr, (caddr_t)&data_list, &data_list_len)
3351 			    != DDI_PROP_SUCCESS) {
3352 				SD_INFO(SD_LOG_ATTACH_DETACH, un,
3353 				    "sd_process_sdconf_file: data property (%s)"
3354 				    " has no value\n", dataname_ptr);
3355 				dataname_ptr = dnlist_ptr + dataname_len;
3356 				continue;
3357 			}
3358 
3359 			version = data_list[0];
3360 
3361 			if (version == SD_CONF_VERSION_1) {
3362 				sd_tunables values;
3363 
3364 				/* Set the properties */
3365 				if (sd_chk_vers1_data(un, data_list[1],
3366 				    &data_list[2], data_list_len, dataname_ptr)
3367 				    == SD_SUCCESS) {
3368 					sd_get_tunables_from_conf(un,
3369 					    data_list[1], &data_list[2],
3370 					    &values);
3371 					sd_set_vers1_properties(un,
3372 					    data_list[1], &values);
3373 					rval = SD_SUCCESS;
3374 				} else {
3375 					rval = SD_FAILURE;
3376 				}
3377 			} else {
3378 				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3379 				    "data property %s version 0x%x is invalid.",
3380 				    dataname_ptr, version);
3381 				rval = SD_FAILURE;
3382 			}
3383 			kmem_free(data_list, data_list_len);
3384 			dataname_ptr = dnlist_ptr + dataname_len;
3385 		}
3386 	}
3387 
3388 	/* free up the memory allocated by ddi_getlongprop */
3389 	if (config_list) {
3390 		kmem_free(config_list, config_list_len);
3391 	}
3392 
3393 	return (rval);
3394 }
3395 
3396 /*
3397  * Function:	sd_get_tunables_from_conf()
3398  *
3399  *
3400  *	This function reads the data list from the sd.conf file and pulls
3401  *	the values that can have numeric values as arguments and places
3402  *	the values in the appropriate sd_tunables member.
3403  *	Since the order of the data list members varies across platforms,
3404  *	this function reads them from the data list in a platform-specific
3405  *	order and places them into the correct sd_tunables member that is
3406  *	consistent across all platforms.
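 *
 *	For example, with the hypothetical "seagate-data = 1,0x1,10;"
 *	entry sketched earlier (and assuming, for illustration only,
 *	that SD_CONF_BSET_THROTTLE is bit0), the flags word 0x1 would
 *	cause data_list[0], the value 10, to land in sdt_throttle.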
3407 */ 3408 static void 3409 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list, 3410 sd_tunables *values) 3411 { 3412 int i; 3413 int mask; 3414 3415 bzero(values, sizeof (sd_tunables)); 3416 3417 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3418 3419 mask = 1 << i; 3420 if (mask > flags) { 3421 break; 3422 } 3423 3424 switch (mask & flags) { 3425 case 0: /* This mask bit not set in flags */ 3426 continue; 3427 case SD_CONF_BSET_THROTTLE: 3428 values->sdt_throttle = data_list[i]; 3429 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3430 "sd_get_tunables_from_conf: throttle = %d\n", 3431 values->sdt_throttle); 3432 break; 3433 case SD_CONF_BSET_CTYPE: 3434 values->sdt_ctype = data_list[i]; 3435 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3436 "sd_get_tunables_from_conf: ctype = %d\n", 3437 values->sdt_ctype); 3438 break; 3439 case SD_CONF_BSET_NRR_COUNT: 3440 values->sdt_not_rdy_retries = data_list[i]; 3441 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3442 "sd_get_tunables_from_conf: not_rdy_retries = %d\n", 3443 values->sdt_not_rdy_retries); 3444 break; 3445 case SD_CONF_BSET_BSY_RETRY_COUNT: 3446 values->sdt_busy_retries = data_list[i]; 3447 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3448 "sd_get_tunables_from_conf: busy_retries = %d\n", 3449 values->sdt_busy_retries); 3450 break; 3451 case SD_CONF_BSET_RST_RETRIES: 3452 values->sdt_reset_retries = data_list[i]; 3453 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3454 "sd_get_tunables_from_conf: reset_retries = %d\n", 3455 values->sdt_reset_retries); 3456 break; 3457 case SD_CONF_BSET_RSV_REL_TIME: 3458 values->sdt_reserv_rel_time = data_list[i]; 3459 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3460 "sd_get_tunables_from_conf: reserv_rel_time = %d\n", 3461 values->sdt_reserv_rel_time); 3462 break; 3463 case SD_CONF_BSET_MIN_THROTTLE: 3464 values->sdt_min_throttle = data_list[i]; 3465 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3466 "sd_get_tunables_from_conf: min_throttle = %d\n", 3467 values->sdt_min_throttle); 3468 break; 3469 case SD_CONF_BSET_DISKSORT_DISABLED: 3470 values->sdt_disk_sort_dis = data_list[i]; 3471 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3472 "sd_get_tunables_from_conf: disk_sort_dis = %d\n", 3473 values->sdt_disk_sort_dis); 3474 break; 3475 case SD_CONF_BSET_LUN_RESET_ENABLED: 3476 values->sdt_lun_reset_enable = data_list[i]; 3477 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3478 "sd_get_tunables_from_conf: lun_reset_enable = %d" 3479 "\n", values->sdt_lun_reset_enable); 3480 break; 3481 } 3482 } 3483 } 3484 3485 /* 3486 * Function: sd_process_sdconf_table 3487 * 3488 * Description: Search the static configuration table for a match on the 3489 * inquiry vid/pid and update the driver soft state structure 3490 * according to the table property values for the device. 3491 * 3492 * The form of a configuration table entry is: 3493 * <vid+pid>,<flags>,<property-data> 3494 * "SEAGATE ST42400N",1,63,0,0 (Fibre) 3495 * "SEAGATE ST42400N",1,63,0,0,0,0 (Sparc) 3496 * "SEAGATE ST42400N",1,63,0,0,0,0,0,0,0,0,0,0 (Intel) 3497 * 3498 * Arguments: un - driver soft state (unit) structure 3499 */ 3500 3501 static void 3502 sd_process_sdconf_table(struct sd_lun *un) 3503 { 3504 char *id = NULL; 3505 int table_index; 3506 int idlen; 3507 3508 ASSERT(un != NULL); 3509 for (table_index = 0; table_index < sd_disk_table_size; 3510 table_index++) { 3511 id = sd_disk_table[table_index].device_id; 3512 idlen = strlen(id); 3513 if (idlen == 0) { 3514 continue; 3515 } 3516 3517 /* 3518 * The static configuration table currently does not 3519 * implement version 10 properties. 
Additionally,
3520 	 * multiple data-property-name entries are not
3521 	 * implemented in the static configuration table.
3522 	 */
3523 		if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
3524 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
3525 			    "sd_process_sdconf_table: disk %s\n", id);
3526 			sd_set_vers1_properties(un,
3527 			    sd_disk_table[table_index].flags,
3528 			    sd_disk_table[table_index].properties);
3529 			break;
3530 		}
3531 	}
3532 }
3533 
3534 
3535 /*
3536  * Function:	sd_sdconf_id_match
3537  *
3538  * Description:	This local function implements a case-insensitive vid/pid
3539  *		comparison as well as the boundary cases of wild card and
3540  *		multiple blanks.
3541  *
3542  *		Note: An implicit assumption made here is that the scsi
3543  *		inquiry structure will always keep the vid, pid and
3544  *		revision strings in consecutive sequence, so they can be
3545  *		read as a single string. If this assumption is not the
3546  *		case, a separate string, to be used for the check, needs
3547  *		to be built with these strings concatenated.
3548  *
3549  * Arguments:	un - driver soft state (unit) structure
3550  *		id - table or config file vid/pid
3551  *		idlen - length of the vid/pid (bytes)
3552  *
3553  * Return Code:	SD_SUCCESS - Indicates a match with the inquiry vid/pid
3554  *		SD_FAILURE - Indicates no match with the inquiry vid/pid
3555  */
3556 
3557 static int
3558 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
3559 {
3560 	struct scsi_inquiry	*sd_inq;
3561 	int			rval = SD_SUCCESS;
3562 
3563 	ASSERT(un != NULL);
3564 	sd_inq = un->un_sd->sd_inq;
3565 	ASSERT(id != NULL);
3566 
3567 	/*
3568 	 * We use the inq_vid as a pointer to a buffer containing the
3569 	 * vid and pid and use the entire vid/pid length of the table
3570 	 * entry for the comparison. This works because the inq_pid
3571 	 * data member follows inq_vid in the scsi_inquiry structure.
3572 	 */
3573 	if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
3574 		/*
3575 		 * The user id string is compared to the inquiry vid/pid
3576 		 * using a case insensitive comparison and ignoring
3577 		 * multiple spaces.
3578 		 */
3579 		rval = sd_blank_cmp(un, id, idlen);
3580 		if (rval != SD_SUCCESS) {
3581 			/*
3582 			 * User id strings that start and end with a "*"
3583 			 * are a special case. These do not have a
3584 			 * specific vendor, and the product string can
3585 			 * appear anywhere in the 16 byte PID portion of
3586 			 * the inquiry data. This is a simple strstr()
3587 			 * type search for the user id in the inquiry data.
3588 			 */
3589 			if ((id[0] == '*') && (id[idlen - 1] == '*')) {
3590 				char	*pidptr = &id[1];
3591 				int	i;
3592 				int	j;
3593 				int	pidstrlen = idlen - 2;
3594 				j = sizeof (SD_INQUIRY(un)->inq_pid) -
3595 				    pidstrlen;
3596 
3597 				if (j < 0) {
3598 					return (SD_FAILURE);
3599 				}
3600 				for (i = 0; i < j; i++) {
3601 					if (bcmp(&SD_INQUIRY(un)->inq_pid[i],
3602 					    pidptr, pidstrlen) == 0) {
3603 						rval = SD_SUCCESS;
3604 						break;
3605 					}
3606 				}
3607 			}
3608 		}
3609 	}
3610 	return (rval);
3611 }
3612 
3613 
3614 /*
3615  * Function:	sd_blank_cmp
3616  *
3617  * Description:	If the id string starts and ends with a space, treat
3618  *		multiple consecutive spaces as equivalent to a single
3619  *		space. For example, this causes a sd_disk_table entry
3620  *		of " NEC CDROM " to match a device's id string of
3621  *		"NEC     CDROM".
3622  *
3623  *		Note: The success exit condition for this routine is that
3624  *		the pointer into the table entry has reached its terminating
3625  *		'\0' and cnt, the remaining inquiry length, is zero.
This will happen if the inquiry 3626 * string returned by the device is padded with spaces to be 3627 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The 3628 * SCSI spec states that the inquiry string is to be padded with 3629 * spaces. 3630 * 3631 * Arguments: un - driver soft state (unit) structure 3632 * id - table or config file vid/pid 3633 * idlen - length of the vid/pid (bytes) 3634 * 3635 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid 3636 * SD_FAILURE - Indicates no match with the inquiry vid/pid 3637 */ 3638 3639 static int 3640 sd_blank_cmp(struct sd_lun *un, char *id, int idlen) 3641 { 3642 char *p1; 3643 char *p2; 3644 int cnt; 3645 cnt = sizeof (SD_INQUIRY(un)->inq_vid) + 3646 sizeof (SD_INQUIRY(un)->inq_pid); 3647 3648 ASSERT(un != NULL); 3649 p2 = un->un_sd->sd_inq->inq_vid; 3650 ASSERT(id != NULL); 3651 p1 = id; 3652 3653 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) { 3654 /* 3655 * Note: string p1 is terminated by a NUL but string p2 3656 * isn't. The end of p2 is determined by cnt. 3657 */ 3658 for (;;) { 3659 /* skip over any extra blanks in both strings */ 3660 while ((*p1 != '\0') && (*p1 == ' ')) { 3661 p1++; 3662 } 3663 while ((cnt != 0) && (*p2 == ' ')) { 3664 p2++; 3665 cnt--; 3666 } 3667 3668 /* compare the two strings */ 3669 if ((cnt == 0) || 3670 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) { 3671 break; 3672 } 3673 while ((cnt > 0) && 3674 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) { 3675 p1++; 3676 p2++; 3677 cnt--; 3678 } 3679 } 3680 } 3681 3682 /* return SD_SUCCESS if both strings match */ 3683 return (((*p1 == '\0') && (cnt == 0)) ? SD_SUCCESS : SD_FAILURE); 3684 } 3685 3686 3687 /* 3688 * Function: sd_chk_vers1_data 3689 * 3690 * Description: Verify the version 1 device properties provided by the 3691 * user via the configuration file 3692 * 3693 * Arguments: un - driver soft state (unit) structure 3694 * flags - integer mask indicating properties to be set 3695 * prop_list - integer list of property values 3696 * list_len - length of user provided data 3697 * 3698 * Return Code: SD_SUCCESS - Indicates the user provided data is valid 3699 * SD_FAILURE - Indicates the user provided data is invalid 3700 */ 3701 3702 static int 3703 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list, 3704 int list_len, char *dataname_ptr) 3705 { 3706 int i; 3707 int mask = 1; 3708 int index = 0; 3709 3710 ASSERT(un != NULL); 3711 3712 /* Check for a NULL property name and list */ 3713 if (dataname_ptr == NULL) { 3714 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3715 "sd_chk_vers1_data: NULL data property name."); 3716 return (SD_FAILURE); 3717 } 3718 if (prop_list == NULL) { 3719 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3720 "sd_chk_vers1_data: %s NULL data property list.", 3721 dataname_ptr); 3722 return (SD_FAILURE); 3723 } 3724 3725 /* Display a warning if undefined bits are set in the flags */ 3726 if (flags & ~SD_CONF_BIT_MASK) { 3727 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3728 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. " 3729 "Properties not set.", 3730 (flags & ~SD_CONF_BIT_MASK), dataname_ptr); 3731 return (SD_FAILURE); 3732 } 3733 3734 /* 3735 * Verify the length of the list by identifying the highest bit set 3736 * in the flags and validating that the property list has a length 3737 * up to the index of this bit. 
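	 *
	 * (For instance, a hypothetical flags word with only bit0 set
	 * demands a far shorter property list than one with bit15 set;
	 * a list shorter than its own flags promise is rejected below
	 * with a warning, and none of its properties are applied.)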
3738 */ 3739 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) { 3740 if (flags & mask) { 3741 index++; 3742 } 3743 mask = 1 << i; 3744 } 3745 if ((list_len / sizeof (int)) < (index + 2)) { 3746 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3747 "sd_chk_vers1_data: " 3748 "Data property list %s size is incorrect. " 3749 "Properties not set.", dataname_ptr); 3750 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: " 3751 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS); 3752 return (SD_FAILURE); 3753 } 3754 return (SD_SUCCESS); 3755 } 3756 3757 3758 /* 3759 * Function: sd_set_vers1_properties 3760 * 3761 * Description: Set version 1 device properties based on a property list 3762 * retrieved from the driver configuration file or static 3763 * configuration table. Version 1 properties have the format: 3764 * 3765 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN> 3766 * 3767 * where the prop0 value will be used to set prop0 if bit0 3768 * is set in the flags 3769 * 3770 * Arguments: un - driver soft state (unit) structure 3771 * flags - integer mask indicating properties to be set 3772 * prop_list - integer list of property values 3773 */ 3774 3775 static void 3776 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list) 3777 { 3778 ASSERT(un != NULL); 3779 3780 /* 3781 * Set the flag to indicate cache is to be disabled. An attempt 3782 * to disable the cache via sd_disable_caching() will be made 3783 * later during attach once the basic initialization is complete. 3784 */ 3785 if (flags & SD_CONF_BSET_NOCACHE) { 3786 un->un_f_opt_disable_cache = TRUE; 3787 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3788 "sd_set_vers1_properties: caching disabled flag set\n"); 3789 } 3790 3791 /* CD-specific configuration parameters */ 3792 if (flags & SD_CONF_BSET_PLAYMSF_BCD) { 3793 un->un_f_cfg_playmsf_bcd = TRUE; 3794 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3795 "sd_set_vers1_properties: playmsf_bcd set\n"); 3796 } 3797 if (flags & SD_CONF_BSET_READSUB_BCD) { 3798 un->un_f_cfg_readsub_bcd = TRUE; 3799 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3800 "sd_set_vers1_properties: readsub_bcd set\n"); 3801 } 3802 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) { 3803 un->un_f_cfg_read_toc_trk_bcd = TRUE; 3804 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3805 "sd_set_vers1_properties: read_toc_trk_bcd set\n"); 3806 } 3807 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) { 3808 un->un_f_cfg_read_toc_addr_bcd = TRUE; 3809 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3810 "sd_set_vers1_properties: read_toc_addr_bcd set\n"); 3811 } 3812 if (flags & SD_CONF_BSET_NO_READ_HEADER) { 3813 un->un_f_cfg_no_read_header = TRUE; 3814 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3815 "sd_set_vers1_properties: no_read_header set\n"); 3816 } 3817 if (flags & SD_CONF_BSET_READ_CD_XD4) { 3818 un->un_f_cfg_read_cd_xd4 = TRUE; 3819 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3820 "sd_set_vers1_properties: read_cd_xd4 set\n"); 3821 } 3822 3823 /* Support for devices which do not have valid/unique serial numbers */ 3824 if (flags & SD_CONF_BSET_FAB_DEVID) { 3825 un->un_f_opt_fab_devid = TRUE; 3826 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3827 "sd_set_vers1_properties: fab_devid bit set\n"); 3828 } 3829 3830 /* Support for user throttle configuration */ 3831 if (flags & SD_CONF_BSET_THROTTLE) { 3832 ASSERT(prop_list != NULL); 3833 un->un_saved_throttle = un->un_throttle = 3834 prop_list->sdt_throttle; 3835 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3836 "sd_set_vers1_properties: throttle set to %d\n", 3837 prop_list->sdt_throttle); 3838 } 3839 3840 /* Set the per disk retry count 
according to the conf file or table. */ 3841 if (flags & SD_CONF_BSET_NRR_COUNT) { 3842 ASSERT(prop_list != NULL); 3843 if (prop_list->sdt_not_rdy_retries) { 3844 un->un_notready_retry_count = 3845 prop_list->sdt_not_rdy_retries; 3846 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3847 "sd_set_vers1_properties: not ready retry count" 3848 " set to %d\n", un->un_notready_retry_count); 3849 } 3850 } 3851 3852 /* The controller type is reported for generic disk driver ioctls */ 3853 if (flags & SD_CONF_BSET_CTYPE) { 3854 ASSERT(prop_list != NULL); 3855 switch (prop_list->sdt_ctype) { 3856 case CTYPE_CDROM: 3857 un->un_ctype = prop_list->sdt_ctype; 3858 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3859 "sd_set_vers1_properties: ctype set to " 3860 "CTYPE_CDROM\n"); 3861 break; 3862 case CTYPE_CCS: 3863 un->un_ctype = prop_list->sdt_ctype; 3864 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3865 "sd_set_vers1_properties: ctype set to " 3866 "CTYPE_CCS\n"); 3867 break; 3868 case CTYPE_ROD: /* RW optical */ 3869 un->un_ctype = prop_list->sdt_ctype; 3870 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3871 "sd_set_vers1_properties: ctype set to " 3872 "CTYPE_ROD\n"); 3873 break; 3874 default: 3875 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 3876 "sd_set_vers1_properties: Could not set " 3877 "invalid ctype value (%d)", 3878 prop_list->sdt_ctype); 3879 } 3880 } 3881 3882 /* Purple failover timeout */ 3883 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) { 3884 ASSERT(prop_list != NULL); 3885 un->un_busy_retry_count = 3886 prop_list->sdt_busy_retries; 3887 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3888 "sd_set_vers1_properties: " 3889 "busy retry count set to %d\n", 3890 un->un_busy_retry_count); 3891 } 3892 3893 /* Purple reset retry count */ 3894 if (flags & SD_CONF_BSET_RST_RETRIES) { 3895 ASSERT(prop_list != NULL); 3896 un->un_reset_retry_count = 3897 prop_list->sdt_reset_retries; 3898 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3899 "sd_set_vers1_properties: " 3900 "reset retry count set to %d\n", 3901 un->un_reset_retry_count); 3902 } 3903 3904 /* Purple reservation release timeout */ 3905 if (flags & SD_CONF_BSET_RSV_REL_TIME) { 3906 ASSERT(prop_list != NULL); 3907 un->un_reserve_release_time = 3908 prop_list->sdt_reserv_rel_time; 3909 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3910 "sd_set_vers1_properties: " 3911 "reservation release timeout set to %d\n", 3912 un->un_reserve_release_time); 3913 } 3914 3915 /* 3916 * Driver flag telling the driver to verify that no commands are pending 3917 * for a device before issuing a Test Unit Ready. This is a workaround 3918 * for a firmware bug in some Seagate eliteI drives. 3919 */ 3920 if (flags & SD_CONF_BSET_TUR_CHECK) { 3921 un->un_f_cfg_tur_check = TRUE; 3922 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3923 "sd_set_vers1_properties: tur queue check set\n"); 3924 } 3925 3926 if (flags & SD_CONF_BSET_MIN_THROTTLE) { 3927 un->un_min_throttle = prop_list->sdt_min_throttle; 3928 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3929 "sd_set_vers1_properties: min throttle set to %d\n", 3930 un->un_min_throttle); 3931 } 3932 3933 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) { 3934 un->un_f_disksort_disabled = 3935 (prop_list->sdt_disk_sort_dis != 0) ? 3936 TRUE : FALSE; 3937 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3938 "sd_set_vers1_properties: disksort disabled " 3939 "flag set to %d\n", 3940 prop_list->sdt_disk_sort_dis); 3941 } 3942 3943 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) { 3944 un->un_f_lun_reset_enabled = 3945 (prop_list->sdt_lun_reset_enable != 0) ? 
3946 TRUE : FALSE; 3947 SD_INFO(SD_LOG_ATTACH_DETACH, un, 3948 "sd_set_vers1_properties: lun reset enabled " 3949 "flag set to %d\n", 3950 prop_list->sdt_lun_reset_enable); 3951 } 3952 3953 /* 3954 * Validate the throttle values. 3955 * If any of the numbers are invalid, set everything to defaults. 3956 */ 3957 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) || 3958 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) || 3959 (un->un_min_throttle > un->un_throttle)) { 3960 un->un_saved_throttle = un->un_throttle = sd_max_throttle; 3961 un->un_min_throttle = sd_min_throttle; 3962 } 3963 } 3964 3965 /* 3966 * The following routines support reading and interpretation of disk labels, 3967 * including Solaris BE (8-slice) vtoc's, Solaris LE (16-slice) vtoc's, and 3968 * fdisk tables. 3969 */ 3970 3971 /* 3972 * Function: sd_validate_geometry 3973 * 3974 * Description: Read the label from the disk (if present). Update the unit's 3975 * geometry and vtoc information from the data in the label. 3976 * Verify that the label is valid. 3977 * 3978 * Arguments: un - driver soft state (unit) structure 3979 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 3980 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 3981 * to use the USCSI "direct" chain and bypass the normal 3982 * command waitq. 3983 * 3984 * Return Code: 0 - Successful completion 3985 * EINVAL - Invalid value in un->un_tgt_blocksize or 3986 * un->un_blockcount; or label on disk is corrupted 3987 * or unreadable. 3988 * EACCES - Reservation conflict at the device. 3989 * ENOMEM - Resource allocation error 3990 * ENOTSUP - geometry not applicable 3991 * 3992 * Context: Kernel thread only (can sleep). 3993 */ 3994 3995 static int 3996 sd_validate_geometry(struct sd_lun *un, int path_flag) 3997 { 3998 static char labelstring[128]; 3999 static char buf[256]; 4000 char *label = NULL; 4001 int label_error = 0; 4002 int gvalid = un->un_f_geometry_is_valid; 4003 int lbasize; 4004 uint_t capacity; 4005 int count; 4006 4007 ASSERT(un != NULL); 4008 ASSERT(mutex_owned(SD_MUTEX(un))); 4009 4010 /* 4011 * If the required values are not valid, then try getting them 4012 * once via read capacity. If that fails, then fail this call. 4013 * This is necessary with the new mpxio failover behavior in 4014 * the T300 where we can get an attach for the inactive path 4015 * before the active path. The inactive path fails commands with 4016 * sense data of 02,04,88 which happens to the read capacity 4017 * before mpxio has had sufficient knowledge to know if it should 4018 * force a fail over or not. (Which it won't do at attach anyhow). 4019 * If the read capacity at attach time fails, un_tgt_blocksize and 4020 * un_blockcount won't be valid. 4021 */ 4022 if ((un->un_f_tgt_blocksize_is_valid != TRUE) || 4023 (un->un_f_blockcount_is_valid != TRUE)) { 4024 uint64_t cap; 4025 uint32_t lbasz; 4026 int rval; 4027 4028 mutex_exit(SD_MUTEX(un)); 4029 rval = sd_send_scsi_READ_CAPACITY(un, &cap, 4030 &lbasz, SD_PATH_DIRECT); 4031 mutex_enter(SD_MUTEX(un)); 4032 if (rval == 0) { 4033 /* 4034 * The following relies on 4035 * sd_send_scsi_READ_CAPACITY never 4036 * returning 0 for capacity and/or lbasize. 
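		 *
		 * (sd_update_block_info(), defined later in this file,
		 * only overwrites the cached blocksize and blockcount
		 * when handed non-zero values, so a zero returned here
		 * would silently leave stale data in place.)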
4037 */ 4038 sd_update_block_info(un, lbasz, cap); 4039 } 4040 4041 if ((un->un_f_tgt_blocksize_is_valid != TRUE) || 4042 (un->un_f_blockcount_is_valid != TRUE)) { 4043 return (EINVAL); 4044 } 4045 } 4046 4047 /* 4048 * Copy the lbasize and capacity so that if they're reset while we're 4049 * not holding the SD_MUTEX, we will continue to use valid values 4050 * after the SD_MUTEX is reacquired. (4119659) 4051 */ 4052 lbasize = un->un_tgt_blocksize; 4053 capacity = un->un_blockcount; 4054 4055 #if defined(_SUNOS_VTOC_16) 4056 /* 4057 * Set up the "whole disk" fdisk partition; this should always 4058 * exist, regardless of whether the disk contains an fdisk table 4059 * or vtoc. 4060 */ 4061 un->un_map[P0_RAW_DISK].dkl_cylno = 0; 4062 un->un_map[P0_RAW_DISK].dkl_nblk = capacity; 4063 #endif 4064 4065 /* 4066 * Refresh the logical and physical geometry caches. 4067 * (data from MODE SENSE format/rigid disk geometry pages, 4068 * and scsi_ifgetcap("geometry"). 4069 */ 4070 sd_resync_geom_caches(un, capacity, lbasize, path_flag); 4071 4072 label_error = sd_use_efi(un, path_flag); 4073 if (label_error == 0) { 4074 /* found a valid EFI label */ 4075 SD_TRACE(SD_LOG_IO_PARTITION, un, 4076 "sd_validate_geometry: found EFI label\n"); 4077 un->un_solaris_offset = 0; 4078 un->un_solaris_size = capacity; 4079 return (ENOTSUP); 4080 } 4081 if (un->un_blockcount > DK_MAX_BLOCKS) { 4082 if (label_error == ESRCH) { 4083 /* 4084 * they've configured a LUN over 1TB, but used 4085 * format.dat to restrict format's view of the 4086 * capacity to be under 1TB 4087 */ 4088 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 4089 "is >1TB and has a VTOC label: use format(1M) to either decrease the"); 4090 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 4091 "size to be < 1TB or relabel the disk with an EFI label"); 4092 } else { 4093 /* unlabeled disk over 1TB */ 4094 return (ENOTSUP); 4095 } 4096 } 4097 label_error = 0; 4098 4099 /* 4100 * at this point it is either labeled with a VTOC or it is 4101 * under 1TB 4102 */ 4103 4104 /* 4105 * Only DIRECT ACCESS devices will have Sun labels. 4106 * CD's supposedly have a Sun label, too 4107 */ 4108 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT || ISREMOVABLE(un)) { 4109 struct dk_label *dkl; 4110 offset_t dkl1; 4111 offset_t label_addr, real_addr; 4112 int rval; 4113 size_t buffer_size; 4114 4115 /* 4116 * Note: This will set up un->un_solaris_size and 4117 * un->un_solaris_offset. 4118 */ 4119 switch (sd_read_fdisk(un, capacity, lbasize, path_flag)) { 4120 case SD_CMD_RESERVATION_CONFLICT: 4121 ASSERT(mutex_owned(SD_MUTEX(un))); 4122 return (EACCES); 4123 case SD_CMD_FAILURE: 4124 ASSERT(mutex_owned(SD_MUTEX(un))); 4125 return (ENOMEM); 4126 } 4127 4128 if (un->un_solaris_size <= DK_LABEL_LOC) { 4129 /* 4130 * Found fdisk table but no Solaris partition entry, 4131 * so don't call sd_uselabel() and don't create 4132 * a default label. 
4133 */ 4134 label_error = 0; 4135 un->un_f_geometry_is_valid = TRUE; 4136 goto no_solaris_partition; 4137 } 4138 label_addr = (daddr_t)(un->un_solaris_offset + DK_LABEL_LOC); 4139 4140 /* 4141 * sys_blocksize != tgt_blocksize, need to re-adjust 4142 * blkno and save the index to beginning of dk_label 4143 */ 4144 real_addr = SD_SYS2TGTBLOCK(un, label_addr); 4145 buffer_size = SD_REQBYTES2TGTBYTES(un, 4146 sizeof (struct dk_label)); 4147 4148 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_validate_geometry: " 4149 "label_addr: 0x%x allocation size: 0x%x\n", 4150 label_addr, buffer_size); 4151 dkl = kmem_zalloc(buffer_size, KM_NOSLEEP); 4152 if (dkl == NULL) { 4153 return (ENOMEM); 4154 } 4155 4156 mutex_exit(SD_MUTEX(un)); 4157 rval = sd_send_scsi_READ(un, dkl, buffer_size, real_addr, 4158 path_flag); 4159 mutex_enter(SD_MUTEX(un)); 4160 4161 switch (rval) { 4162 case 0: 4163 /* 4164 * sd_uselabel will establish that the geometry 4165 * is valid. 4166 * For sys_blocksize != tgt_blocksize, need 4167 * to index into the beginning of dk_label 4168 */ 4169 dkl1 = (daddr_t)dkl 4170 + SD_TGTBYTEOFFSET(un, label_addr, real_addr); 4171 if (sd_uselabel(un, (struct dk_label *)(uintptr_t)dkl1, 4172 path_flag) != SD_LABEL_IS_VALID) { 4173 label_error = EINVAL; 4174 } 4175 break; 4176 case EACCES: 4177 label_error = EACCES; 4178 break; 4179 default: 4180 label_error = EINVAL; 4181 break; 4182 } 4183 4184 kmem_free(dkl, buffer_size); 4185 4186 #if defined(_SUNOS_VTOC_8) 4187 label = (char *)un->un_asciilabel; 4188 #elif defined(_SUNOS_VTOC_16) 4189 label = (char *)un->un_vtoc.v_asciilabel; 4190 #else 4191 #error "No VTOC format defined." 4192 #endif 4193 } 4194 4195 /* 4196 * If a valid label was not found, AND if no reservation conflict 4197 * was detected, then go ahead and create a default label (4069506). 4198 * 4199 * Note: currently, for VTOC_8 devices, the default label is created 4200 * for removables only. For VTOC_16 devices, the default label will 4201 * be created for both removables and non-removables alike. 4202 * (see sd_build_default_label) 4203 */ 4204 #if defined(_SUNOS_VTOC_8) 4205 if (ISREMOVABLE(un) && (label_error != EACCES)) { 4206 #elif defined(_SUNOS_VTOC_16) 4207 if (label_error != EACCES) { 4208 #endif 4209 if (un->un_f_geometry_is_valid == FALSE) { 4210 sd_build_default_label(un); 4211 } 4212 label_error = 0; 4213 } 4214 4215 no_solaris_partition: 4216 if ((!ISREMOVABLE(un) || 4217 (ISREMOVABLE(un) && un->un_mediastate == DKIO_EJECTED)) && 4218 (un->un_state == SD_STATE_NORMAL && gvalid == FALSE)) { 4219 /* 4220 * Print out a message indicating who and what we are. 4221 * We do this only when we happen to really validate the 4222 * geometry. We may call sd_validate_geometry() at other 4223 * times, e.g., ioctl()'s like Get VTOC in which case we 4224 * don't want to print the label. 
4225 * If the geometry is valid, print the label string, 4226 * else print vendor and product info, if available 4227 */ 4228 if ((un->un_f_geometry_is_valid == TRUE) && (label != NULL)) { 4229 SD_INFO(SD_LOG_ATTACH_DETACH, un, "?<%s>\n", label); 4230 } else { 4231 mutex_enter(&sd_label_mutex); 4232 sd_inq_fill(SD_INQUIRY(un)->inq_vid, VIDMAX, 4233 labelstring); 4234 sd_inq_fill(SD_INQUIRY(un)->inq_pid, PIDMAX, 4235 &labelstring[64]); 4236 (void) sprintf(buf, "?Vendor '%s', product '%s'", 4237 labelstring, &labelstring[64]); 4238 if (un->un_f_blockcount_is_valid == TRUE) { 4239 (void) sprintf(&buf[strlen(buf)], 4240 ", %llu %u byte blocks\n", 4241 (longlong_t)un->un_blockcount, 4242 un->un_tgt_blocksize); 4243 } else { 4244 (void) sprintf(&buf[strlen(buf)], 4245 ", (unknown capacity)\n"); 4246 } 4247 SD_INFO(SD_LOG_ATTACH_DETACH, un, buf); 4248 mutex_exit(&sd_label_mutex); 4249 } 4250 } 4251 4252 #if defined(_SUNOS_VTOC_16) 4253 /* 4254 * If we have valid geometry, set up the remaining fdisk partitions. 4255 * Note that dkl_cylno is not used for the fdisk map entries, so 4256 * we set it to an entirely bogus value. 4257 */ 4258 for (count = 0; count < FD_NUMPART; count++) { 4259 un->un_map[FDISK_P1 + count].dkl_cylno = -1; 4260 un->un_map[FDISK_P1 + count].dkl_nblk = 4261 un->un_fmap[count].fmap_nblk; 4262 4263 un->un_offset[FDISK_P1 + count] = 4264 un->un_fmap[count].fmap_start; 4265 } 4266 #endif 4267 4268 for (count = 0; count < NDKMAP; count++) { 4269 #if defined(_SUNOS_VTOC_8) 4270 struct dk_map *lp = &un->un_map[count]; 4271 un->un_offset[count] = 4272 un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno; 4273 #elif defined(_SUNOS_VTOC_16) 4274 struct dkl_partition *vp = &un->un_vtoc.v_part[count]; 4275 4276 un->un_offset[count] = vp->p_start + un->un_solaris_offset; 4277 #else 4278 #error "No VTOC format defined." 4279 #endif 4280 } 4281 4282 return (label_error); 4283 } 4284 4285 4286 #if defined(_SUNOS_VTOC_16) 4287 /* 4288 * Macro: MAX_BLKS 4289 * 4290 * This macro is used for table entries where we need to have the largest 4291 * possible sector value for that head & SPT (sectors per track) 4292 * combination. Other entries for some smaller disk sizes are set by 4293 * convention to match those used by X86 BIOS usage. 4294 */ 4295 #define MAX_BLKS(heads, spt) UINT16_MAX * heads * spt, heads, spt 4296 4297 /* 4298 * Function: sd_convert_geometry 4299 * 4300 * Description: Convert physical geometry into a dk_geom structure. In 4301 * other words, make sure we don't wrap 16-bit values. 4302 * e.g. converting from geom_cache to dk_geom 4303 * 4304 * Context: Kernel thread only 4305 */ 4306 static void 4307 sd_convert_geometry(uint64_t capacity, struct dk_geom *un_g) 4308 { 4309 int i; 4310 static const struct chs_values { 4311 uint_t max_cap; /* Max Capacity for this HS. */ 4312 uint_t nhead; /* Heads to use. */ 4313 uint_t nsect; /* SPT to use. */ 4314 } CHS_values[] = { 4315 {0x00200000, 64, 32}, /* 1GB or smaller disk. */ 4316 {0x01000000, 128, 32}, /* 8GB or smaller disk. */ 4317 {MAX_BLKS(255, 63)}, /* 502.02GB or smaller disk. */ 4318 {MAX_BLKS(255, 126)}, /* .98TB or smaller disk. 
 */
4319 	{DK_MAX_BLOCKS, 255, 189}	/* Max size is just under 1TB */
4320 	};
4321 
4322 	/* Unlabeled SCSI floppy device */
4323 	if (capacity <= 0x1000) {
4324 		un_g->dkg_nhead = 2;
4325 		un_g->dkg_ncyl = 80;
4326 		un_g->dkg_nsect = capacity / (un_g->dkg_nhead * un_g->dkg_ncyl);
4327 		return;
4328 	}
4329 
4330 	/*
4331 	 * For all devices we calculate cylinders using the
4332 	 * heads and sectors we assign based on capacity of the
4333 	 * device. The table is designed to be compatible with the
4334 	 * way other operating systems lay out fdisk tables for X86
4335 	 * and to ensure that the cylinders never exceed 65535 to
4336 	 * prevent problems with X86 ioctls that report geometry.
4337 	 * We use SPT values that are multiples of 63, since other OSes
4338 	 * that are not limited to 16 bits for cylinders stop at 63 SPT;
4339 	 * we make do by using multiples of 63 SPT.
4340 	 *
4341 	 * Note that capacities greater than or equal to 1TB will simply
4342 	 * get the largest geometry from the table. This should be okay
4343 	 * since disks this large shouldn't be using CHS values anyway.
4344 	 */
4345 	for (i = 0; CHS_values[i].max_cap < capacity &&
4346 	    CHS_values[i].max_cap != DK_MAX_BLOCKS; i++)
4347 		;
4348 
4349 	un_g->dkg_nhead = CHS_values[i].nhead;
4350 	un_g->dkg_nsect = CHS_values[i].nsect;
4351 }
4352 #endif
4353 
4354 
4355 /*
4356  * Function:	sd_resync_geom_caches
4357  *
4358  * Description:	(Re)initialize both geometry caches: the virtual geometry
4359  *		information is extracted from the HBA (the "geometry"
4360  *		capability), and the physical geometry cache data is
4361  *		generated by issuing MODE SENSE commands.
4362  *
4363  * Arguments:	un - driver soft state (unit) structure
4364  *		capacity - disk capacity in #blocks
4365  *		lbasize - disk block size in bytes
4366  *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
4367  *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
4368  *			to use the USCSI "direct" chain and bypass the normal
4369  *			command waitq.
4370  *
4371  * Context:	Kernel thread only (can sleep).
4372  */
4373 
4374 static void
4375 sd_resync_geom_caches(struct sd_lun *un, int capacity, int lbasize,
4376     int path_flag)
4377 {
4378 	struct geom_cache	pgeom;
4379 	struct geom_cache	*pgeom_p = &pgeom;
4380 	int	spc;
4381 	unsigned short	nhead;
4382 	unsigned short	nsect;
4383 
4384 	ASSERT(un != NULL);
4385 	ASSERT(mutex_owned(SD_MUTEX(un)));
4386 
4387 	/*
4388 	 * Ask the controller for its logical geometry.
4389 	 * Note: if the HBA does not support scsi_ifgetcap("geometry"),
4390 	 * then the lgeom cache will be invalid.
4391 	 */
4392 	sd_get_virtual_geometry(un, capacity, lbasize);
4393 
4394 	/*
4395 	 * Initialize the pgeom cache from lgeom, so that if MODE SENSE
4396 	 * doesn't work, DKIOCG_PHYSGEOM can return reasonable values.
4397 	 */
4398 	if (un->un_lgeom.g_nsect == 0 || un->un_lgeom.g_nhead == 0) {
4399 		/*
4400 		 * Note: Perhaps this needs to be more adaptive? The rationale
4401 		 * is that, if there's no HBA geometry from the HBA driver, any
4402 		 * guess is good, since this is the physical geometry.
If MODE 4403 * SENSE fails this gives a max cylinder size for non-LBA access 4404 */ 4405 nhead = 255; 4406 nsect = 63; 4407 } else { 4408 nhead = un->un_lgeom.g_nhead; 4409 nsect = un->un_lgeom.g_nsect; 4410 } 4411 4412 if (ISCD(un)) { 4413 pgeom_p->g_nhead = 1; 4414 pgeom_p->g_nsect = nsect * nhead; 4415 } else { 4416 pgeom_p->g_nhead = nhead; 4417 pgeom_p->g_nsect = nsect; 4418 } 4419 4420 spc = pgeom_p->g_nhead * pgeom_p->g_nsect; 4421 pgeom_p->g_capacity = capacity; 4422 pgeom_p->g_ncyl = pgeom_p->g_capacity / spc; 4423 pgeom_p->g_acyl = 0; 4424 4425 /* 4426 * Retrieve fresh geometry data from the hardware, stash it 4427 * here temporarily before we rebuild the incore label. 4428 * 4429 * We want to use the MODE SENSE commands to derive the 4430 * physical geometry of the device, but if either command 4431 * fails, the logical geometry is used as the fallback for 4432 * disk label geometry. 4433 */ 4434 mutex_exit(SD_MUTEX(un)); 4435 sd_get_physical_geometry(un, pgeom_p, capacity, lbasize, path_flag); 4436 mutex_enter(SD_MUTEX(un)); 4437 4438 /* 4439 * Now update the real copy while holding the mutex. This 4440 * way the global copy is never in an inconsistent state. 4441 */ 4442 bcopy(pgeom_p, &un->un_pgeom, sizeof (un->un_pgeom)); 4443 4444 SD_INFO(SD_LOG_COMMON, un, "sd_resync_geom_caches: " 4445 "(cached from lgeom)\n"); 4446 SD_INFO(SD_LOG_COMMON, un, 4447 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4448 un->un_pgeom.g_ncyl, un->un_pgeom.g_acyl, 4449 un->un_pgeom.g_nhead, un->un_pgeom.g_nsect); 4450 SD_INFO(SD_LOG_COMMON, un, " lbasize: %d; capacity: %ld; " 4451 "intrlv: %d; rpm: %d\n", un->un_pgeom.g_secsize, 4452 un->un_pgeom.g_capacity, un->un_pgeom.g_intrlv, 4453 un->un_pgeom.g_rpm); 4454 } 4455 4456 4457 /* 4458 * Function: sd_read_fdisk 4459 * 4460 * Description: utility routine to read the fdisk table. 4461 * 4462 * Arguments: un - driver soft state (unit) structure 4463 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4464 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4465 * to use the USCSI "direct" chain and bypass the normal 4466 * command waitq. 4467 * 4468 * Return Code: SD_CMD_SUCCESS 4469 * SD_CMD_FAILURE 4470 * 4471 * Context: Kernel thread only (can sleep). 4472 */ 4473 /* ARGSUSED */ 4474 static int 4475 sd_read_fdisk(struct sd_lun *un, uint_t capacity, int lbasize, int path_flag) 4476 { 4477 #if defined(_NO_FDISK_PRESENT) 4478 4479 un->un_solaris_offset = 0; 4480 un->un_solaris_size = capacity; 4481 bzero(un->un_fmap, sizeof (struct fmap) * FD_NUMPART); 4482 return (SD_CMD_SUCCESS); 4483 4484 #elif defined(_FIRMWARE_NEEDS_FDISK) 4485 4486 struct ipart *fdp; 4487 struct mboot *mbp; 4488 struct ipart fdisk[FD_NUMPART]; 4489 int i; 4490 char sigbuf[2]; 4491 caddr_t bufp; 4492 int uidx; 4493 int rval; 4494 int lba = 0; 4495 uint_t solaris_offset; /* offset to solaris part. 
*/ 4496 daddr_t solaris_size; /* size of solaris partition */ 4497 uint32_t blocksize; 4498 4499 ASSERT(un != NULL); 4500 ASSERT(mutex_owned(SD_MUTEX(un))); 4501 ASSERT(un->un_f_tgt_blocksize_is_valid == TRUE); 4502 4503 blocksize = un->un_tgt_blocksize; 4504 4505 /* 4506 * Start off assuming no fdisk table 4507 */ 4508 solaris_offset = 0; 4509 solaris_size = capacity; 4510 4511 mutex_exit(SD_MUTEX(un)); 4512 bufp = kmem_zalloc(blocksize, KM_SLEEP); 4513 rval = sd_send_scsi_READ(un, bufp, blocksize, 0, path_flag); 4514 mutex_enter(SD_MUTEX(un)); 4515 4516 if (rval != 0) { 4517 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 4518 "sd_read_fdisk: fdisk read err\n"); 4519 kmem_free(bufp, blocksize); 4520 return (SD_CMD_FAILURE); 4521 } 4522 4523 mbp = (struct mboot *)bufp; 4524 4525 /* 4526 * The fdisk table does not begin on a 4-byte boundary within the 4527 * master boot record, so we copy it to an aligned structure to avoid 4528 * alignment exceptions on some processors. 4529 */ 4530 bcopy(&mbp->parts[0], fdisk, sizeof (fdisk)); 4531 4532 /* 4533 * Check for lba support before verifying sig; sig might not be 4534 * there, say on a blank disk, but the max_chs mark may still 4535 * be present. 4536 * 4537 * Note: LBA support and BEFs are an x86-only concept but this 4538 * code should work OK on SPARC as well. 4539 */ 4540 4541 /* 4542 * First, check for lba-access-ok on root node (or prom root node) 4543 * if present there, don't need to search fdisk table. 4544 */ 4545 if (ddi_getprop(DDI_DEV_T_ANY, ddi_root_node(), 0, 4546 "lba-access-ok", 0) != 0) { 4547 /* All drives do LBA; don't search fdisk table */ 4548 lba = 1; 4549 } else { 4550 /* Okay, look for mark in fdisk table */ 4551 for (fdp = fdisk, i = 0; i < FD_NUMPART; i++, fdp++) { 4552 /* accumulate "lba" value from all partitions */ 4553 lba = (lba || sd_has_max_chs_vals(fdp)); 4554 } 4555 } 4556 4557 /* 4558 * Next, look for 'no-bef-lba-access' prop on parent. 4559 * Its presence means the realmode driver doesn't support 4560 * LBA, so the target driver shouldn't advertise it as ok. 4561 * This should be a temporary condition; one day all 4562 * BEFs should support the LBA access functions. 
4563 */ 4564 if ((lba != 0) && (ddi_getprop(DDI_DEV_T_ANY, 4565 ddi_get_parent(SD_DEVINFO(un)), DDI_PROP_DONTPASS, 4566 "no-bef-lba-access", 0) != 0)) { 4567 /* BEF doesn't support LBA; don't advertise it as ok */ 4568 lba = 0; 4569 } 4570 4571 if (lba != 0) { 4572 dev_t dev = sd_make_device(SD_DEVINFO(un)); 4573 4574 if (ddi_getprop(dev, SD_DEVINFO(un), DDI_PROP_DONTPASS, 4575 "lba-access-ok", 0) == 0) { 4576 /* not found; create it */ 4577 if (ddi_prop_create(dev, SD_DEVINFO(un), 0, 4578 "lba-access-ok", (caddr_t)NULL, 0) != 4579 DDI_PROP_SUCCESS) { 4580 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 4581 "sd_read_fdisk: Can't create lba property " 4582 "for instance %d\n", 4583 ddi_get_instance(SD_DEVINFO(un))); 4584 } 4585 } 4586 } 4587 4588 bcopy(&mbp->signature, sigbuf, sizeof (sigbuf)); 4589 4590 /* 4591 * Endian-independent signature check 4592 */ 4593 if (((sigbuf[1] & 0xFF) != ((MBB_MAGIC >> 8) & 0xFF)) || 4594 (sigbuf[0] != (MBB_MAGIC & 0xFF))) { 4595 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 4596 "sd_read_fdisk: no fdisk\n"); 4597 bzero(un->un_fmap, sizeof (struct fmap) * FD_NUMPART); 4598 rval = SD_CMD_SUCCESS; 4599 goto done; 4600 } 4601 4602 #ifdef SDDEBUG 4603 if (sd_level_mask & SD_LOGMASK_INFO) { 4604 fdp = fdisk; 4605 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_read_fdisk:\n"); 4606 SD_INFO(SD_LOG_ATTACH_DETACH, un, " relsect " 4607 "numsect sysid bootid\n"); 4608 for (i = 0; i < FD_NUMPART; i++, fdp++) { 4609 SD_INFO(SD_LOG_ATTACH_DETACH, un, 4610 " %d: %8d %8d 0x%08x 0x%08x\n", 4611 i, fdp->relsect, fdp->numsect, 4612 fdp->systid, fdp->bootid); 4613 } 4614 } 4615 #endif 4616 4617 /* 4618 * Try to find the unix partition 4619 */ 4620 uidx = -1; 4621 solaris_offset = 0; 4622 solaris_size = 0; 4623 4624 for (fdp = fdisk, i = 0; i < FD_NUMPART; i++, fdp++) { 4625 int relsect; 4626 int numsect; 4627 4628 if (fdp->numsect == 0) { 4629 un->un_fmap[i].fmap_start = 0; 4630 un->un_fmap[i].fmap_nblk = 0; 4631 continue; 4632 } 4633 4634 /* 4635 * Data in the fdisk table is little-endian. 4636 */ 4637 relsect = LE_32(fdp->relsect); 4638 numsect = LE_32(fdp->numsect); 4639 4640 un->un_fmap[i].fmap_start = relsect; 4641 un->un_fmap[i].fmap_nblk = numsect; 4642 4643 if (fdp->systid != SUNIXOS && 4644 fdp->systid != SUNIXOS2 && 4645 fdp->systid != EFI_PMBR) { 4646 continue; 4647 } 4648 4649 /* 4650 * use the last active solaris partition id found 4651 * (there should only be 1 active partition id) 4652 * 4653 * if there are no active solaris partition id 4654 * then use the first inactive solaris partition id 4655 */ 4656 if ((uidx == -1) || (fdp->bootid == ACTIVE)) { 4657 uidx = i; 4658 solaris_offset = relsect; 4659 solaris_size = numsect; 4660 } 4661 } 4662 4663 SD_INFO(SD_LOG_ATTACH_DETACH, un, "fdisk 0x%x 0x%lx", 4664 un->un_solaris_offset, un->un_solaris_size); 4665 4666 rval = SD_CMD_SUCCESS; 4667 4668 done: 4669 4670 /* 4671 * Clear the VTOC info, only if the Solaris partition entry 4672 * has moved, changed size, been deleted, or if the size of 4673 * the partition is too small to even fit the label sector. 
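	 *
	 * For example, if fdisk(1M) relocated or shrank the Solaris
	 * partition, the cached geometry, vtoc and partition map are
	 * zeroed and un_f_geometry_is_valid is cleared below, forcing
	 * the label to be re-read and re-validated on the next access.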
4674 */ 4675 if ((un->un_solaris_offset != solaris_offset) || 4676 (un->un_solaris_size != solaris_size) || 4677 solaris_size <= DK_LABEL_LOC) { 4678 SD_INFO(SD_LOG_ATTACH_DETACH, un, "fdisk moved 0x%x 0x%lx", 4679 solaris_offset, solaris_size); 4680 bzero(&un->un_g, sizeof (struct dk_geom)); 4681 bzero(&un->un_vtoc, sizeof (struct dk_vtoc)); 4682 bzero(&un->un_map, NDKMAP * (sizeof (struct dk_map))); 4683 un->un_f_geometry_is_valid = FALSE; 4684 } 4685 un->un_solaris_offset = solaris_offset; 4686 un->un_solaris_size = solaris_size; 4687 kmem_free(bufp, blocksize); 4688 return (rval); 4689 4690 #else /* #elif defined(_FIRMWARE_NEEDS_FDISK) */ 4691 #error "fdisk table presence undetermined for this platform." 4692 #endif /* #if defined(_NO_FDISK_PRESENT) */ 4693 } 4694 4695 4696 /* 4697 * Function: sd_get_physical_geometry 4698 * 4699 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and 4700 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the 4701 * target, and use this information to initialize the physical 4702 * geometry cache specified by pgeom_p. 4703 * 4704 * MODE SENSE is an optional command, so failure in this case 4705 * does not necessarily denote an error. We want to use the 4706 * MODE SENSE commands to derive the physical geometry of the 4707 * device, but if either command fails, the logical geometry is 4708 * used as the fallback for disk label geometry. 4709 * 4710 * This requires that un->un_blockcount and un->un_tgt_blocksize 4711 * have already been initialized for the current target and 4712 * that the current values be passed as args so that we don't 4713 * end up ever trying to use -1 as a valid value. This could 4714 * happen if either value is reset while we're not holding 4715 * the mutex. 4716 * 4717 * Arguments: un - driver soft state (unit) structure 4718 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 4719 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 4720 * to use the USCSI "direct" chain and bypass the normal 4721 * command waitq. 4722 * 4723 * Context: Kernel thread only (can sleep). 4724 */ 4725 4726 static void 4727 sd_get_physical_geometry(struct sd_lun *un, struct geom_cache *pgeom_p, 4728 int capacity, int lbasize, int path_flag) 4729 { 4730 struct mode_format *page3p; 4731 struct mode_geometry *page4p; 4732 struct mode_header *headerp; 4733 int sector_size; 4734 int nsect; 4735 int nhead; 4736 int ncyl; 4737 int intrlv; 4738 int spc; 4739 int modesense_capacity; 4740 int rpm; 4741 int bd_len; 4742 int mode_header_length; 4743 uchar_t *p3bufp; 4744 uchar_t *p4bufp; 4745 int cdbsize; 4746 4747 ASSERT(un != NULL); 4748 ASSERT(!(mutex_owned(SD_MUTEX(un)))); 4749 4750 if (un->un_f_blockcount_is_valid != TRUE) { 4751 return; 4752 } 4753 4754 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 4755 return; 4756 } 4757 4758 if (lbasize == 0) { 4759 if (ISCD(un)) { 4760 lbasize = 2048; 4761 } else { 4762 lbasize = un->un_sys_blocksize; 4763 } 4764 } 4765 pgeom_p->g_secsize = (unsigned short)lbasize; 4766 4767 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? 
CDB_GROUP2 : CDB_GROUP0; 4768 4769 /* 4770 * Retrieve MODE SENSE page 3 - Format Device Page 4771 */ 4772 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP); 4773 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p3bufp, 4774 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag) 4775 != 0) { 4776 SD_ERROR(SD_LOG_COMMON, un, 4777 "sd_get_physical_geometry: mode sense page 3 failed\n"); 4778 goto page3_exit; 4779 } 4780 4781 /* 4782 * Determine size of Block Descriptors in order to locate the mode 4783 * page data. ATAPI devices return 0, SCSI devices should return 4784 * MODE_BLK_DESC_LENGTH. 4785 */ 4786 headerp = (struct mode_header *)p3bufp; 4787 if (un->un_f_cfg_is_atapi == TRUE) { 4788 struct mode_header_grp2 *mhp = 4789 (struct mode_header_grp2 *)headerp; 4790 mode_header_length = MODE_HEADER_LENGTH_GRP2; 4791 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4792 } else { 4793 mode_header_length = MODE_HEADER_LENGTH; 4794 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4795 } 4796 4797 if (bd_len > MODE_BLK_DESC_LENGTH) { 4798 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4799 "received unexpected bd_len of %d, page3\n", bd_len); 4800 goto page3_exit; 4801 } 4802 4803 page3p = (struct mode_format *) 4804 ((caddr_t)headerp + mode_header_length + bd_len); 4805 4806 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) { 4807 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4808 "mode sense pg3 code mismatch %d\n", 4809 page3p->mode_page.code); 4810 goto page3_exit; 4811 } 4812 4813 /* 4814 * Use this physical geometry data only if BOTH MODE SENSE commands 4815 * complete successfully; otherwise, revert to the logical geometry. 4816 * So, we need to save everything in temporary variables. 4817 */ 4818 sector_size = BE_16(page3p->data_bytes_sect); 4819 4820 /* 4821 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size 4822 */ 4823 if (sector_size == 0) { 4824 sector_size = (ISCD(un)) ? 2048 : un->un_sys_blocksize; 4825 } else { 4826 sector_size &= ~(un->un_sys_blocksize - 1); 4827 } 4828 4829 nsect = BE_16(page3p->sect_track); 4830 intrlv = BE_16(page3p->interleave); 4831 4832 SD_INFO(SD_LOG_COMMON, un, 4833 "sd_get_physical_geometry: Format Parameters (page 3)\n"); 4834 SD_INFO(SD_LOG_COMMON, un, 4835 " mode page: %d; nsect: %d; sector size: %d;\n", 4836 page3p->mode_page.code, nsect, sector_size); 4837 SD_INFO(SD_LOG_COMMON, un, 4838 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv, 4839 BE_16(page3p->track_skew), 4840 BE_16(page3p->cylinder_skew)); 4841 4842 4843 /* 4844 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page 4845 */ 4846 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP); 4847 if (sd_send_scsi_MODE_SENSE(un, cdbsize, p4bufp, 4848 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag) 4849 != 0) { 4850 SD_ERROR(SD_LOG_COMMON, un, 4851 "sd_get_physical_geometry: mode sense page 4 failed\n"); 4852 goto page4_exit; 4853 } 4854 4855 /* 4856 * Determine size of Block Descriptors in order to locate the mode 4857 * page data. ATAPI devices return 0, SCSI devices should return 4858 * MODE_BLK_DESC_LENGTH. 
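	 *
	 * (Layout sketch, assuming the customary values of
	 * MODE_HEADER_LENGTH = 4, MODE_HEADER_LENGTH_GRP2 = 8 and
	 * MODE_BLK_DESC_LENGTH = 8: a non-ATAPI disk would present the
	 * page data at offset 4 + 8 = 12 into the buffer, while an
	 * ATAPI device with a bd_len of 0 presents it at offset 8.)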
4859 */ 4860 headerp = (struct mode_header *)p4bufp; 4861 if (un->un_f_cfg_is_atapi == TRUE) { 4862 struct mode_header_grp2 *mhp = 4863 (struct mode_header_grp2 *)headerp; 4864 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 4865 } else { 4866 bd_len = ((struct mode_header *)headerp)->bdesc_length; 4867 } 4868 4869 if (bd_len > MODE_BLK_DESC_LENGTH) { 4870 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4871 "received unexpected bd_len of %d, page4\n", bd_len); 4872 goto page4_exit; 4873 } 4874 4875 page4p = (struct mode_geometry *) 4876 ((caddr_t)headerp + mode_header_length + bd_len); 4877 4878 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) { 4879 SD_ERROR(SD_LOG_COMMON, un, "sd_get_physical_geometry: " 4880 "mode sense pg4 code mismatch %d\n", 4881 page4p->mode_page.code); 4882 goto page4_exit; 4883 } 4884 4885 /* 4886 * Stash the data now, after we know that both commands completed. 4887 */ 4888 4889 mutex_enter(SD_MUTEX(un)); 4890 4891 nhead = (int)page4p->heads; /* uchar, so no conversion needed */ 4892 spc = nhead * nsect; 4893 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb; 4894 rpm = BE_16(page4p->rpm); 4895 4896 modesense_capacity = spc * ncyl; 4897 4898 SD_INFO(SD_LOG_COMMON, un, 4899 "sd_get_physical_geometry: Geometry Parameters (page 4)\n"); 4900 SD_INFO(SD_LOG_COMMON, un, 4901 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm); 4902 SD_INFO(SD_LOG_COMMON, un, 4903 " computed capacity(h*s*c): %d;\n", modesense_capacity); 4904 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n", 4905 (void *)pgeom_p, capacity); 4906 4907 /* 4908 * Compensate if the drive's geometry is not rectangular, i.e., 4909 * the product of C * H * S returned by MODE SENSE >= that returned 4910 * by read capacity. This is an idiosyncrasy of the original x86 4911 * disk subsystem. 
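	 *
	 * Worked example with made-up numbers: if MODE SENSE reported
	 * ncyl = 1000, nhead = 16 and nsect = 63 (spc = 1008, so
	 * modesense_capacity = 1008000) while READ CAPACITY reported
	 * 1000000 blocks, then g_acyl = (1008000 - 1000000 + 1007) /
	 * 1008 = 8 alternate cylinders and g_ncyl = 1000 - 8 = 992.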
4912 */ 4913 if (modesense_capacity >= capacity) { 4914 SD_INFO(SD_LOG_COMMON, un, 4915 "sd_get_physical_geometry: adjusting acyl; " 4916 "old: %d; new: %d\n", pgeom_p->g_acyl, 4917 (modesense_capacity - capacity + spc - 1) / spc); 4918 if (sector_size != 0) { 4919 /* 1243403: NEC D38x7 drives don't support sec size */ 4920 pgeom_p->g_secsize = (unsigned short)sector_size; 4921 } 4922 pgeom_p->g_nsect = (unsigned short)nsect; 4923 pgeom_p->g_nhead = (unsigned short)nhead; 4924 pgeom_p->g_capacity = capacity; 4925 pgeom_p->g_acyl = 4926 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc; 4927 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl; 4928 } 4929 4930 pgeom_p->g_rpm = (unsigned short)rpm; 4931 pgeom_p->g_intrlv = (unsigned short)intrlv; 4932 4933 SD_INFO(SD_LOG_COMMON, un, 4934 "sd_get_physical_geometry: mode sense geometry:\n"); 4935 SD_INFO(SD_LOG_COMMON, un, 4936 " nsect: %d; sector size: %d; interlv: %d\n", 4937 nsect, sector_size, intrlv); 4938 SD_INFO(SD_LOG_COMMON, un, 4939 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n", 4940 nhead, ncyl, rpm, modesense_capacity); 4941 SD_INFO(SD_LOG_COMMON, un, 4942 "sd_get_physical_geometry: (cached)\n"); 4943 SD_INFO(SD_LOG_COMMON, un, 4944 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 4945 un->un_pgeom.g_ncyl, un->un_pgeom.g_acyl, 4946 un->un_pgeom.g_nhead, un->un_pgeom.g_nsect); 4947 SD_INFO(SD_LOG_COMMON, un, 4948 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n", 4949 un->un_pgeom.g_secsize, un->un_pgeom.g_capacity, 4950 un->un_pgeom.g_intrlv, un->un_pgeom.g_rpm); 4951 4952 mutex_exit(SD_MUTEX(un)); 4953 4954 page4_exit: 4955 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH); 4956 page3_exit: 4957 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH); 4958 } 4959 4960 4961 /* 4962 * Function: sd_get_virtual_geometry 4963 * 4964 * Description: Ask the controller to tell us about the target device. 4965 * 4966 * Arguments: un - pointer to softstate 4967 * capacity - disk capacity in #blocks 4968 * lbasize - disk block size in bytes 4969 * 4970 * Context: Kernel thread only 4971 */ 4972 4973 static void 4974 sd_get_virtual_geometry(struct sd_lun *un, int capacity, int lbasize) 4975 { 4976 struct geom_cache *lgeom_p = &un->un_lgeom; 4977 uint_t geombuf; 4978 int spc; 4979 4980 ASSERT(un != NULL); 4981 ASSERT(mutex_owned(SD_MUTEX(un))); 4982 4983 mutex_exit(SD_MUTEX(un)); 4984 4985 /* Set sector size, and total number of sectors */ 4986 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1); 4987 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1); 4988 4989 /* Let the HBA tell us its geometry */ 4990 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1); 4991 4992 mutex_enter(SD_MUTEX(un)); 4993 4994 /* A value of -1 indicates an undefined "geometry" property */ 4995 if (geombuf == (-1)) { 4996 return; 4997 } 4998 4999 /* Initialize the logical geometry cache. */ 5000 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff; 5001 lgeom_p->g_nsect = geombuf & 0xffff; 5002 lgeom_p->g_secsize = un->un_sys_blocksize; 5003 5004 spc = lgeom_p->g_nhead * lgeom_p->g_nsect; 5005 5006 /* 5007 * Note: The driver originally converted the capacity value from 5008 * target blocks to system blocks. However, the capacity value passed 5009 * to this routine is already in terms of system blocks (this scaling 5010 * is done when the READ CAPACITY command is issued and processed). 
5011 * This 'error' may have gone undetected because the usage of g_ncyl 5012 * (which is based upon g_capacity) is very limited within the driver 5013 */ 5014 lgeom_p->g_capacity = capacity; 5015 5016 /* 5017 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The 5018 * hba may return zero values if the device has been removed. 5019 */ 5020 if (spc == 0) { 5021 lgeom_p->g_ncyl = 0; 5022 } else { 5023 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc; 5024 } 5025 lgeom_p->g_acyl = 0; 5026 5027 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n"); 5028 SD_INFO(SD_LOG_COMMON, un, 5029 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n", 5030 un->un_lgeom.g_ncyl, un->un_lgeom.g_acyl, 5031 un->un_lgeom.g_nhead, un->un_lgeom.g_nsect); 5032 SD_INFO(SD_LOG_COMMON, un, " lbasize: %d; capacity: %ld; " 5033 "intrlv: %d; rpm: %d\n", un->un_lgeom.g_secsize, 5034 un->un_lgeom.g_capacity, un->un_lgeom.g_intrlv, un->un_lgeom.g_rpm); 5035 } 5036 5037 5038 /* 5039 * Function: sd_update_block_info 5040 * 5041 * Description: Calculate a byte count to sector count bitshift value 5042 * from sector size. 5043 * 5044 * Arguments: un: unit struct. 5045 * lbasize: new target sector size 5046 * capacity: new target capacity, ie. block count 5047 * 5048 * Context: Kernel thread context 5049 */ 5050 5051 static void 5052 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity) 5053 { 5054 if (lbasize != 0) { 5055 un->un_tgt_blocksize = lbasize; 5056 un->un_f_tgt_blocksize_is_valid = TRUE; 5057 } 5058 5059 if (capacity != 0) { 5060 un->un_blockcount = capacity; 5061 un->un_f_blockcount_is_valid = TRUE; 5062 } 5063 } 5064 5065 5066 static void 5067 sd_swap_efi_gpt(efi_gpt_t *e) 5068 { 5069 _NOTE(ASSUMING_PROTECTED(*e)) 5070 e->efi_gpt_Signature = LE_64(e->efi_gpt_Signature); 5071 e->efi_gpt_Revision = LE_32(e->efi_gpt_Revision); 5072 e->efi_gpt_HeaderSize = LE_32(e->efi_gpt_HeaderSize); 5073 e->efi_gpt_HeaderCRC32 = LE_32(e->efi_gpt_HeaderCRC32); 5074 e->efi_gpt_MyLBA = LE_64(e->efi_gpt_MyLBA); 5075 e->efi_gpt_AlternateLBA = LE_64(e->efi_gpt_AlternateLBA); 5076 e->efi_gpt_FirstUsableLBA = LE_64(e->efi_gpt_FirstUsableLBA); 5077 e->efi_gpt_LastUsableLBA = LE_64(e->efi_gpt_LastUsableLBA); 5078 UUID_LE_CONVERT(e->efi_gpt_DiskGUID, e->efi_gpt_DiskGUID); 5079 e->efi_gpt_PartitionEntryLBA = LE_64(e->efi_gpt_PartitionEntryLBA); 5080 e->efi_gpt_NumberOfPartitionEntries = 5081 LE_32(e->efi_gpt_NumberOfPartitionEntries); 5082 e->efi_gpt_SizeOfPartitionEntry = 5083 LE_32(e->efi_gpt_SizeOfPartitionEntry); 5084 e->efi_gpt_PartitionEntryArrayCRC32 = 5085 LE_32(e->efi_gpt_PartitionEntryArrayCRC32); 5086 } 5087 5088 static void 5089 sd_swap_efi_gpe(int nparts, efi_gpe_t *p) 5090 { 5091 int i; 5092 5093 _NOTE(ASSUMING_PROTECTED(*p)) 5094 for (i = 0; i < nparts; i++) { 5095 UUID_LE_CONVERT(p[i].efi_gpe_PartitionTypeGUID, 5096 p[i].efi_gpe_PartitionTypeGUID); 5097 p[i].efi_gpe_StartingLBA = LE_64(p[i].efi_gpe_StartingLBA); 5098 p[i].efi_gpe_EndingLBA = LE_64(p[i].efi_gpe_EndingLBA); 5099 /* PartitionAttrs */ 5100 } 5101 } 5102 5103 static int 5104 sd_validate_efi(efi_gpt_t *labp) 5105 { 5106 if (labp->efi_gpt_Signature != EFI_SIGNATURE) 5107 return (EINVAL); 5108 /* at least 96 bytes in this version of the spec. 
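	 * For example (illustrative): a header advertising an
	 * efi_gpt_HeaderSize of 64 fails the check below, while 96 or
	 * anything larger passes.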
*/ 5109 if (sizeof (efi_gpt_t) - sizeof (labp->efi_gpt_Reserved2) > 5110 labp->efi_gpt_HeaderSize) 5111 return (EINVAL); 5112 /* this should be 128 bytes */ 5113 if (labp->efi_gpt_SizeOfPartitionEntry != sizeof (efi_gpe_t)) 5114 return (EINVAL); 5115 return (0); 5116 } 5117 5118 static int 5119 sd_use_efi(struct sd_lun *un, int path_flag) 5120 { 5121 int i; 5122 int rval = 0; 5123 efi_gpe_t *partitions; 5124 uchar_t *buf; 5125 uint_t lbasize; 5126 uint64_t cap; 5127 uint_t nparts; 5128 diskaddr_t gpe_lba; 5129 5130 ASSERT(mutex_owned(SD_MUTEX(un))); 5131 lbasize = un->un_tgt_blocksize; 5132 5133 mutex_exit(SD_MUTEX(un)); 5134 5135 buf = kmem_zalloc(EFI_MIN_ARRAY_SIZE, KM_SLEEP); 5136 5137 if (un->un_tgt_blocksize != un->un_sys_blocksize) { 5138 rval = EINVAL; 5139 goto done_err; 5140 } 5141 5142 rval = sd_send_scsi_READ(un, buf, lbasize, 0, path_flag); 5143 if (rval) { 5144 goto done_err; 5145 } 5146 if (((struct dk_label *)buf)->dkl_magic == DKL_MAGIC) { 5147 /* not ours */ 5148 rval = ESRCH; 5149 goto done_err; 5150 } 5151 5152 rval = sd_send_scsi_READ(un, buf, lbasize, 1, path_flag); 5153 if (rval) { 5154 goto done_err; 5155 } 5156 sd_swap_efi_gpt((efi_gpt_t *)buf); 5157 5158 if ((rval = sd_validate_efi((efi_gpt_t *)buf)) != 0) { 5159 /* 5160 * Couldn't read the primary, try the backup. Our 5161 * capacity at this point could be based on CHS, so 5162 * check what the device reports. 5163 */ 5164 rval = sd_send_scsi_READ_CAPACITY(un, &cap, &lbasize, 5165 path_flag); 5166 if (rval) { 5167 goto done_err; 5168 } 5169 if ((rval = sd_send_scsi_READ(un, buf, lbasize, 5170 cap - 1, path_flag)) != 0) { 5171 goto done_err; 5172 } 5173 sd_swap_efi_gpt((efi_gpt_t *)buf); 5174 if ((rval = sd_validate_efi((efi_gpt_t *)buf)) != 0) 5175 goto done_err; 5176 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5177 "primary label corrupt; using backup\n"); 5178 } 5179 5180 nparts = ((efi_gpt_t *)buf)->efi_gpt_NumberOfPartitionEntries; 5181 gpe_lba = ((efi_gpt_t *)buf)->efi_gpt_PartitionEntryLBA; 5182 5183 rval = sd_send_scsi_READ(un, buf, EFI_MIN_ARRAY_SIZE, gpe_lba, 5184 path_flag); 5185 if (rval) { 5186 goto done_err; 5187 } 5188 partitions = (efi_gpe_t *)buf; 5189 5190 if (nparts > MAXPART) { 5191 nparts = MAXPART; 5192 } 5193 sd_swap_efi_gpe(nparts, partitions); 5194 5195 mutex_enter(SD_MUTEX(un)); 5196 5197 /* Fill in partition table. */ 5198 for (i = 0; i < nparts; i++) { 5199 if (partitions->efi_gpe_StartingLBA != 0 || 5200 partitions->efi_gpe_EndingLBA != 0) { 5201 un->un_map[i].dkl_cylno = 5202 partitions->efi_gpe_StartingLBA; 5203 un->un_map[i].dkl_nblk = 5204 partitions->efi_gpe_EndingLBA - 5205 partitions->efi_gpe_StartingLBA + 1; 5206 un->un_offset[i] = 5207 partitions->efi_gpe_StartingLBA; 5208 } 5209 if (i == WD_NODE) { 5210 /* 5211 * minor number 7 corresponds to the whole disk 5212 */ 5213 un->un_map[i].dkl_cylno = 0; 5214 un->un_map[i].dkl_nblk = un->un_blockcount; 5215 un->un_offset[i] = 0; 5216 } 5217 partitions++; 5218 } 5219 un->un_solaris_offset = 0; 5220 un->un_solaris_size = cap; 5221 un->un_f_geometry_is_valid = TRUE; 5222 kmem_free(buf, EFI_MIN_ARRAY_SIZE); 5223 return (0); 5224 5225 done_err: 5226 kmem_free(buf, EFI_MIN_ARRAY_SIZE); 5227 mutex_enter(SD_MUTEX(un)); 5228 /* 5229 * if we didn't find something that could look like a VTOC 5230 * and the disk is over 1TB, we know there isn't a valid label. 5231 * Otherwise let sd_uselabel decide what to do. 
 * We only want to invalidate this if we're certain the label isn't
 * valid because sd_prop_op will now fail, which in turn
 * causes things like opens and stats on the partition to fail.
 */
	if ((un->un_blockcount > DK_MAX_BLOCKS) && (rval != ESRCH)) {
		un->un_f_geometry_is_valid = FALSE;
	}
	return (rval);
}


/*
 * Function: sd_uselabel
 *
 * Description: Validate the disk label and update the relevant data (geometry,
 *		partition, vtoc, and capacity data) in the sd_lun struct.
 *		Marks the geometry of the unit as being valid.
 *
 * Arguments: un: unit struct.
 *	      dk_label: disk label
 *	      path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			  the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			  to use the USCSI "direct" chain and bypass the normal
 *			  command waitq.
 *
 * Return Code: SD_LABEL_IS_VALID: Label read from disk is OK; geometry,
 *		partition, vtoc, and capacity data are good.
 *
 *		SD_LABEL_IS_INVALID: Magic number or checksum error in the
 *		label; or the computed capacity does not match the capacity
 *		reported by the READ CAPACITY command.
 *
 * Context: Kernel thread only (can sleep).
 */

static int
sd_uselabel(struct sd_lun *un, struct dk_label *labp, int path_flag)
{
	short		*sp;
	short		sum;
	short		count;
	int		label_error = SD_LABEL_IS_VALID;
	int		i;
	int		capacity;
	int		part_end;
	int		track_capacity;
	int		err;
#if defined(_SUNOS_VTOC_16)
	struct	dkl_partition	*vpartp;
#endif
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* Validate the magic number of the label. */
	if (labp->dkl_magic != DKL_MAGIC) {
#if defined(__sparc)
		if ((un->un_state == SD_STATE_NORMAL) &&
		    !ISREMOVABLE(un)) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Corrupt label; wrong magic number\n");
		}
#endif
		return (SD_LABEL_IS_INVALID);
	}

	/* Validate the checksum of the label. */
	sp = (short *)labp;
	sum = 0;
	count = sizeof (struct dk_label) / sizeof (short);
	while (count--) {
		sum ^= *sp++;
	}

	if (sum != 0) {
#if defined(_SUNOS_VTOC_16)
		if (un->un_state == SD_STATE_NORMAL && !ISCD(un)) {
#elif defined(_SUNOS_VTOC_8)
		if (un->un_state == SD_STATE_NORMAL && !ISREMOVABLE(un)) {
#endif
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Corrupt label - label checksum failed\n");
		}
		return (SD_LABEL_IS_INVALID);
	}


	/*
	 * Fill in geometry structure with data from label.
	 */
	bzero(&un->un_g, sizeof (struct dk_geom));
	un->un_g.dkg_ncyl = labp->dkl_ncyl;
	un->un_g.dkg_acyl = labp->dkl_acyl;
	un->un_g.dkg_bcyl = 0;
	un->un_g.dkg_nhead = labp->dkl_nhead;
	un->un_g.dkg_nsect = labp->dkl_nsect;
	un->un_g.dkg_intrlv = labp->dkl_intrlv;

#if defined(_SUNOS_VTOC_8)
	un->un_g.dkg_gap1 = labp->dkl_gap1;
	un->un_g.dkg_gap2 = labp->dkl_gap2;
	un->un_g.dkg_bhead = labp->dkl_bhead;
#endif
#if defined(_SUNOS_VTOC_16)
	un->un_dkg_skew = labp->dkl_skew;
#endif

#if defined(__i386) || defined(__amd64)
	un->un_g.dkg_apc = labp->dkl_apc;
#endif

	/*
	 * Currently we rely on the values in the label being accurate.  If
	 * dkl_rpm or dkl_pcyl are zero in the label, use a default value.
5345 * 5346 * Note: In the future a MODE SENSE may be used to retrieve this data, 5347 * although this command is optional in SCSI-2. 5348 */ 5349 un->un_g.dkg_rpm = (labp->dkl_rpm != 0) ? labp->dkl_rpm : 3600; 5350 un->un_g.dkg_pcyl = (labp->dkl_pcyl != 0) ? labp->dkl_pcyl : 5351 (un->un_g.dkg_ncyl + un->un_g.dkg_acyl); 5352 5353 /* 5354 * The Read and Write reinstruct values may not be valid 5355 * for older disks. 5356 */ 5357 un->un_g.dkg_read_reinstruct = labp->dkl_read_reinstruct; 5358 un->un_g.dkg_write_reinstruct = labp->dkl_write_reinstruct; 5359 5360 /* Fill in partition table. */ 5361 #if defined(_SUNOS_VTOC_8) 5362 for (i = 0; i < NDKMAP; i++) { 5363 un->un_map[i].dkl_cylno = labp->dkl_map[i].dkl_cylno; 5364 un->un_map[i].dkl_nblk = labp->dkl_map[i].dkl_nblk; 5365 } 5366 #endif 5367 #if defined(_SUNOS_VTOC_16) 5368 vpartp = labp->dkl_vtoc.v_part; 5369 track_capacity = labp->dkl_nhead * labp->dkl_nsect; 5370 5371 for (i = 0; i < NDKMAP; i++, vpartp++) { 5372 un->un_map[i].dkl_cylno = vpartp->p_start / track_capacity; 5373 un->un_map[i].dkl_nblk = vpartp->p_size; 5374 } 5375 #endif 5376 5377 /* Fill in VTOC Structure. */ 5378 bcopy(&labp->dkl_vtoc, &un->un_vtoc, sizeof (struct dk_vtoc)); 5379 #if defined(_SUNOS_VTOC_8) 5380 /* 5381 * The 8-slice vtoc does not include the ascii label; save it into 5382 * the device's soft state structure here. 5383 */ 5384 bcopy(labp->dkl_asciilabel, un->un_asciilabel, LEN_DKL_ASCII); 5385 #endif 5386 5387 /* Mark the geometry as valid. */ 5388 un->un_f_geometry_is_valid = TRUE; 5389 5390 /* Now look for a valid capacity. */ 5391 track_capacity = (un->un_g.dkg_nhead * un->un_g.dkg_nsect); 5392 capacity = (un->un_g.dkg_ncyl * track_capacity); 5393 5394 if (un->un_g.dkg_acyl) { 5395 #if defined(__i386) || defined(__amd64) 5396 /* we may have > 1 alts cylinder */ 5397 capacity += (track_capacity * un->un_g.dkg_acyl); 5398 #else 5399 capacity += track_capacity; 5400 #endif 5401 } 5402 5403 /* 5404 * At this point, un->un_blockcount should contain valid data from 5405 * the READ CAPACITY command. 5406 */ 5407 if (un->un_f_blockcount_is_valid != TRUE) { 5408 /* 5409 * We have a situation where the target didn't give us a good 5410 * READ CAPACITY value, yet there appears to be a valid label. 5411 * In this case, we'll fake the capacity. 5412 */ 5413 un->un_blockcount = capacity; 5414 un->un_f_blockcount_is_valid = TRUE; 5415 goto done; 5416 } 5417 5418 5419 if ((capacity <= un->un_blockcount) || 5420 (un->un_state != SD_STATE_NORMAL)) { 5421 #if defined(_SUNOS_VTOC_8) 5422 /* 5423 * We can't let this happen on drives that are subdivided 5424 * into logical disks (i.e., that have an fdisk table). 5425 * The un_blockcount field should always hold the full media 5426 * size in sectors, period. This code would overwrite 5427 * un_blockcount with the size of the Solaris fdisk partition. 5428 */ 5429 SD_ERROR(SD_LOG_COMMON, un, 5430 "sd_uselabel: Label %d blocks; Drive %d blocks\n", 5431 capacity, un->un_blockcount); 5432 un->un_blockcount = capacity; 5433 un->un_f_blockcount_is_valid = TRUE; 5434 #endif /* defined(_SUNOS_VTOC_8) */ 5435 goto done; 5436 } 5437 5438 if (ISCD(un)) { 5439 /* For CDROMs, we trust that the data in the label is OK. 
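		 * We still verify that no slice extends past the end of
		 * the media.  For example (illustrative numbers): with
		 * dkl_nhead = 16 and dkl_nsect = 63, a slice at
		 * dkl_cylno = 10 with dkl_nblk = 2016 ends at block
		 * 16 * 63 * 10 + 2016 - 1 = 12095, and the geometry is
		 * invalidated if that exceeds un_blockcount.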
*/ 5440 #if defined(_SUNOS_VTOC_8) 5441 for (i = 0; i < NDKMAP; i++) { 5442 part_end = labp->dkl_nhead * labp->dkl_nsect * 5443 labp->dkl_map[i].dkl_cylno + 5444 labp->dkl_map[i].dkl_nblk - 1; 5445 5446 if ((labp->dkl_map[i].dkl_nblk) && 5447 (part_end > un->un_blockcount)) { 5448 un->un_f_geometry_is_valid = FALSE; 5449 break; 5450 } 5451 } 5452 #endif 5453 #if defined(_SUNOS_VTOC_16) 5454 vpartp = &(labp->dkl_vtoc.v_part[0]); 5455 for (i = 0; i < NDKMAP; i++, vpartp++) { 5456 part_end = vpartp->p_start + vpartp->p_size; 5457 if ((vpartp->p_size > 0) && 5458 (part_end > un->un_blockcount)) { 5459 un->un_f_geometry_is_valid = FALSE; 5460 break; 5461 } 5462 } 5463 #endif 5464 } else { 5465 uint64_t t_capacity; 5466 uint32_t t_lbasize; 5467 5468 mutex_exit(SD_MUTEX(un)); 5469 err = sd_send_scsi_READ_CAPACITY(un, &t_capacity, &t_lbasize, 5470 path_flag); 5471 ASSERT(t_capacity <= DK_MAX_BLOCKS); 5472 mutex_enter(SD_MUTEX(un)); 5473 5474 if (err == 0) { 5475 sd_update_block_info(un, t_lbasize, t_capacity); 5476 } 5477 5478 if (capacity > un->un_blockcount) { 5479 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 5480 "Corrupt label - bad geometry\n"); 5481 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 5482 "Label says %u blocks; Drive says %llu blocks\n", 5483 capacity, (unsigned long long)un->un_blockcount); 5484 un->un_f_geometry_is_valid = FALSE; 5485 label_error = SD_LABEL_IS_INVALID; 5486 } 5487 } 5488 5489 done: 5490 5491 SD_INFO(SD_LOG_COMMON, un, "sd_uselabel: (label geometry)\n"); 5492 SD_INFO(SD_LOG_COMMON, un, 5493 " ncyl: %d; acyl: %d; nhead: %d; nsect: %d\n", 5494 un->un_g.dkg_ncyl, un->un_g.dkg_acyl, 5495 un->un_g.dkg_nhead, un->un_g.dkg_nsect); 5496 SD_INFO(SD_LOG_COMMON, un, 5497 " lbasize: %d; capacity: %d; intrlv: %d; rpm: %d\n", 5498 un->un_tgt_blocksize, un->un_blockcount, 5499 un->un_g.dkg_intrlv, un->un_g.dkg_rpm); 5500 SD_INFO(SD_LOG_COMMON, un, " wrt_reinstr: %d; rd_reinstr: %d\n", 5501 un->un_g.dkg_write_reinstruct, un->un_g.dkg_read_reinstruct); 5502 5503 ASSERT(mutex_owned(SD_MUTEX(un))); 5504 5505 return (label_error); 5506 } 5507 5508 5509 /* 5510 * Function: sd_build_default_label 5511 * 5512 * Description: Generate a default label for those devices that do not have 5513 * one, e.g., new media, removable cartridges, etc.. 5514 * 5515 * Context: Kernel thread only 5516 */ 5517 5518 static void 5519 sd_build_default_label(struct sd_lun *un) 5520 { 5521 #if defined(_SUNOS_VTOC_16) 5522 uint_t phys_spc; 5523 uint_t disksize; 5524 struct dk_geom un_g; 5525 #endif 5526 5527 ASSERT(un != NULL); 5528 ASSERT(mutex_owned(SD_MUTEX(un))); 5529 5530 #if defined(_SUNOS_VTOC_8) 5531 /* 5532 * Note: This is a legacy check for non-removable devices on VTOC_8 5533 * only. This may be a valid check for VTOC_16 as well. 5534 */ 5535 if (!ISREMOVABLE(un)) { 5536 return; 5537 } 5538 #endif 5539 5540 bzero(&un->un_g, sizeof (struct dk_geom)); 5541 bzero(&un->un_vtoc, sizeof (struct dk_vtoc)); 5542 bzero(&un->un_map, NDKMAP * (sizeof (struct dk_map))); 5543 5544 #if defined(_SUNOS_VTOC_8) 5545 5546 /* 5547 * It's a REMOVABLE media, therefore no label (on sparc, anyway). 5548 * But it is still necessary to set up various geometry information, 5549 * and we are doing this here. 5550 */ 5551 5552 /* 5553 * For the rpm, we use the minimum for the disk. For the head, cyl, 5554 * and number of sector per track, if the capacity <= 1GB, head = 64, 5555 * sect = 32. else head = 255, sect 63 Note: the capacity should be 5556 * equal to C*H*S values. 
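	 * (For example, with head = 255 and sect = 63, a 2,100,000-block
	 * device yields ncyl = 2100000 / 16065 = 130, and the capacity
	 * is recomputed as 130 * 16065 = 2,088,450 blocks.)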
This will cause some truncation of size due 5557 * to round off errors. For CD-ROMs, this truncation can have adverse 5558 * side effects, so returning ncyl and nhead as 1. The nsect will 5559 * overflow for most of CD-ROMs as nsect is of type ushort. (4190569) 5560 */ 5561 if (ISCD(un)) { 5562 /* 5563 * Preserve the old behavior for non-writable 5564 * medias. Since dkg_nsect is a ushort, it 5565 * will lose bits as cdroms have more than 5566 * 65536 sectors. So if we recalculate 5567 * capacity, it will become much shorter. 5568 * But the dkg_* information is not 5569 * used for CDROMs so it is OK. But for 5570 * Writable CDs we need this information 5571 * to be valid (for newfs say). So we 5572 * make nsect and nhead > 1 that way 5573 * nsect can still stay within ushort limit 5574 * without losing any bits. 5575 */ 5576 if (un->un_f_mmc_writable_media == TRUE) { 5577 un->un_g.dkg_nhead = 64; 5578 un->un_g.dkg_nsect = 32; 5579 un->un_g.dkg_ncyl = un->un_blockcount / (64 * 32); 5580 un->un_blockcount = un->un_g.dkg_ncyl * 5581 un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5582 } else { 5583 un->un_g.dkg_ncyl = 1; 5584 un->un_g.dkg_nhead = 1; 5585 un->un_g.dkg_nsect = un->un_blockcount; 5586 } 5587 } else { 5588 if (un->un_blockcount <= 0x1000) { 5589 /* unlabeled SCSI floppy device */ 5590 un->un_g.dkg_nhead = 2; 5591 un->un_g.dkg_ncyl = 80; 5592 un->un_g.dkg_nsect = un->un_blockcount / (2 * 80); 5593 } else if (un->un_blockcount <= 0x200000) { 5594 un->un_g.dkg_nhead = 64; 5595 un->un_g.dkg_nsect = 32; 5596 un->un_g.dkg_ncyl = un->un_blockcount / (64 * 32); 5597 } else { 5598 un->un_g.dkg_nhead = 255; 5599 un->un_g.dkg_nsect = 63; 5600 un->un_g.dkg_ncyl = un->un_blockcount / (255 * 63); 5601 } 5602 un->un_blockcount = 5603 un->un_g.dkg_ncyl * un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5604 } 5605 5606 un->un_g.dkg_acyl = 0; 5607 un->un_g.dkg_bcyl = 0; 5608 un->un_g.dkg_rpm = 200; 5609 un->un_asciilabel[0] = '\0'; 5610 un->un_g.dkg_pcyl = un->un_g.dkg_ncyl; 5611 5612 un->un_map[0].dkl_cylno = 0; 5613 un->un_map[0].dkl_nblk = un->un_blockcount; 5614 un->un_map[2].dkl_cylno = 0; 5615 un->un_map[2].dkl_nblk = un->un_blockcount; 5616 5617 #elif defined(_SUNOS_VTOC_16) 5618 5619 if (un->un_solaris_size == 0) { 5620 /* 5621 * Got fdisk table but no solaris entry therefore 5622 * don't create a default label 5623 */ 5624 un->un_f_geometry_is_valid = TRUE; 5625 return; 5626 } 5627 5628 /* 5629 * For CDs we continue to use the physical geometry to calculate 5630 * number of cylinders. All other devices must convert the 5631 * physical geometry (geom_cache) to values that will fit 5632 * in a dk_geom structure. 5633 */ 5634 if (ISCD(un)) { 5635 phys_spc = un->un_pgeom.g_nhead * un->un_pgeom.g_nsect; 5636 } else { 5637 /* Convert physical geometry to disk geometry */ 5638 bzero(&un_g, sizeof (struct dk_geom)); 5639 sd_convert_geometry(un->un_blockcount, &un_g); 5640 bcopy(&un_g, &un->un_g, sizeof (un->un_g)); 5641 phys_spc = un->un_g.dkg_nhead * un->un_g.dkg_nsect; 5642 } 5643 5644 un->un_g.dkg_pcyl = un->un_solaris_size / phys_spc; 5645 un->un_g.dkg_acyl = DK_ACYL; 5646 un->un_g.dkg_ncyl = un->un_g.dkg_pcyl - DK_ACYL; 5647 disksize = un->un_g.dkg_ncyl * phys_spc; 5648 5649 if (ISCD(un)) { 5650 /* 5651 * CD's don't use the "heads * sectors * cyls"-type of 5652 * geometry, but instead use the entire capacity of the media. 5653 */ 5654 disksize = un->un_solaris_size; 5655 un->un_g.dkg_nhead = 1; 5656 un->un_g.dkg_nsect = 1; 5657 un->un_g.dkg_rpm = 5658 (un->un_pgeom.g_rpm == 0) ? 
200 : un->un_pgeom.g_rpm; 5659 5660 un->un_vtoc.v_part[0].p_start = 0; 5661 un->un_vtoc.v_part[0].p_size = disksize; 5662 un->un_vtoc.v_part[0].p_tag = V_BACKUP; 5663 un->un_vtoc.v_part[0].p_flag = V_UNMNT; 5664 5665 un->un_map[0].dkl_cylno = 0; 5666 un->un_map[0].dkl_nblk = disksize; 5667 un->un_offset[0] = 0; 5668 5669 } else { 5670 /* 5671 * Hard disks and removable media cartridges 5672 */ 5673 un->un_g.dkg_rpm = 5674 (un->un_pgeom.g_rpm == 0) ? 3600: un->un_pgeom.g_rpm; 5675 un->un_vtoc.v_sectorsz = un->un_sys_blocksize; 5676 5677 /* Add boot slice */ 5678 un->un_vtoc.v_part[8].p_start = 0; 5679 un->un_vtoc.v_part[8].p_size = phys_spc; 5680 un->un_vtoc.v_part[8].p_tag = V_BOOT; 5681 un->un_vtoc.v_part[8].p_flag = V_UNMNT; 5682 5683 un->un_map[8].dkl_cylno = 0; 5684 un->un_map[8].dkl_nblk = phys_spc; 5685 un->un_offset[8] = 0; 5686 } 5687 5688 un->un_g.dkg_apc = 0; 5689 un->un_vtoc.v_nparts = V_NUMPAR; 5690 un->un_vtoc.v_version = V_VERSION; 5691 5692 /* Add backup slice */ 5693 un->un_vtoc.v_part[2].p_start = 0; 5694 un->un_vtoc.v_part[2].p_size = disksize; 5695 un->un_vtoc.v_part[2].p_tag = V_BACKUP; 5696 un->un_vtoc.v_part[2].p_flag = V_UNMNT; 5697 5698 un->un_map[2].dkl_cylno = 0; 5699 un->un_map[2].dkl_nblk = disksize; 5700 un->un_offset[2] = 0; 5701 5702 (void) sprintf(un->un_vtoc.v_asciilabel, "DEFAULT cyl %d alt %d" 5703 " hd %d sec %d", un->un_g.dkg_ncyl, un->un_g.dkg_acyl, 5704 un->un_g.dkg_nhead, un->un_g.dkg_nsect); 5705 5706 #else 5707 #error "No VTOC format defined." 5708 #endif 5709 5710 un->un_g.dkg_read_reinstruct = 0; 5711 un->un_g.dkg_write_reinstruct = 0; 5712 5713 un->un_g.dkg_intrlv = 1; 5714 5715 un->un_vtoc.v_sanity = VTOC_SANE; 5716 5717 un->un_f_geometry_is_valid = TRUE; 5718 5719 SD_INFO(SD_LOG_COMMON, un, 5720 "sd_build_default_label: Default label created: " 5721 "cyl: %d\tacyl: %d\tnhead: %d\tnsect: %d\tcap: %d\n", 5722 un->un_g.dkg_ncyl, un->un_g.dkg_acyl, un->un_g.dkg_nhead, 5723 un->un_g.dkg_nsect, un->un_blockcount); 5724 } 5725 5726 5727 #if defined(_FIRMWARE_NEEDS_FDISK) 5728 /* 5729 * Max CHS values, as they are encoded into bytes, for 1022/254/63 5730 */ 5731 #define LBA_MAX_SECT (63 | ((1022 & 0x300) >> 2)) 5732 #define LBA_MAX_CYL (1022 & 0xFF) 5733 #define LBA_MAX_HEAD (254) 5734 5735 5736 /* 5737 * Function: sd_has_max_chs_vals 5738 * 5739 * Description: Return TRUE if Cylinder-Head-Sector values are all at maximum. 5740 * 5741 * Arguments: fdp - ptr to CHS info 5742 * 5743 * Return Code: True or false 5744 * 5745 * Context: Any. 5746 */ 5747 5748 static int 5749 sd_has_max_chs_vals(struct ipart *fdp) 5750 { 5751 return ((fdp->begcyl == LBA_MAX_CYL) && 5752 (fdp->beghead == LBA_MAX_HEAD) && 5753 (fdp->begsect == LBA_MAX_SECT) && 5754 (fdp->endcyl == LBA_MAX_CYL) && 5755 (fdp->endhead == LBA_MAX_HEAD) && 5756 (fdp->endsect == LBA_MAX_SECT)); 5757 } 5758 #endif 5759 5760 5761 /* 5762 * Function: sd_inq_fill 5763 * 5764 * Description: Print a piece of inquiry data, cleaned up for non-printable 5765 * characters and stopping at the first space character after 5766 * the beginning of the passed string; 5767 * 5768 * Arguments: p - source string 5769 * l - maximum length to copy 5770 * s - destination string 5771 * 5772 * Context: Any. 
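 *
 *		For example (illustrative input): given p = "SEAGATE ST318404"
 *		and l = 8, the destination receives "SEAGATE"; copying stops
 *		at the first space after the start of the string, and any
 *		non-printable characters seen before that are replaced
 *		with '*'.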
 */

static void
sd_inq_fill(char *p, int l, char *s)
{
	unsigned i = 0;
	char c;

	while (i++ < l) {
		if ((c = *p++) < ' ' || c >= 0x7F) {
			c = '*';
		} else if (i != 1 && c == ' ') {
			break;
		}
		*s++ = c;
	}
	*s++ = 0;
}


/*
 * Function: sd_register_devid
 *
 * Description: This routine will obtain the device id information from the
 *		target, obtain the serial number, and register the device
 *		id with the ddi framework.
 *
 * Arguments: devi - the system's dev_info_t for the device.
 *	      un - driver soft state (unit) structure
 *	      reservation_flag - indicates if a reservation conflict
 *		occurred during attach
 *
 * Context: Kernel Thread
 */
static void
sd_register_devid(struct sd_lun *un, dev_info_t *devi, int reservation_flag)
{
	int		rval = 0;
	uchar_t		*inq80 = NULL;
	size_t		inq80_len = MAX_INQUIRY_SIZE;
	size_t		inq80_resid = 0;
	uchar_t		*inq83 = NULL;
	size_t		inq83_len = MAX_INQUIRY_SIZE;
	size_t		inq83_resid = 0;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT((SD_DEVINFO(un)) == devi);

	/*
	 * This is the case of antiquated Sun disk drives that have the
	 * FAB_DEVID property set in the disk_table.  These drives
	 * manage the devids by storing them in the last 2 available
	 * sectors on the drive and have them fabricated by the ddi layer
	 * by calling ddi_devid_init and passing the DEVID_FAB flag.
	 */
	if (un->un_f_opt_fab_devid == TRUE) {
		/*
		 * Depending on EINVAL isn't reliable, since a reserved disk
		 * may result in invalid geometry, so check to make sure a
		 * reservation conflict did not occur during attach.
		 */
		if ((sd_get_devid(un) == EINVAL) &&
		    (reservation_flag != SD_TARGET_IS_RESERVED)) {
			/*
			 * The devid is invalid AND there is no reservation
			 * conflict.  Fabricate a new devid.
			 */
			(void) sd_create_devid(un);
		}

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: Devid Fabricated\n");
		}
		return;
	}

	/*
	 * We check the availability of the World Wide Name (0x83) and Unit
	 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and
	 * using un_vpd_page_mask from them, we decide which way to get the
	 * WWN.  If 0x83 is available, that is the best choice.  Our next
	 * choice is 0x80.  If neither is available, we munge the devid from
	 * the device vid/pid/serial # for Sun qualified disks, or use the
	 * ddi framework to fabricate a devid for non-Sun qualified disks.
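	 *
	 * That preference order, restated as an illustrative fragment
	 * (guarded so it is never compiled; the guard macro is an
	 * assumption, not part of the driver):
	 */
#ifdef SD_ILLUSTRATIVE_SKETCHES
	{
		int preferred_page =
		    (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) ? 0x83 :
		    (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) ? 0x80 : 0;

		/* 0 means: munge vid/pid/serial, or fall back to DEVID_FAB */
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_register_devid: preferred VPD page 0x%x\n",
		    preferred_page);
	}
#endif	/* SD_ILLUSTRATIVE_SKETCHES */
	/*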
	 * (End of sketch.)
	 */
	if (sd_check_vpd_page_support(un) == 0) {
		/* collect page 80 data if available */
		if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {

			mutex_exit(SD_MUTEX(un));
			inq80 = kmem_zalloc(inq80_len, KM_SLEEP);
			rval = sd_send_scsi_INQUIRY(un, inq80, inq80_len,
			    0x01, 0x80, &inq80_resid);

			if (rval != 0) {
				kmem_free(inq80, inq80_len);
				inq80 = NULL;
				inq80_len = 0;
			}
			mutex_enter(SD_MUTEX(un));
		}

		/* collect page 83 data if available */
		if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {

			mutex_exit(SD_MUTEX(un));
			inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
			rval = sd_send_scsi_INQUIRY(un, inq83, inq83_len,
			    0x01, 0x83, &inq83_resid);

			if (rval != 0) {
				kmem_free(inq83, inq83_len);
				inq83 = NULL;
				inq83_len = 0;
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/* encode best devid possible based on data available */
	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
	    (char *)ddi_driver_name(SD_DEVINFO(un)),
	    (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
	    inq80, inq80_len - inq80_resid, inq83, inq83_len -
	    inq83_resid, &un->un_devid) == DDI_SUCCESS) {

		/* devid successfully encoded, register devid */
		(void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);

	} else {
		/*
		 * Unable to encode a devid based on data available.
		 * This is not a Sun qualified disk.  Older Sun disk
		 * drives that have the SD_FAB_DEVID property
		 * set in the disk_table and non-Sun qualified
		 * disks are treated in the same manner.  These
		 * drives manage the devids by storing them in
		 * the last 2 available sectors on the drive and
		 * have them fabricated by the ddi layer by
		 * calling ddi_devid_init and passing the
		 * DEVID_FAB flag.
		 * Only fabricate a devid if one does not already
		 * exist.
		 */
		if (sd_get_devid(un) == EINVAL) {
			(void) sd_create_devid(un);
			un->un_f_opt_fab_devid = TRUE;
		}

		/* Register the devid if it exists */
		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: devid fabricated using "
			    "ddi framework\n");
		}
	}

	/* clean up resources */
	if (inq80 != NULL) {
		kmem_free(inq80, inq80_len);
	}
	if (inq83 != NULL) {
		kmem_free(inq83, inq83_len);
	}
}

static daddr_t
sd_get_devid_block(struct sd_lun *un)
{
	daddr_t spc, blk, head, cyl;

	if (un->un_blockcount <= DK_MAX_BLOCKS) {
		/* this geometry doesn't allow us to write a devid */
		if (un->un_g.dkg_acyl < 2) {
			return (-1);
		}

		/*
		 * Subtracting 2 guarantees that the next-to-last cylinder
		 * is used
		 */
		cyl  = un->un_g.dkg_ncyl + un->un_g.dkg_acyl - 2;
		spc  = un->un_g.dkg_nhead * un->un_g.dkg_nsect;
		head = un->un_g.dkg_nhead - 1;
		blk  = (cyl * (spc - un->un_g.dkg_apc)) +
		    (head * un->un_g.dkg_nsect) + 1;
	} else {
		if (un->un_reserved != -1) {
			blk = un->un_map[un->un_reserved].dkl_cylno + 1;
		} else {
			return (-1);
		}
	}
	return (blk);
}

/*
 * Function: sd_get_devid
 *
 * Description: This routine will return 0 if a valid device id has been
 *		obtained from the target and stored in the soft state.
If a 5981 * valid device id has not been previously read and stored, a 5982 * read attempt will be made. 5983 * 5984 * Arguments: un - driver soft state (unit) structure 5985 * 5986 * Return Code: 0 if we successfully get the device id 5987 * 5988 * Context: Kernel Thread 5989 */ 5990 5991 static int 5992 sd_get_devid(struct sd_lun *un) 5993 { 5994 struct dk_devid *dkdevid; 5995 ddi_devid_t tmpid; 5996 uint_t *ip; 5997 size_t sz; 5998 daddr_t blk; 5999 int status; 6000 int chksum; 6001 int i; 6002 size_t buffer_size; 6003 6004 ASSERT(un != NULL); 6005 ASSERT(mutex_owned(SD_MUTEX(un))); 6006 6007 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n", 6008 un); 6009 6010 if (un->un_devid != NULL) { 6011 return (0); 6012 } 6013 6014 blk = sd_get_devid_block(un); 6015 if (blk < 0) 6016 return (EINVAL); 6017 6018 /* 6019 * Read and verify device id, stored in the reserved cylinders at the 6020 * end of the disk. Backup label is on the odd sectors of the last 6021 * track of the last cylinder. Device id will be on track of the next 6022 * to last cylinder. 6023 */ 6024 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid)); 6025 mutex_exit(SD_MUTEX(un)); 6026 dkdevid = kmem_alloc(buffer_size, KM_SLEEP); 6027 status = sd_send_scsi_READ(un, dkdevid, buffer_size, blk, 6028 SD_PATH_DIRECT); 6029 if (status != 0) { 6030 goto error; 6031 } 6032 6033 /* Validate the revision */ 6034 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 6035 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 6036 status = EINVAL; 6037 goto error; 6038 } 6039 6040 /* Calculate the checksum */ 6041 chksum = 0; 6042 ip = (uint_t *)dkdevid; 6043 for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int)); 6044 i++) { 6045 chksum ^= ip[i]; 6046 } 6047 6048 /* Compare the checksums */ 6049 if (DKD_GETCHKSUM(dkdevid) != chksum) { 6050 status = EINVAL; 6051 goto error; 6052 } 6053 6054 /* Validate the device id */ 6055 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 6056 status = EINVAL; 6057 goto error; 6058 } 6059 6060 /* 6061 * Store the device id in the driver soft state 6062 */ 6063 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 6064 tmpid = kmem_alloc(sz, KM_SLEEP); 6065 6066 mutex_enter(SD_MUTEX(un)); 6067 6068 un->un_devid = tmpid; 6069 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 6070 6071 kmem_free(dkdevid, buffer_size); 6072 6073 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un); 6074 6075 return (status); 6076 error: 6077 mutex_enter(SD_MUTEX(un)); 6078 kmem_free(dkdevid, buffer_size); 6079 return (status); 6080 } 6081 6082 6083 /* 6084 * Function: sd_create_devid 6085 * 6086 * Description: This routine will fabricate the device id and write it 6087 * to the disk. 6088 * 6089 * Arguments: un - driver soft state (unit) structure 6090 * 6091 * Return Code: value of the fabricated device id 6092 * 6093 * Context: Kernel Thread 6094 */ 6095 6096 static ddi_devid_t 6097 sd_create_devid(struct sd_lun *un) 6098 { 6099 ASSERT(un != NULL); 6100 6101 /* Fabricate the devid */ 6102 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid) 6103 == DDI_FAILURE) { 6104 return (NULL); 6105 } 6106 6107 /* Write the devid to disk */ 6108 if (sd_write_deviceid(un) != 0) { 6109 ddi_devid_free(un->un_devid); 6110 un->un_devid = NULL; 6111 } 6112 6113 return (un->un_devid); 6114 } 6115 6116 6117 /* 6118 * Function: sd_write_deviceid 6119 * 6120 * Description: This routine will write the device id to the disk 6121 * reserved sector. 
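 *		The block written is a dk_devid structure; its final
 *		uint_t word carries an XOR checksum of every preceding
 *		word in the sector (a sketch of the layout: chksum =
 *		w[0] ^ w[1] ^ ... ^ w[n-2], stored in w[n-1]), which is
 *		what the checksum loop below computes before the write.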
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: -1 if the devid block cannot be determined
 *		value returned by sd_send_scsi_WRITE
 *
 * Context: Kernel Thread
 */

static int
sd_write_deviceid(struct sd_lun *un)
{
	struct dk_devid		*dkdevid;
	daddr_t			blk;
	uint_t			*ip, chksum;
	int			status;
	int			i;

	ASSERT(mutex_owned(SD_MUTEX(un)));

	blk = sd_get_devid_block(un);
	if (blk < 0)
		return (-1);
	mutex_exit(SD_MUTEX(un));

	/* Allocate the buffer */
	dkdevid = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);

	/* Fill in the revision */
	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id */
	mutex_enter(SD_MUTEX(un));
	bcopy(un->un_devid, &dkdevid->dkd_devid,
	    ddi_devid_sizeof(un->un_devid));
	mutex_exit(SD_MUTEX(un));

	/* Calculate the checksum */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((un->un_sys_blocksize - sizeof (int))/sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Fill-in checksum */
	DKD_FORMCHKSUM(chksum, dkdevid);

	/* Write the reserved sector */
	status = sd_send_scsi_WRITE(un, dkdevid, un->un_sys_blocksize, blk,
	    SD_PATH_DIRECT);

	kmem_free(dkdevid, un->un_sys_blocksize);

	mutex_enter(SD_MUTEX(un));
	return (status);
}


/*
 * Function: sd_check_vpd_page_support
 *
 * Description: This routine sends an inquiry command with the EVPD bit set
 *		and a page code of 0x00 to the device. It is used to determine
 *		which vital product pages are available to find the devid.
 *		We are looking for pages 0x83 or 0x80. If we return -1, the
 *		device does not support that command.
 *
 * Arguments: un - driver soft state (unit) structure
 *
 * Return Code: 0 - success
 *		-1 - the device does not support VPD pages
 *
 * Context: This routine can sleep.
 */

static int
sd_check_vpd_page_support(struct sd_lun *un)
{
	uchar_t	*page_list	= NULL;
	uchar_t	page_length	= 0xff;	/* Use max possible length */
	uchar_t	evpd		= 0x01;	/* Set the EVPD bit */
	uchar_t	page_code	= 0x00;	/* Supported VPD Pages */
	int	rval		= 0;
	int	counter;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));

	/*
	 * We'll set the page length to the maximum to save figuring it out
	 * with an additional call.
	 */
	page_list = kmem_zalloc(page_length, KM_SLEEP);

	rval = sd_send_scsi_INQUIRY(un, page_list, page_length, evpd,
	    page_code, NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Now we must validate that the device accepted the command, as some
	 * drives do not support it. If the drive does support it, we will
	 * return 0, and the supported pages will be in un_vpd_page_mask. If
	 * not, we return -1.
	 */
	if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) {
		/* Loop to find one of the 2 pages we need */
		counter = 4;	/* Supported pages start at byte 4, with 0x00 */

		/*
		 * Pages are returned in ascending order, and 0x83 is what we
		 * are hoping for.
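		 *
		 * For example (illustrative bytes): a page-0 response whose
		 * header ends with a page length of 3 and whose payload is
		 * 00 80 83 advertises pages 0x00, 0x80 and 0x83, so the walk
		 * below sets SD_VPD_SUPPORTED_PG, SD_VPD_UNIT_SERIAL_PG and
		 * SD_VPD_DEVID_WWN_PG in un_vpd_page_mask.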
6238 */ 6239 while ((page_list[counter] <= 0x83) && 6240 (counter <= (page_list[VPD_PAGE_LENGTH] + 6241 VPD_HEAD_OFFSET))) { 6242 /* 6243 * Add 3 because page_list[3] is the number of 6244 * pages minus 3 6245 */ 6246 6247 switch (page_list[counter]) { 6248 case 0x00: 6249 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG; 6250 break; 6251 case 0x80: 6252 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG; 6253 break; 6254 case 0x81: 6255 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG; 6256 break; 6257 case 0x82: 6258 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG; 6259 break; 6260 case 0x83: 6261 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG; 6262 break; 6263 } 6264 counter++; 6265 } 6266 6267 } else { 6268 rval = -1; 6269 6270 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6271 "sd_check_vpd_page_support: This drive does not implement " 6272 "VPD pages.\n"); 6273 } 6274 6275 kmem_free(page_list, page_length); 6276 6277 return (rval); 6278 } 6279 6280 6281 /* 6282 * Function: sd_setup_pm 6283 * 6284 * Description: Initialize Power Management on the device 6285 * 6286 * Context: Kernel Thread 6287 */ 6288 6289 static void 6290 sd_setup_pm(struct sd_lun *un, dev_info_t *devi) 6291 { 6292 uint_t log_page_size; 6293 uchar_t *log_page_data; 6294 int rval; 6295 6296 /* 6297 * Since we are called from attach, holding a mutex for 6298 * un is unnecessary. Because some of the routines called 6299 * from here require SD_MUTEX to not be held, assert this 6300 * right up front. 6301 */ 6302 ASSERT(!mutex_owned(SD_MUTEX(un))); 6303 /* 6304 * Since the sd device does not have the 'reg' property, 6305 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries. 6306 * The following code is to tell cpr that this device 6307 * DOES need to be suspended and resumed. 6308 */ 6309 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi, 6310 "pm-hardware-state", "needs-suspend-resume"); 6311 6312 /* 6313 * Check if HBA has set the "pm-capable" property. 6314 * If "pm-capable" exists and is non-zero then we can 6315 * power manage the device without checking the start/stop 6316 * cycle count log sense page. 6317 * 6318 * If "pm-capable" exists and is SD_PM_CAPABLE_FALSE (0) 6319 * then we should not power manage the device. 6320 * 6321 * If "pm-capable" doesn't exist then un->un_pm_capable_prop will 6322 * be set to SD_PM_CAPABLE_UNDEFINED (-1). In this case, sd will 6323 * check the start/stop cycle count log sense page and power manage 6324 * the device if the cycle count limit has not been exceeded. 6325 */ 6326 un->un_pm_capable_prop = 6327 ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 6328 "pm-capable", SD_PM_CAPABLE_UNDEFINED); 6329 if (un->un_pm_capable_prop != SD_PM_CAPABLE_UNDEFINED) { 6330 /* 6331 * pm-capable property exists. 6332 * 6333 * Convert "TRUE" values for un_pm_capable_prop to 6334 * SD_PM_CAPABLE_TRUE (1) to make it easier to check later. 6335 * "TRUE" values are any values except SD_PM_CAPABLE_FALSE (0) 6336 * and SD_PM_CAPABLE_UNDEFINED (-1) 6337 */ 6338 if (un->un_pm_capable_prop != SD_PM_CAPABLE_FALSE) { 6339 un->un_pm_capable_prop = SD_PM_CAPABLE_TRUE; 6340 } 6341 6342 SD_INFO(SD_LOG_ATTACH_DETACH, un, 6343 "sd_unit_attach: un:0x%p pm-capable " 6344 "property set to %d.\n", un, un->un_pm_capable_prop); 6345 } 6346 6347 /* 6348 * This complies with the new power management framework 6349 * for certain desktop machines. Create the pm_components 6350 * property as a string array property. 
	 *
	 * If this is a removable device or if the pm-capable property
	 * is SD_PM_CAPABLE_TRUE (1) then we should create the
	 * pm_components property without checking for the existence of
	 * the start-stop cycle counter log page.
	 */
	if (ISREMOVABLE(un) ||
	    un->un_pm_capable_prop == SD_PM_CAPABLE_TRUE) {
		/*
		 * Not all devices have a motor; try it first.  Some
		 * devices may return ILLEGAL REQUEST, and some may hang.
		 */
		un->un_f_start_stop_supported = TRUE;
		if (sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START,
		    SD_PATH_DIRECT) != 0) {
			un->un_f_start_stop_supported = FALSE;
		}

		/*
		 * Create the pm properties anyway; otherwise the parent
		 * can't go to sleep.
		 */
		(void) sd_create_pm_components(devi, un);
		un->un_f_pm_is_enabled = TRUE;

		/*
		 * Need to create a zero length (Boolean) property
		 * removable-media for the removable media devices.
		 * Note that the return value of the property is not being
		 * checked, since if unable to create the property
		 * then do not want the attach to fail altogether. Consistent
		 * with other property creation in attach.
		 */
		if (ISREMOVABLE(un)) {
			(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
			    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);
		}
		return;
	}

	rval = sd_log_page_supported(un, START_STOP_CYCLE_PAGE);

#ifdef	SDDEBUG
	if (sd_force_pm_supported) {
		/* Force a successful result */
		rval = 1;
	}
#endif

	/*
	 * If the start-stop cycle counter log page is not supported
	 * or if the pm-capable property is SD_PM_CAPABLE_FALSE (0)
	 * then we should not create the pm_components property.
	 */
	if (rval == -1 || un->un_pm_capable_prop == SD_PM_CAPABLE_FALSE) {
		/*
		 * Error.
		 * Reading log sense failed, most likely this is
		 * an older drive that does not support log sense.
		 * If this fails, auto-pm is not supported.
		 */
		un->un_power_level = SD_SPINDLE_ON;
		un->un_f_pm_is_enabled = FALSE;

	} else if (rval == 0) {
		/*
		 * Page not found.
		 * The start stop cycle counter is implemented as page
		 * START_STOP_CYCLE_PAGE_VU_PAGE (0x31) in older disks. For
		 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE).
		 */
		if (sd_log_page_supported(un, START_STOP_CYCLE_VU_PAGE) == 1) {
			/*
			 * Page found; use this one.
			 */
			un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
			un->un_f_pm_is_enabled = TRUE;
		} else {
			/*
			 * Error or page not found.
			 * auto-pm is not supported for this device.
			 */
			un->un_power_level = SD_SPINDLE_ON;
			un->un_f_pm_is_enabled = FALSE;
		}
	} else {
		/*
		 * Page found; use it.
		 */
		un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
		un->un_f_pm_is_enabled = TRUE;
	}


	if (un->un_f_pm_is_enabled == TRUE) {
		log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
		log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);

		rval = sd_send_scsi_LOG_SENSE(un, log_page_data,
		    log_page_size, un->un_start_stop_cycle_page,
		    0x01, 0, SD_PATH_DIRECT);
#ifdef	SDDEBUG
		if (sd_force_pm_supported) {
			/* Force a successful result */
			rval = 0;
		}
#endif

		/*
		 * If the LOG SENSE for the start/stop cycle counter page
		 * succeeds, then power management is supported and we can
		 * enable auto-pm.
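		 *
		 * The page-selection policy above reduces to the following
		 * sketch (illustrative pseudo-code only):
		 *
		 *	if (standard page 0xE is supported)
		 *		use START_STOP_CYCLE_PAGE;
		 *	else if (vendor-unique page 0x31 is supported)
		 *		use START_STOP_CYCLE_VU_PAGE;
		 *	else
		 *		disable auto-pm;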
6464 */ 6465 if (rval == 0) { 6466 (void) sd_create_pm_components(devi, un); 6467 } else { 6468 un->un_power_level = SD_SPINDLE_ON; 6469 un->un_f_pm_is_enabled = FALSE; 6470 } 6471 6472 kmem_free(log_page_data, log_page_size); 6473 } 6474 } 6475 6476 6477 /* 6478 * Function: sd_create_pm_components 6479 * 6480 * Description: Initialize PM property. 6481 * 6482 * Context: Kernel thread context 6483 */ 6484 6485 static void 6486 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un) 6487 { 6488 char *pm_comp[] = { "NAME=spindle-motor", "0=off", "1=on", NULL }; 6489 6490 ASSERT(!mutex_owned(SD_MUTEX(un))); 6491 6492 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi, 6493 "pm-components", pm_comp, 3) == DDI_PROP_SUCCESS) { 6494 /* 6495 * When components are initially created they are idle, 6496 * power up any non-removables. 6497 * Note: the return value of pm_raise_power can't be used 6498 * for determining if PM should be enabled for this device. 6499 * Even if you check the return values and remove this 6500 * property created above, the PM framework will not honor the 6501 * change after the first call to pm_raise_power. Hence, 6502 * removal of that property does not help if pm_raise_power 6503 * fails. In the case of removable media, the start/stop 6504 * will fail if the media is not present. 6505 */ 6506 if ((!ISREMOVABLE(un)) && (pm_raise_power(SD_DEVINFO(un), 0, 6507 SD_SPINDLE_ON) == DDI_SUCCESS)) { 6508 mutex_enter(SD_MUTEX(un)); 6509 un->un_power_level = SD_SPINDLE_ON; 6510 mutex_enter(&un->un_pm_mutex); 6511 /* Set to on and not busy. */ 6512 un->un_pm_count = 0; 6513 } else { 6514 mutex_enter(SD_MUTEX(un)); 6515 un->un_power_level = SD_SPINDLE_OFF; 6516 mutex_enter(&un->un_pm_mutex); 6517 /* Set to off. */ 6518 un->un_pm_count = -1; 6519 } 6520 mutex_exit(&un->un_pm_mutex); 6521 mutex_exit(SD_MUTEX(un)); 6522 } else { 6523 un->un_power_level = SD_SPINDLE_ON; 6524 un->un_f_pm_is_enabled = FALSE; 6525 } 6526 } 6527 6528 6529 /* 6530 * Function: sd_ddi_suspend 6531 * 6532 * Description: Performs system power-down operations. This includes 6533 * setting the drive state to indicate its suspended so 6534 * that no new commands will be accepted. Also, wait for 6535 * all commands that are in transport or queued to a timer 6536 * for retry to complete. All timeout threads are cancelled. 6537 * 6538 * Return Code: DDI_FAILURE or DDI_SUCCESS 6539 * 6540 * Context: Kernel thread context 6541 */ 6542 6543 static int 6544 sd_ddi_suspend(dev_info_t *devi) 6545 { 6546 struct sd_lun *un; 6547 clock_t wait_cmds_complete; 6548 6549 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6550 if (un == NULL) { 6551 return (DDI_FAILURE); 6552 } 6553 6554 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n"); 6555 6556 mutex_enter(SD_MUTEX(un)); 6557 6558 /* Return success if the device is already suspended. */ 6559 if (un->un_state == SD_STATE_SUSPENDED) { 6560 mutex_exit(SD_MUTEX(un)); 6561 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6562 "device already suspended, exiting\n"); 6563 return (DDI_SUCCESS); 6564 } 6565 6566 /* Return failure if the device is being used by HA */ 6567 if (un->un_resvd_status & 6568 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) { 6569 mutex_exit(SD_MUTEX(un)); 6570 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6571 "device in use by HA, exiting\n"); 6572 return (DDI_FAILURE); 6573 } 6574 6575 /* 6576 * Return failure if the device is in a resource wait 6577 * or power changing state. 
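	 *
	 * Further down, this routine also cancels several pending
	 * timeouts, always with the same pattern: copy and clear the
	 * timeout id while SD_MUTEX is held, drop the mutex across
	 * untimeout(9F) (which can wait for a running handler that
	 * itself takes SD_MUTEX, so holding the mutex here could
	 * deadlock), then retake the mutex.  An illustrative fragment
	 * (guarded so it is never compiled; the guard macro is an
	 * assumption, not part of the driver):
	 */
#ifdef SD_ILLUSTRATIVE_SKETCHES
	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;

		un->un_reset_throttle_timeid = NULL;	/* claim under mutex */
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);	/* may wait for the handler */
		mutex_enter(SD_MUTEX(un));
	}
#endif	/* SD_ILLUSTRATIVE_SKETCHES */
	/*
	 * (End of sketch.)  As noted above, fail the suspend if the
	 * device is in a resource wait or power changing state.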
6578 */ 6579 if ((un->un_state == SD_STATE_RWAIT) || 6580 (un->un_state == SD_STATE_PM_CHANGING)) { 6581 mutex_exit(SD_MUTEX(un)); 6582 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: " 6583 "device in resource wait state, exiting\n"); 6584 return (DDI_FAILURE); 6585 } 6586 6587 6588 un->un_save_state = un->un_last_state; 6589 New_state(un, SD_STATE_SUSPENDED); 6590 6591 /* 6592 * Wait for all commands that are in transport or queued to a timer 6593 * for retry to complete. 6594 * 6595 * While waiting, no new commands will be accepted or sent because of 6596 * the new state we set above. 6597 * 6598 * Wait till current operation has completed. If we are in the resource 6599 * wait state (with an intr outstanding) then we need to wait till the 6600 * intr completes and starts the next cmd. We want to wait for 6601 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND. 6602 */ 6603 wait_cmds_complete = ddi_get_lbolt() + 6604 (sd_wait_cmds_complete * drv_usectohz(1000000)); 6605 6606 while (un->un_ncmds_in_transport != 0) { 6607 /* 6608 * Fail if commands do not finish in the specified time. 6609 */ 6610 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un), 6611 wait_cmds_complete) == -1) { 6612 /* 6613 * Undo the state changes made above. Everything 6614 * must go back to it's original value. 6615 */ 6616 Restore_state(un); 6617 un->un_last_state = un->un_save_state; 6618 /* Wake up any threads that might be waiting. */ 6619 cv_broadcast(&un->un_suspend_cv); 6620 mutex_exit(SD_MUTEX(un)); 6621 SD_ERROR(SD_LOG_IO_PM, un, 6622 "sd_ddi_suspend: failed due to outstanding cmds\n"); 6623 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n"); 6624 return (DDI_FAILURE); 6625 } 6626 } 6627 6628 /* 6629 * Cancel SCSI watch thread and timeouts, if any are active 6630 */ 6631 6632 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) { 6633 opaque_t temp_token = un->un_swr_token; 6634 mutex_exit(SD_MUTEX(un)); 6635 scsi_watch_suspend(temp_token); 6636 mutex_enter(SD_MUTEX(un)); 6637 } 6638 6639 if (un->un_reset_throttle_timeid != NULL) { 6640 timeout_id_t temp_id = un->un_reset_throttle_timeid; 6641 un->un_reset_throttle_timeid = NULL; 6642 mutex_exit(SD_MUTEX(un)); 6643 (void) untimeout(temp_id); 6644 mutex_enter(SD_MUTEX(un)); 6645 } 6646 6647 if (un->un_dcvb_timeid != NULL) { 6648 timeout_id_t temp_id = un->un_dcvb_timeid; 6649 un->un_dcvb_timeid = NULL; 6650 mutex_exit(SD_MUTEX(un)); 6651 (void) untimeout(temp_id); 6652 mutex_enter(SD_MUTEX(un)); 6653 } 6654 6655 mutex_enter(&un->un_pm_mutex); 6656 if (un->un_pm_timeid != NULL) { 6657 timeout_id_t temp_id = un->un_pm_timeid; 6658 un->un_pm_timeid = NULL; 6659 mutex_exit(&un->un_pm_mutex); 6660 mutex_exit(SD_MUTEX(un)); 6661 (void) untimeout(temp_id); 6662 mutex_enter(SD_MUTEX(un)); 6663 } else { 6664 mutex_exit(&un->un_pm_mutex); 6665 } 6666 6667 if (un->un_retry_timeid != NULL) { 6668 timeout_id_t temp_id = un->un_retry_timeid; 6669 un->un_retry_timeid = NULL; 6670 mutex_exit(SD_MUTEX(un)); 6671 (void) untimeout(temp_id); 6672 mutex_enter(SD_MUTEX(un)); 6673 } 6674 6675 if (un->un_direct_priority_timeid != NULL) { 6676 timeout_id_t temp_id = un->un_direct_priority_timeid; 6677 un->un_direct_priority_timeid = NULL; 6678 mutex_exit(SD_MUTEX(un)); 6679 (void) untimeout(temp_id); 6680 mutex_enter(SD_MUTEX(un)); 6681 } 6682 6683 if (un->un_f_is_fibre == TRUE) { 6684 /* 6685 * Remove callbacks for insert and remove events 6686 */ 6687 if (un->un_insert_event != NULL) { 6688 mutex_exit(SD_MUTEX(un)); 6689 (void) ddi_remove_event_handler(un->un_insert_cb_id); 6690 
mutex_enter(SD_MUTEX(un)); 6691 un->un_insert_event = NULL; 6692 } 6693 6694 if (un->un_remove_event != NULL) { 6695 mutex_exit(SD_MUTEX(un)); 6696 (void) ddi_remove_event_handler(un->un_remove_cb_id); 6697 mutex_enter(SD_MUTEX(un)); 6698 un->un_remove_event = NULL; 6699 } 6700 } 6701 6702 mutex_exit(SD_MUTEX(un)); 6703 6704 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n"); 6705 6706 return (DDI_SUCCESS); 6707 } 6708 6709 6710 /* 6711 * Function: sd_ddi_pm_suspend 6712 * 6713 * Description: Set the drive state to low power. 6714 * Someone else is required to actually change the drive 6715 * power level. 6716 * 6717 * Arguments: un - driver soft state (unit) structure 6718 * 6719 * Return Code: DDI_FAILURE or DDI_SUCCESS 6720 * 6721 * Context: Kernel thread context 6722 */ 6723 6724 static int 6725 sd_ddi_pm_suspend(struct sd_lun *un) 6726 { 6727 ASSERT(un != NULL); 6728 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: entry\n"); 6729 6730 ASSERT(!mutex_owned(SD_MUTEX(un))); 6731 mutex_enter(SD_MUTEX(un)); 6732 6733 /* 6734 * Exit if power management is not enabled for this device, or if 6735 * the device is being used by HA. 6736 */ 6737 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status & 6738 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) { 6739 mutex_exit(SD_MUTEX(un)); 6740 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exiting\n"); 6741 return (DDI_SUCCESS); 6742 } 6743 6744 SD_INFO(SD_LOG_POWER, un, "sd_ddi_pm_suspend: un_ncmds_in_driver=%ld\n", 6745 un->un_ncmds_in_driver); 6746 6747 /* 6748 * See if the device is not busy, ie.: 6749 * - we have no commands in the driver for this device 6750 * - not waiting for resources 6751 */ 6752 if ((un->un_ncmds_in_driver == 0) && 6753 (un->un_state != SD_STATE_RWAIT)) { 6754 /* 6755 * The device is not busy, so it is OK to go to low power state. 6756 * Indicate low power, but rely on someone else to actually 6757 * change it. 6758 */ 6759 mutex_enter(&un->un_pm_mutex); 6760 un->un_pm_count = -1; 6761 mutex_exit(&un->un_pm_mutex); 6762 un->un_power_level = SD_SPINDLE_OFF; 6763 } 6764 6765 mutex_exit(SD_MUTEX(un)); 6766 6767 SD_TRACE(SD_LOG_POWER, un, "sd_ddi_pm_suspend: exit\n"); 6768 6769 return (DDI_SUCCESS); 6770 } 6771 6772 6773 /* 6774 * Function: sd_ddi_resume 6775 * 6776 * Description: Performs system power-up operations.. 6777 * 6778 * Return Code: DDI_SUCCESS 6779 * DDI_FAILURE 6780 * 6781 * Context: Kernel thread context 6782 */ 6783 6784 static int 6785 sd_ddi_resume(dev_info_t *devi) 6786 { 6787 struct sd_lun *un; 6788 6789 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi)); 6790 if (un == NULL) { 6791 return (DDI_FAILURE); 6792 } 6793 6794 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n"); 6795 6796 mutex_enter(SD_MUTEX(un)); 6797 Restore_state(un); 6798 6799 /* 6800 * Restore the state which was saved to give the 6801 * the right state in un_last_state 6802 */ 6803 un->un_last_state = un->un_save_state; 6804 /* 6805 * Note: throttle comes back at full. 6806 * Also note: this MUST be done before calling pm_raise_power 6807 * otherwise the system can get hung in biowait. The scenario where 6808 * this'll happen is under cpr suspend. Writing of the system 6809 * state goes through sddump, which writes 0 to un_throttle. If 6810 * writing the system state then fails, example if the partition is 6811 * too small, then cpr attempts a resume. 
If throttle isn't restored 6812 * from the saved value until after calling pm_raise_power then 6813 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs 6814 * in biowait. 6815 */ 6816 un->un_throttle = un->un_saved_throttle; 6817 6818 /* 6819 * The chance of failure is very rare as the only command done in power 6820 * entry point is the START command when you transition from 0->1 or 6821 * unknown->1. Put it to SPINDLE ON state irrespective of the state at 6822 * which suspend was done. Ignore the return value as the resume should 6823 * not fail. In the case of removable media the media need not be 6824 * inserted and hence there is a chance that raise power will fail with 6825 * media not present. 6826 */ 6827 if (!ISREMOVABLE(un)) { 6828 mutex_exit(SD_MUTEX(un)); 6829 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 6830 mutex_enter(SD_MUTEX(un)); 6831 } 6832 6833 /* 6834 * Don't broadcast to the suspend cv and therefore possibly 6835 * start I/O until after power has been restored. 6836 */ 6837 cv_broadcast(&un->un_suspend_cv); 6838 cv_broadcast(&un->un_state_cv); 6839 6840 /* restart thread */ 6841 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) { 6842 scsi_watch_resume(un->un_swr_token); 6843 } 6844 6845 #if (defined(__fibre)) 6846 if (un->un_f_is_fibre == TRUE) { 6847 /* 6848 * Add callbacks for insert and remove events 6849 */ 6850 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 6851 sd_init_event_callbacks(un); 6852 } 6853 } 6854 #endif 6855 6856 /* 6857 * Transport any pending commands to the target. 6858 * 6859 * If this is a low-activity device commands in queue will have to wait 6860 * until new commands come in, which may take a while. Also, we 6861 * specifically don't check un_ncmds_in_transport because we know that 6862 * there really are no commands in progress after the unit was 6863 * suspended and we could have reached the throttle level, been 6864 * suspended, and have no new commands coming in for a while. Highly 6865 * unlikely, but so is the low-activity disk scenario. 6866 */ 6867 ddi_xbuf_dispatch(un->un_xbuf_attr); 6868 6869 sd_start_cmds(un, NULL); 6870 mutex_exit(SD_MUTEX(un)); 6871 6872 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n"); 6873 6874 return (DDI_SUCCESS); 6875 } 6876 6877 6878 /* 6879 * Function: sd_ddi_pm_resume 6880 * 6881 * Description: Set the drive state to powered on. 6882 * Someone else is required to actually change the drive 6883 * power level. 6884 * 6885 * Arguments: un - driver soft state (unit) structure 6886 * 6887 * Return Code: DDI_SUCCESS 6888 * 6889 * Context: Kernel thread context 6890 */ 6891 6892 static int 6893 sd_ddi_pm_resume(struct sd_lun *un) 6894 { 6895 ASSERT(un != NULL); 6896 6897 ASSERT(!mutex_owned(SD_MUTEX(un))); 6898 mutex_enter(SD_MUTEX(un)); 6899 un->un_power_level = SD_SPINDLE_ON; 6900 6901 ASSERT(!mutex_owned(&un->un_pm_mutex)); 6902 mutex_enter(&un->un_pm_mutex); 6903 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 6904 un->un_pm_count++; 6905 ASSERT(un->un_pm_count == 0); 6906 /* 6907 * Note: no longer do the cv_broadcast on un_suspend_cv. The 6908 * un_suspend_cv is for a system resume, not a power management 6909 * device resume. (4297749) 6910 * cv_broadcast(&un->un_suspend_cv); 6911 */ 6912 } 6913 mutex_exit(&un->un_pm_mutex); 6914 mutex_exit(SD_MUTEX(un)); 6915 6916 return (DDI_SUCCESS); 6917 } 6918 6919 6920 /* 6921 * Function: sd_pm_idletimeout_handler 6922 * 6923 * Description: A timer routine that's active only while a device is busy. 6924 * The purpose is to extend slightly the pm framework's busy 6925 * view of the device to prevent busy/idle thrashing for 6926 * back-to-back commands. Do this by comparing the current time 6927 * to the time at which the last command completed and when the 6928 * difference is greater than sd_pm_idletime, call 6929 * pm_idle_component. In addition to indicating idle to the pm 6930 * framework, update the chain type to again use the internal pm 6931 * layers of the driver. 6932 * 6933 * Arguments: arg - driver soft state (unit) structure 6934 * 6935 * Context: Executes in a timeout(9F) thread context 6936 */ 6937 6938 static void 6939 sd_pm_idletimeout_handler(void *arg) 6940 { 6941 struct sd_lun *un = arg; 6942 6943 time_t now; 6944 6945 mutex_enter(&sd_detach_mutex); 6946 if (un->un_detach_count != 0) { 6947 /* Abort if the instance is detaching */ 6948 mutex_exit(&sd_detach_mutex); 6949 return; 6950 } 6951 mutex_exit(&sd_detach_mutex); 6952 6953 now = ddi_get_time(); 6954 /* 6955 * Grab both mutexes, in the proper order, since we're accessing 6956 * both PM and softstate variables. 6957 */ 6958 mutex_enter(SD_MUTEX(un)); 6959 mutex_enter(&un->un_pm_mutex); 6960 if (((now - un->un_pm_idle_time) > sd_pm_idletime) && 6961 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) { 6962 /* 6963 * Update the chain types. 6964 * This takes effect on the next new command received. 6965 */ 6966 if (ISREMOVABLE(un)) { 6967 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 6968 } else { 6969 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 6970 } 6971 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 6972 6973 SD_TRACE(SD_LOG_IO_PM, un, 6974 "sd_pm_idletimeout_handler: idling device\n"); 6975 (void) pm_idle_component(SD_DEVINFO(un), 0); 6976 un->un_pm_idle_timeid = NULL; 6977 } else { 6978 un->un_pm_idle_timeid = 6979 timeout(sd_pm_idletimeout_handler, un, 6980 (drv_usectohz((clock_t)300000))); /* 300 ms. */ 6981 } 6982 mutex_exit(&un->un_pm_mutex); 6983 mutex_exit(SD_MUTEX(un)); 6984 } 6985 6986 6987 /* 6988 * Function: sd_pm_timeout_handler 6989 * 6990 * Description: Callback to tell framework we are idle. 6991 * 6992 * Context: timeout(9F) thread context. 6993 */ 6994 6995 static void 6996 sd_pm_timeout_handler(void *arg) 6997 { 6998 struct sd_lun *un = arg; 6999 7000 (void) pm_idle_component(SD_DEVINFO(un), 0); 7001 mutex_enter(&un->un_pm_mutex); 7002 un->un_pm_timeid = NULL; 7003 mutex_exit(&un->un_pm_mutex); 7004 } 7005 7006 7007 /* 7008 * Function: sdpower 7009 * 7010 * Description: PM entry point. 7011 * 7012 * Return Code: DDI_SUCCESS 7013 * DDI_FAILURE 7014 * 7015 * Context: Kernel thread context 7016 */ 7017 7018 static int 7019 sdpower(dev_info_t *devi, int component, int level) 7020 { 7021 struct sd_lun *un; 7022 int instance; 7023 int rval = DDI_SUCCESS; 7024 uint_t i, log_page_size, maxcycles, ncycles; 7025 uchar_t *log_page_data; 7026 int log_sense_page; 7027 int medium_present; 7028 time_t intvlp; 7029 dev_t dev; 7030 struct pm_trans_data sd_pm_tran_data; 7031 uchar_t save_state; 7032 int sval; 7033 uchar_t state_before_pm; 7034 int got_semaphore_here; 7035 7036 instance = ddi_get_instance(devi); 7037 7038 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 7039 (SD_SPINDLE_OFF > level) || (level > SD_SPINDLE_ON) || 7040 component != 0) { 7041 return (DDI_FAILURE); 7042 } 7043 7044 dev = sd_make_device(SD_DEVINFO(un)); 7045 7046 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level); 7047 7048 /* 7049 * Must synchronize power down with close.
* Attempt to decrement/acquire the open/close semaphore, 7051 * but do NOT wait on it. If it's not greater than zero, 7052 * ie. it can't be decremented without waiting, then 7053 * someone else, either open or close, already has it 7054 * and the try returns 0. Use that knowledge here to determine 7055 * if it's OK to change the device power level. 7056 * Also, only increment it on exit if it was decremented, ie. gotten, 7057 * here. 7058 */ 7059 got_semaphore_here = sema_tryp(&un->un_semoclose); 7060 7061 mutex_enter(SD_MUTEX(un)); 7062 7063 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n", 7064 un->un_ncmds_in_driver); 7065 7066 /* 7067 * If un_ncmds_in_driver is non-zero, commands are already being 7068 * processed in the driver; if the semaphore was not obtained here, 7069 * an open or close is being processed. Either way the device is 7070 * busy, so the concurrent request to go to low power cannot be 7071 * honored and we must return failure. 7072 */ 7073 if ((level == SD_SPINDLE_OFF) && 7074 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) { 7075 mutex_exit(SD_MUTEX(un)); 7076 7077 if (got_semaphore_here != 0) { 7078 sema_v(&un->un_semoclose); 7079 } 7080 SD_TRACE(SD_LOG_IO_PM, un, 7081 "sdpower: exit, device has queued cmds.\n"); 7082 return (DDI_FAILURE); 7083 } 7084 7085 /* 7086 * If the state is OFFLINE, the disk is completely unresponsive; 7087 * any commands we would send to change its power state would 7088 * fail anyway, so just return failure here. 7089 * 7090 * Power changes to a device that's OFFLINE or SUSPENDED 7091 * are not allowed. 7092 */ 7093 if ((un->un_state == SD_STATE_OFFLINE) || 7094 (un->un_state == SD_STATE_SUSPENDED)) { 7095 mutex_exit(SD_MUTEX(un)); 7096 7097 if (got_semaphore_here != 0) { 7098 sema_v(&un->un_semoclose); 7099 } 7100 SD_TRACE(SD_LOG_IO_PM, un, 7101 "sdpower: exit, device is off-line.\n"); 7102 return (DDI_FAILURE); 7103 } 7104 7105 /* 7106 * Change the device's state to indicate its power level 7107 * is being changed. Do this to prevent a power off in the 7108 * middle of commands, which is especially bad on devices 7109 * that are really powered off instead of just spun down. 7110 */ 7111 state_before_pm = un->un_state; 7112 un->un_state = SD_STATE_PM_CHANGING; 7113 7114 mutex_exit(SD_MUTEX(un)); 7115 7116 /* 7117 * Bypass checking the log sense information for removables 7118 * and devices for which the HBA set the pm-capable property. 7119 * If un->un_pm_capable_prop is SD_PM_CAPABLE_UNDEFINED (-1) 7120 * then the HBA did not create the property. 7121 */ 7122 if ((level == SD_SPINDLE_OFF) && (!ISREMOVABLE(un)) && 7123 un->un_pm_capable_prop == SD_PM_CAPABLE_UNDEFINED) { 7124 /* 7125 * Get the log sense information to understand whether the 7126 * power cycle counts have gone beyond the threshold.
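 *
 * (Illustrative sketch, not new logic: the parameter offsets used
 * below assume the standard layout of the SCSI start-stop cycle
 * counter log page, where the two 4-byte, big-endian counters are
 * assembled as
 *
 *	maxcycles = (page[0x1c] << 24) | (page[0x1d] << 16) |
 *	    (page[0x1e] << 8) | page[0x1f];
 *	ncycles = (page[0x24] << 24) | (page[0x25] << 16) |
 *	    (page[0x26] << 8) | page[0x27];
 *
 * The threshold decision itself is delegated to pm_trans_check().)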
*/ 7128 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE; 7129 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP); 7130 7131 mutex_enter(SD_MUTEX(un)); 7132 log_sense_page = un->un_start_stop_cycle_page; 7133 mutex_exit(SD_MUTEX(un)); 7134 7135 rval = sd_send_scsi_LOG_SENSE(un, log_page_data, 7136 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT); 7137 #ifdef SDDEBUG 7138 if (sd_force_pm_supported) { 7139 /* Force a successful result */ 7140 rval = 0; 7141 } 7142 #endif 7143 if (rval != 0) { 7144 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 7145 "Log Sense Failed\n"); 7146 kmem_free(log_page_data, log_page_size); 7147 /* Cannot support power management on those drives */ 7148 7149 if (got_semaphore_here != 0) { 7150 sema_v(&un->un_semoclose); 7151 } 7152 /* 7153 * On exit put the state back to its original value 7154 * and broadcast to anyone waiting for the power 7155 * change completion. 7156 */ 7157 mutex_enter(SD_MUTEX(un)); 7158 un->un_state = state_before_pm; 7159 cv_broadcast(&un->un_suspend_cv); 7160 mutex_exit(SD_MUTEX(un)); 7161 SD_TRACE(SD_LOG_IO_PM, un, 7162 "sdpower: exit, Log Sense Failed.\n"); 7163 return (DDI_FAILURE); 7164 } 7165 7166 /* 7167 * From the page data - Convert the essential information to 7168 * pm_trans_data 7169 */ 7170 maxcycles = 7171 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) | 7172 (log_page_data[0x1E] << 8) | log_page_data[0x1F]; 7173 7174 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles; 7175 7176 ncycles = 7177 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) | 7178 (log_page_data[0x26] << 8) | log_page_data[0x27]; 7179 7180 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles; 7181 7182 for (i = 0; i < DC_SCSI_MFR_LEN; i++) { 7183 sd_pm_tran_data.un.scsi_cycles.svc_date[i] = 7184 log_page_data[8+i]; 7185 } 7186 7187 kmem_free(log_page_data, log_page_size); 7188 7189 /* 7190 * Call pm_trans_check routine to get the OK from 7191 * the global policy 7192 */ 7193 7194 sd_pm_tran_data.format = DC_SCSI_FORMAT; 7195 sd_pm_tran_data.un.scsi_cycles.flag = 0; 7196 7197 rval = pm_trans_check(&sd_pm_tran_data, &intvlp); 7198 #ifdef SDDEBUG 7199 if (sd_force_pm_supported) { 7200 /* Force a successful result */ 7201 rval = 1; 7202 } 7203 #endif 7204 switch (rval) { 7205 case 0: 7206 /* 7207 * Either it is not OK to power cycle, or there was an error in 7208 * the parameters passed. The framework has returned, in intvlp, 7209 * the advised time to wait before considering a power cycle. 7210 * Based on that intvlp parameter we are supposed to pretend we 7211 * are busy so that the pm framework will never call our power 7212 * entry point. Because of that, install a timeout handler and 7213 * wait for the recommended time to elapse so that power 7214 * management can be effective again. 7215 * 7216 * To effect this behavior, call pm_busy_component to 7217 * indicate to the framework this device is busy. 7218 * By not adjusting un_pm_count, the rest of PM in 7219 * the driver will function normally, independent of this; 7220 * but because the framework is told the device 7221 * is busy, it won't attempt powering down until it gets 7222 * a matching idle. The timeout handler sends that idle. 7223 * Note: sd_pm_entry can't be called here to do this 7224 * because sdpower may have been called as a result 7225 * of a call to pm_raise_power from within sd_pm_entry. 7226 * 7227 * If a timeout handler is already active then 7228 * don't install another.
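 *
 * In outline, the pattern used below is (a sketch of the calls
 * that follow, not additional logic):
 *
 *	un->un_pm_timeid = timeout(sd_pm_timeout_handler, un,
 *	    intvlp * drv_usectohz(1000000));
 *	(void) pm_busy_component(SD_DEVINFO(un), 0);
 *	... later, sd_pm_timeout_handler() runs and issues:
 *	(void) pm_idle_component(SD_DEVINFO(un), 0);
 *
 * The busy/idle calls must balance before the framework will
 * consider the device idle again.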
*/ 7230 mutex_enter(&un->un_pm_mutex); 7231 if (un->un_pm_timeid == NULL) { 7232 un->un_pm_timeid = 7233 timeout(sd_pm_timeout_handler, 7234 un, intvlp * drv_usectohz(1000000)); 7235 mutex_exit(&un->un_pm_mutex); 7236 (void) pm_busy_component(SD_DEVINFO(un), 0); 7237 } else { 7238 mutex_exit(&un->un_pm_mutex); 7239 } 7240 if (got_semaphore_here != 0) { 7241 sema_v(&un->un_semoclose); 7242 } 7243 /* 7244 * On exit put the state back to its original value 7245 * and broadcast to anyone waiting for the power 7246 * change completion. 7247 */ 7248 mutex_enter(SD_MUTEX(un)); 7249 un->un_state = state_before_pm; 7250 cv_broadcast(&un->un_suspend_cv); 7251 mutex_exit(SD_MUTEX(un)); 7252 7253 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, " 7254 "trans check Failed, not ok to power cycle.\n"); 7255 return (DDI_FAILURE); 7256 7257 case -1: 7258 if (got_semaphore_here != 0) { 7259 sema_v(&un->un_semoclose); 7260 } 7261 /* 7262 * On exit put the state back to its original value 7263 * and broadcast to anyone waiting for the power 7264 * change completion. 7265 */ 7266 mutex_enter(SD_MUTEX(un)); 7267 un->un_state = state_before_pm; 7268 cv_broadcast(&un->un_suspend_cv); 7269 mutex_exit(SD_MUTEX(un)); 7270 SD_TRACE(SD_LOG_IO_PM, un, 7271 "sdpower: exit, trans check command Failed.\n"); 7272 return (DDI_FAILURE); 7273 } 7274 } 7275 7276 if (level == SD_SPINDLE_OFF) { 7277 /* 7278 * Save the last state... if the STOP FAILS we need it 7279 * for restoring 7280 */ 7281 mutex_enter(SD_MUTEX(un)); 7282 save_state = un->un_last_state; 7283 /* 7284 * There must not be any commands being processed 7285 * in the driver when we get here. Power to the 7286 * device is potentially going off. 7287 */ 7288 ASSERT(un->un_ncmds_in_driver == 0); 7289 mutex_exit(SD_MUTEX(un)); 7290 7291 /* 7292 * For now suspend the device completely before spindle is 7293 * turned off 7294 */ 7295 if ((rval = sd_ddi_pm_suspend(un)) == DDI_FAILURE) { 7296 if (got_semaphore_here != 0) { 7297 sema_v(&un->un_semoclose); 7298 } 7299 /* 7300 * On exit put the state back to its original value 7301 * and broadcast to anyone waiting for the power 7302 * change completion. 7303 */ 7304 mutex_enter(SD_MUTEX(un)); 7305 un->un_state = state_before_pm; 7306 cv_broadcast(&un->un_suspend_cv); 7307 mutex_exit(SD_MUTEX(un)); 7308 SD_TRACE(SD_LOG_IO_PM, un, 7309 "sdpower: exit, PM suspend Failed.\n"); 7310 return (DDI_FAILURE); 7311 } 7312 } 7313 7314 /* 7315 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open, 7316 * close, or strategy. Dump no longer uses this routine; it uses its 7317 * own code so it can be done in polled mode. 7318 */ 7319 7320 medium_present = TRUE; 7321 7322 /* 7323 * When powering up, issue a TUR in case the device is at unit 7324 * attention. Don't do retries. Bypass the PM layer, otherwise 7325 * a deadlock on un_pm_busy_cv will occur. 7326 */ 7327 if (level == SD_SPINDLE_ON) { 7328 (void) sd_send_scsi_TEST_UNIT_READY(un, 7329 SD_DONT_RETRY_TUR | SD_BYPASS_PM); 7330 } 7331 7332 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n", 7333 ((level == SD_SPINDLE_ON) ? "START" : "STOP")); 7334 7335 sval = sd_send_scsi_START_STOP_UNIT(un, 7336 ((level == SD_SPINDLE_ON) ? SD_TARGET_START : SD_TARGET_STOP), 7337 SD_PATH_DIRECT); 7338 /* Command failed, check for media present.
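 * (Note: a START/STOP UNIT issued to a removable-media device with no
 * medium inserted is expected to fail with ENXIO; the check below uses
 * that to record "medium absent" rather than treating it as a hard
 * failure.)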
*/ 7339 if ((sval == ENXIO) && ISREMOVABLE(un)) { 7340 medium_present = FALSE; 7341 } 7342 7343 /* 7344 * The conditions of interest here are: 7345 * if a spindle off with media present fails, 7346 * then restore the state and return an error. 7347 * else if a spindle on fails, 7348 * then return an error (there's no state to restore). 7349 * In all other cases we set up for the new state 7350 * and return success. 7351 */ 7352 switch (level) { 7353 case SD_SPINDLE_OFF: 7354 if ((medium_present == TRUE) && (sval != 0)) { 7355 /* The stop command from above failed */ 7356 rval = DDI_FAILURE; 7357 /* 7358 * The stop command failed, and we have media 7359 * present. Put the level back by calling 7360 * sd_ddi_pm_resume() and set the state back to 7361 * its previous value. 7362 */ 7363 (void) sd_ddi_pm_resume(un); 7364 mutex_enter(SD_MUTEX(un)); 7365 un->un_last_state = save_state; 7366 mutex_exit(SD_MUTEX(un)); 7367 break; 7368 } 7369 /* 7370 * The stop command from above succeeded. 7371 */ 7372 if (ISREMOVABLE(un)) { 7373 /* 7374 * Terminate watch thread in case of removable media 7375 * devices going into low power state. This is as per 7376 * the requirements of the pm framework, otherwise commands 7377 * will be generated for the device (through watch 7378 * thread), even when the device is in low power state. 7379 */ 7380 mutex_enter(SD_MUTEX(un)); 7381 un->un_f_watcht_stopped = FALSE; 7382 if (un->un_swr_token != NULL) { 7383 opaque_t temp_token = un->un_swr_token; 7384 un->un_f_watcht_stopped = TRUE; 7385 un->un_swr_token = NULL; 7386 mutex_exit(SD_MUTEX(un)); 7387 (void) scsi_watch_request_terminate(temp_token, 7388 SCSI_WATCH_TERMINATE_WAIT); 7389 } else { 7390 mutex_exit(SD_MUTEX(un)); 7391 } 7392 } 7393 break; 7394 7395 default: /* The level requested is spindle on... */ 7396 /* 7397 * Legacy behavior: return success on a failed spinup 7398 * if there is no media in the drive. 7399 * Do this by looking at medium_present here. 7400 */ 7401 if ((sval != 0) && medium_present) { 7402 /* The start command from above failed */ 7403 rval = DDI_FAILURE; 7404 break; 7405 } 7406 /* 7407 * The start command from above succeeded 7408 * Resume the devices now that we have 7409 * started the disks 7410 */ 7411 (void) sd_ddi_pm_resume(un); 7412 7413 /* 7414 * Resume the watch thread since it was suspended 7415 * when the device went into low power mode. 7416 */ 7417 if (ISREMOVABLE(un)) { 7418 mutex_enter(SD_MUTEX(un)); 7419 if (un->un_f_watcht_stopped == TRUE) { 7420 opaque_t temp_token; 7421 7422 un->un_f_watcht_stopped = FALSE; 7423 mutex_exit(SD_MUTEX(un)); 7424 temp_token = scsi_watch_request_submit( 7425 SD_SCSI_DEVP(un), 7426 sd_check_media_time, 7427 SENSE_LENGTH, sd_media_watch_cb, 7428 (caddr_t)dev); 7429 mutex_enter(SD_MUTEX(un)); 7430 un->un_swr_token = temp_token; 7431 } 7432 mutex_exit(SD_MUTEX(un)); 7433 } 7434 } 7435 if (got_semaphore_here != 0) { 7436 sema_v(&un->un_semoclose); 7437 } 7438 /* 7439 * On exit put the state back to its original value 7440 * and broadcast to anyone waiting for the power 7441 * change completion. 7442 */ 7443 mutex_enter(SD_MUTEX(un)); 7444 un->un_state = state_before_pm; 7445 cv_broadcast(&un->un_suspend_cv); 7446 mutex_exit(SD_MUTEX(un)); 7447 7448 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval); 7449 7450 return (rval); 7451 } 7452 7453 7454 7455 /* 7456 * Function: sdattach 7457 * 7458 * Description: Driver's attach(9E) entry point function.
7459 * 7460 * Arguments: devi - opaque device info handle 7461 * cmd - attach type 7462 * 7463 * Return Code: DDI_SUCCESS 7464 * DDI_FAILURE 7465 * 7466 * Context: Kernel thread context 7467 */ 7468 7469 static int 7470 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd) 7471 { 7472 switch (cmd) { 7473 case DDI_ATTACH: 7474 return (sd_unit_attach(devi)); 7475 case DDI_RESUME: 7476 return (sd_ddi_resume(devi)); 7477 default: 7478 break; 7479 } 7480 return (DDI_FAILURE); 7481 } 7482 7483 7484 /* 7485 * Function: sddetach 7486 * 7487 * Description: Driver's detach(9E) entry point function. 7488 * 7489 * Arguments: devi - opaque device info handle 7490 * cmd - detach type 7491 * 7492 * Return Code: DDI_SUCCESS 7493 * DDI_FAILURE 7494 * 7495 * Context: Kernel thread context 7496 */ 7497 7498 static int 7499 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd) 7500 { 7501 switch (cmd) { 7502 case DDI_DETACH: 7503 return (sd_unit_detach(devi)); 7504 case DDI_SUSPEND: 7505 return (sd_ddi_suspend(devi)); 7506 default: 7507 break; 7508 } 7509 return (DDI_FAILURE); 7510 } 7511 7512 7513 /* 7514 * Function: sd_sync_with_callback 7515 * 7516 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft 7517 * state while the callback routine is active. 7518 * 7519 * Arguments: un: softstate structure for the instance 7520 * 7521 * Context: Kernel thread context 7522 */ 7523 7524 static void 7525 sd_sync_with_callback(struct sd_lun *un) 7526 { 7527 ASSERT(un != NULL); 7528 7529 mutex_enter(SD_MUTEX(un)); 7530 7531 ASSERT(un->un_in_callback >= 0); 7532 7533 while (un->un_in_callback > 0) { 7534 mutex_exit(SD_MUTEX(un)); 7535 delay(2); 7536 mutex_enter(SD_MUTEX(un)); 7537 } 7538 7539 mutex_exit(SD_MUTEX(un)); 7540 } 7541 7542 /* 7543 * Function: sd_unit_attach 7544 * 7545 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates 7546 * the soft state structure for the device and performs 7547 * all necessary structure and device initializations. 7548 * 7549 * Arguments: devi: the system's dev_info_t for the device. 7550 * 7551 * Return Code: DDI_SUCCESS if attach is successful. 7552 * DDI_FAILURE if any part of the attach fails. 7553 * 7554 * Context: Called at attach(9e) time for the DDI_ATTACH flag. 7555 * Kernel thread context only. Can sleep. 7556 */ 7557 7558 static int 7559 sd_unit_attach(dev_info_t *devi) 7560 { 7561 struct scsi_device *devp; 7562 struct sd_lun *un; 7563 char *variantp; 7564 int reservation_flag = SD_TARGET_IS_UNRESERVED; 7565 int instance; 7566 int rval; 7567 uint64_t capacity; 7568 uint_t lbasize; 7569 7570 /* 7571 * Retrieve the target driver's private data area. This was set 7572 * up by the HBA. 7573 */ 7574 devp = ddi_get_driver_private(devi); 7575 7576 /* 7577 * Since we have no idea what state things were left in by the last 7578 * user of the device, set up some 'default' settings, ie. turn 'em 7579 * off. The scsi_ifsetcap calls force re-negotiations with the drive. 7580 * Do this before the scsi_probe, which sends an inquiry. 7581 * This is a fix for bug (4430280). 7582 * Of special importance is wide-xfer. The drive could have been left 7583 * in wide transfer mode by the last driver to communicate with it, 7584 * this includes us. If that's the case, and if the following is not 7585 * setup properly or we don't re-negotiate with the drive prior to 7586 * transferring data to/from the drive, it causes bus parity errors, 7587 * data overruns, and unexpected interrupts. This first occurred when 7588 * the fix for bug (4378686) was made. 
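 *
 * (Note on the scsi_ifsetcap(9F) calls below: the value argument of 0
 * turns each capability off, and the final argument of 1 applies the
 * change to this target only; the HBA then renegotiates the setting
 * on subsequent commands.)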
7589 */ 7590 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1); 7591 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1); 7592 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1); 7593 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1); 7594 7595 /* 7596 * Use scsi_probe() to issue an INQUIRY command to the device. 7597 * This call will allocate and fill in the scsi_inquiry structure 7598 * and point the sd_inq member of the scsi_device structure to it. 7599 * If the attach succeeds, then this memory will not be de-allocated 7600 * (via scsi_unprobe()) until the instance is detached. 7601 */ 7602 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) { 7603 goto probe_failed; 7604 } 7605 7606 /* 7607 * Check the device type as specified in the inquiry data and 7608 * claim it if it is of a type that we support. 7609 */ 7610 switch (devp->sd_inq->inq_dtype) { 7611 case DTYPE_DIRECT: 7612 break; 7613 case DTYPE_RODIRECT: 7614 break; 7615 case DTYPE_OPTICAL: 7616 break; 7617 case DTYPE_NOTPRESENT: 7618 default: 7619 /* Unsupported device type; fail the attach. */ 7620 goto probe_failed; 7621 } 7622 7623 /* 7624 * Allocate the soft state structure for this unit. 7625 * 7626 * We rely upon this memory being set to all zeroes by 7627 * ddi_soft_state_zalloc(). We assume that any member of the 7628 * soft state structure that is not explicitly initialized by 7629 * this routine will have a value of zero. 7630 */ 7631 instance = ddi_get_instance(devp->sd_dev); 7632 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) { 7633 goto probe_failed; 7634 } 7635 7636 /* 7637 * Retrieve a pointer to the newly-allocated soft state. 7638 * 7639 * This should NEVER fail if the ddi_soft_state_zalloc() call above 7640 * was successful, unless something has gone horribly wrong and the 7641 * ddi's soft state internals are corrupt (in which case it is 7642 * probably better to halt here than just fail the attach....) 7643 */ 7644 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) { 7645 panic("sd_unit_attach: NULL soft state on instance:0x%x", 7646 instance); 7647 /*NOTREACHED*/ 7648 } 7649 7650 /* 7651 * Link the back ptr of the driver soft state to the scsi_device 7652 * struct for this lun. 7653 * Save a pointer to the softstate in the driver-private area of 7654 * the scsi_device struct. 7655 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until 7656 * we first set un->un_sd below. 7657 */ 7658 un->un_sd = devp; 7659 devp->sd_private = (opaque_t)un; 7660 7661 /* 7662 * The following must be after devp is stored in the soft state struct. 7663 */ 7664 #ifdef SDDEBUG 7665 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7666 "%s_unit_attach: un:0x%p instance:%d\n", 7667 ddi_driver_name(devi), un, instance); 7668 #endif 7669 7670 /* 7671 * Set up the device type and node type (for the minor nodes). 7672 * By default we assume that the device can at least support the 7673 * Common Command Set. Call it a CD-ROM if it reports itself 7674 * as a RODIRECT device. 7675 */ 7676 switch (devp->sd_inq->inq_dtype) { 7677 case DTYPE_RODIRECT: 7678 un->un_node_type = DDI_NT_CD_CHAN; 7679 un->un_ctype = CTYPE_CDROM; 7680 break; 7681 case DTYPE_OPTICAL: 7682 un->un_node_type = DDI_NT_BLOCK_CHAN; 7683 un->un_ctype = CTYPE_ROD; 7684 break; 7685 default: 7686 un->un_node_type = DDI_NT_BLOCK_CHAN; 7687 un->un_ctype = CTYPE_CCS; 7688 break; 7689 } 7690 7691 /* 7692 * Try to read the interconnect type from the HBA. 
* 7694 * Note: This driver is currently compiled as two binaries, a parallel 7695 * scsi version (sd) and a fibre channel version (ssd). All functional 7696 * differences are determined at compile time. In the future a single 7697 * binary will be provided and the interconnect type will be used to 7698 * differentiate between fibre and parallel scsi behaviors. At that time 7699 * it will be necessary for all fibre channel HBAs to support this 7700 * property. 7701 * 7702 * Set un_f_is_fibre to TRUE (default to fibre). 7703 */ 7704 un->un_f_is_fibre = TRUE; 7705 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) { 7706 case INTERCONNECT_SSA: 7707 un->un_interconnect_type = SD_INTERCONNECT_SSA; 7708 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7709 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un); 7710 break; 7711 case INTERCONNECT_PARALLEL: 7712 un->un_f_is_fibre = FALSE; 7713 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7714 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7715 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un); 7716 break; 7717 case INTERCONNECT_FIBRE: 7718 un->un_interconnect_type = SD_INTERCONNECT_FIBRE; 7719 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7720 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un); 7721 break; 7722 case INTERCONNECT_FABRIC: 7723 un->un_interconnect_type = SD_INTERCONNECT_FABRIC; 7724 un->un_node_type = DDI_NT_BLOCK_FABRIC; 7725 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7726 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un); 7727 break; 7728 default: 7729 #ifdef SD_DEFAULT_INTERCONNECT_TYPE 7730 /* 7731 * The HBA does not support the "interconnect-type" property 7732 * (or did not provide a recognized type). 7733 * 7734 * Note: This will be obsoleted when a single fibre channel 7735 * and parallel scsi driver is delivered. In the meantime the 7736 * interconnect type will be set to the platform default. If that 7737 * type is not parallel SCSI, it means that we should be 7738 * assuming "ssd" semantics. However, here this also means that 7739 * the FC HBA is not supporting the "interconnect-type" property 7740 * like we expect it to, so log this occurrence. 7741 */ 7742 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE; 7743 if (!SD_IS_PARALLEL_SCSI(un)) { 7744 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7745 "sd_unit_attach: un:0x%p Assuming " 7746 "INTERCONNECT_FIBRE\n", un); 7747 } else { 7748 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7749 "sd_unit_attach: un:0x%p Assuming " 7750 "INTERCONNECT_PARALLEL\n", un); 7751 un->un_f_is_fibre = FALSE; 7752 } 7753 #else 7754 /* 7755 * Note: This source will be implemented when a single fibre 7756 * channel and parallel scsi driver is delivered. The default 7757 * will be to assume that if a device does not support the 7758 * "interconnect-type" property it is a parallel SCSI HBA and 7759 * we will set the interconnect type for parallel scsi.
*/ 7761 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL; 7762 un->un_f_is_fibre = FALSE; 7763 #endif 7764 break; 7765 } 7766 7767 if (un->un_f_is_fibre == TRUE) { 7768 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) == 7769 SCSI_VERSION_3) { 7770 switch (un->un_interconnect_type) { 7771 case SD_INTERCONNECT_FIBRE: 7772 case SD_INTERCONNECT_SSA: 7773 un->un_node_type = DDI_NT_BLOCK_WWN; 7774 break; 7775 default: 7776 break; 7777 } 7778 } 7779 } 7780 7781 /* 7782 * Initialize the Request Sense command for the target 7783 */ 7784 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) { 7785 goto alloc_rqs_failed; 7786 } 7787 7788 /* 7789 * Set un_retry_count to SD_RETRY_COUNT; this is OK for SPARC, 7790 * which has separate binaries for sd and ssd. 7791 * 7792 * x86 has one binary, and un_retry_count is set based on connection type. 7793 * The hardcoded values will go away when SPARC uses one binary 7794 * for sd and ssd. These hardcoded values need to match 7795 * SD_RETRY_COUNT in sddef.h. 7796 * The value used is based on the interconnect type: 7797 * fibre = 3, parallel = 5. 7798 */ 7799 #if defined(__i386) || defined(__amd64) 7800 un->un_retry_count = un->un_f_is_fibre ? 3 : 5; 7801 #else 7802 un->un_retry_count = SD_RETRY_COUNT; 7803 #endif 7804 7805 /* 7806 * Set the per disk retry count to the default number of retries 7807 * for disks and CDROMs. This value can be overridden by the 7808 * disk property list or an entry in sd.conf. 7809 */ 7810 un->un_notready_retry_count = 7811 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un) 7812 : DISK_NOT_READY_RETRY_COUNT(un); 7813 7814 /* 7815 * Set the busy retry count to the default value of un_retry_count. 7816 * This can be overridden by entries in sd.conf or the device 7817 * config table. 7818 */ 7819 un->un_busy_retry_count = un->un_retry_count; 7820 7821 /* 7822 * Init the reset threshold for retries. This number determines 7823 * how many retries must be performed before a reset can be issued 7824 * (for certain error conditions). This can be overridden by entries 7825 * in sd.conf or the device config table. 7826 */ 7827 un->un_reset_retry_count = (un->un_retry_count / 2); 7828 7829 /* 7830 * Set the victim_retry_count to twice the default un_retry_count 7831 */ 7832 un->un_victim_retry_count = (2 * un->un_retry_count); 7833 7834 /* 7835 * Set the reservation release timeout to the default value of 7836 * 5 seconds. This can be overridden by entries in ssd.conf or the 7837 * device config table. 7838 */ 7839 un->un_reserve_release_time = 5; 7840 7841 /* 7842 * Set up the default maximum transfer size. Note that this may 7843 * get updated later in the attach, when setting up default wide 7844 * operations for disks. 7845 */ 7846 #if defined(__i386) || defined(__amd64) 7847 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE; 7848 #else 7849 un->un_max_xfer_size = (uint_t)maxphys; 7850 #endif 7851 7852 /* 7853 * Get "allow bus device reset" property (defaults to "enabled" if 7854 * the property was not defined). This is to disable bus resets for 7855 * certain kinds of error recovery. Note: In the future when a run-time 7856 * fibre check is available the soft state flag should default to 7857 * enabled.
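 *
 * For example, a hypothetical driver.conf entry of the form
 *
 *	allow-bus-device-reset=0;
 *
 * would disable bus device resets for non-fibre instances; the
 * property is read below with a default of 1 (enabled).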
*/ 7859 if (un->un_f_is_fibre == TRUE) { 7860 un->un_f_allow_bus_device_reset = TRUE; 7861 } else { 7862 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 7863 "allow-bus-device-reset", 1) != 0) { 7864 un->un_f_allow_bus_device_reset = TRUE; 7865 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7866 "sd_unit_attach: un:0x%p Bus device reset enabled\n", 7867 un); 7868 } else { 7869 un->un_f_allow_bus_device_reset = FALSE; 7870 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7871 "sd_unit_attach: un:0x%p Bus device reset disabled\n", 7872 un); 7873 } 7874 } 7875 7876 /* 7877 * Check if this is an ATAPI device. ATAPI devices use Group 1 7878 * Read/Write commands and Group 2 Mode Sense/Select commands. 7879 * 7880 * Note: The "obsolete" way of doing this is to check for the "atapi" 7881 * property. The new "variant" property with a value of "atapi" has been 7882 * introduced so that future 'variants' of standard SCSI behavior (like 7883 * atapi) could be specified by the underlying HBA drivers by supplying 7884 * a new value for the "variant" property, instead of having to define a 7885 * new property. 7886 */ 7887 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) { 7888 un->un_f_cfg_is_atapi = TRUE; 7889 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7890 "sd_unit_attach: un:0x%p Atapi device\n", un); 7891 } 7892 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant", 7893 &variantp) == DDI_PROP_SUCCESS) { 7894 if (strcmp(variantp, "atapi") == 0) { 7895 un->un_f_cfg_is_atapi = TRUE; 7896 SD_INFO(SD_LOG_ATTACH_DETACH, un, 7897 "sd_unit_attach: un:0x%p Atapi device\n", un); 7898 } 7899 ddi_prop_free(variantp); 7900 } 7901 7902 /* 7903 * Assume doorlock commands are supported. If not, the first 7904 * call to sd_send_scsi_DOORLOCK() will set it to FALSE 7905 */ 7906 un->un_f_doorlock_supported = TRUE; 7907 7908 un->un_cmd_timeout = SD_IO_TIME; 7909 7910 /* Info on current states, statuses, etc. (Updated frequently) */ 7911 un->un_state = SD_STATE_NORMAL; 7912 un->un_last_state = SD_STATE_NORMAL; 7913 7914 /* Control & status info for command throttling */ 7915 un->un_throttle = sd_max_throttle; 7916 un->un_saved_throttle = sd_max_throttle; 7917 un->un_min_throttle = sd_min_throttle; 7918 7919 if (un->un_f_is_fibre == TRUE) { 7920 un->un_f_use_adaptive_throttle = TRUE; 7921 } else { 7922 un->un_f_use_adaptive_throttle = FALSE; 7923 } 7924 7925 /* Removable media support. */ 7926 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); 7927 un->un_mediastate = DKIO_NONE; 7928 un->un_specified_mediastate = DKIO_NONE; 7929 7930 /* CVs for suspend/resume (PM or DR) */ 7931 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); 7932 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); 7933 7934 /* Power management support. */ 7935 un->un_power_level = SD_SPINDLE_UNINIT; 7936 7937 /* 7938 * The open/close semaphore is used to serialize threads executing 7939 * in the driver's open & close entry point routines for a given 7940 * instance. 7941 */ 7942 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); 7943 7944 /* 7945 * The conf file entry and softstate variable are a forceful override, 7946 * meaning a non-zero value must be entered to change the default. 7947 */ 7948 un->un_f_disksort_disabled = FALSE; 7949 7950 /* 7951 * Retrieve the properties from the static driver table or the driver 7952 * configuration file (.conf) for this unit and update the soft state 7953 * for the device as needed for the indicated properties.
* Note: the property configuration needs to occur here as some of the 7955 * following routines may have dependencies on soft state flags set 7956 * as part of the driver property configuration. 7957 */ 7958 sd_read_unit_properties(un); 7959 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 7960 "sd_unit_attach: un:0x%p property configuration complete.\n", un); 7961 7962 /* 7963 * By default, we mark the capacity, lbasize, and geometry 7964 * as invalid. Only if we successfully read a valid capacity 7965 * will we update the un_blockcount and un_tgt_blocksize with the 7966 * valid values (the geometry will be validated later). 7967 */ 7968 un->un_f_blockcount_is_valid = FALSE; 7969 un->un_f_tgt_blocksize_is_valid = FALSE; 7970 un->un_f_geometry_is_valid = FALSE; 7971 7972 /* 7973 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine 7974 * otherwise. 7975 */ 7976 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE; 7977 un->un_blockcount = 0; 7978 7979 /* 7980 * Set up the per-instance info needed to determine the correct 7981 * CDBs and other info for issuing commands to the target. 7982 */ 7983 sd_init_cdb_limits(un); 7984 7985 /* 7986 * Set up the IO chains to use, based upon the target type. 7987 */ 7988 if (ISREMOVABLE(un)) { 7989 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA; 7990 } else { 7991 un->un_buf_chain_type = SD_CHAIN_INFO_DISK; 7992 } 7993 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD; 7994 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD; 7995 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD; 7996 7997 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf), 7998 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit, 7999 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER); 8000 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi); 8001 8002 8003 if (ISCD(un)) { 8004 un->un_additional_codes = sd_additional_codes; 8005 } else { 8006 un->un_additional_codes = NULL; 8007 } 8008 8009 /* 8010 * Create the kstats here so they can be available for attach-time 8011 * routines that send commands to the unit (either polled or via 8012 * sd_send_scsi_cmd). 8013 * 8014 * Note: This is a critical sequence that needs to be maintained: 8015 * 1) Instantiate the kstats here, before any routines using the 8016 * iopath (i.e. sd_send_scsi_cmd). 8017 * 2) Initialize the error stats (sd_set_errstats) and partition 8018 * stats (sd_set_pstats), following sd_validate_geometry(), 8019 * sd_register_devid(), and sd_disable_caching(). 8020 */ 8021 8022 un->un_stats = kstat_create(sd_label, instance, 8023 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT); 8024 if (un->un_stats != NULL) { 8025 un->un_stats->ks_lock = SD_MUTEX(un); 8026 kstat_install(un->un_stats); 8027 } 8028 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8029 "sd_unit_attach: un:0x%p un_stats created\n", un); 8030 8031 sd_create_errstats(un, instance); 8032 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8033 "sd_unit_attach: un:0x%p errstats created\n", un); 8034 8035 /* 8036 * The following if/else code was relocated here from below as part 8037 * of the fix for bug (4430280). However with the default setup added 8038 * on entry to this routine, it's no longer absolutely necessary for 8039 * this to be before the call to sd_spin_up_unit. 8040 */ 8041 if (SD_IS_PARALLEL_SCSI(un)) { 8042 /* 8043 * If SCSI-2 tagged queueing is supported by the target 8044 * and by the host adapter then we will enable it.
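 *
 * (In the test below, inq_rdf == RDF_SCSI2 means the INQUIRY response
 * data format indicates a SCSI-2 or later device, and inq_cmdque is
 * the INQUIRY CmdQue bit advertising command queueing support; ARQ
 * must also already be enabled.)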
8045 */ 8046 un->un_tagflags = 0; 8047 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && 8048 (devp->sd_inq->inq_cmdque) && 8049 (un->un_f_arq_enabled == TRUE)) { 8050 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 8051 1, 1) == 1) { 8052 un->un_tagflags = FLAG_STAG; 8053 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8054 "sd_unit_attach: un:0x%p tag queueing " 8055 "enabled\n", un); 8056 } else if (scsi_ifgetcap(SD_ADDRESS(un), 8057 "untagged-qing", 0) == 1) { 8058 un->un_f_opt_queueing = TRUE; 8059 un->un_saved_throttle = un->un_throttle = 8060 min(un->un_throttle, 3); 8061 } else { 8062 un->un_f_opt_queueing = FALSE; 8063 un->un_saved_throttle = un->un_throttle = 1; 8064 } 8065 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0) 8066 == 1) && (un->un_f_arq_enabled == TRUE)) { 8067 /* The Host Adapter supports internal queueing. */ 8068 un->un_f_opt_queueing = TRUE; 8069 un->un_saved_throttle = un->un_throttle = 8070 min(un->un_throttle, 3); 8071 } else { 8072 un->un_f_opt_queueing = FALSE; 8073 un->un_saved_throttle = un->un_throttle = 1; 8074 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8075 "sd_unit_attach: un:0x%p no tag queueing\n", un); 8076 } 8077 8078 8079 /* Setup or tear down default wide operations for disks */ 8080 8081 /* 8082 * Note: Legacy: it may be possible for both "sd_max_xfer_size" 8083 * and "ssd_max_xfer_size" to exist simultaneously on the same 8084 * system and be set to different values. In the future this 8085 * code may need to be updated when the ssd module is 8086 * obsoleted and removed from the system. (4299588) 8087 */ 8088 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && 8089 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) { 8090 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 8091 1, 1) == 1) { 8092 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8093 "sd_unit_attach: un:0x%p Wide Transfer " 8094 "enabled\n", un); 8095 } 8096 8097 /* 8098 * If tagged queuing has also been enabled, then 8099 * enable large xfers 8100 */ 8101 if (un->un_saved_throttle == sd_max_throttle) { 8102 un->un_max_xfer_size = 8103 ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8104 sd_max_xfer_size, SD_MAX_XFER_SIZE); 8105 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8106 "sd_unit_attach: un:0x%p max transfer " 8107 "size=0x%x\n", un, un->un_max_xfer_size); 8108 } 8109 } else { 8110 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 8111 0, 1) == 1) { 8112 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8113 "sd_unit_attach: un:0x%p " 8114 "Wide Transfer disabled\n", un); 8115 } 8116 } 8117 } else { 8118 un->un_tagflags = FLAG_STAG; 8119 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY, 8120 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE); 8121 } 8122 8123 /* 8124 * If this target supports LUN reset, try to enable it. 8125 */ 8126 if (un->un_f_lun_reset_enabled) { 8127 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) { 8128 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 8129 "un:0x%p lun_reset capability set\n", un); 8130 } else { 8131 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: " 8132 "un:0x%p lun-reset capability not set\n", un); 8133 } 8134 } 8135 8136 /* 8137 * At this point in the attach, we have enough info in the 8138 * soft state to be able to issue commands to the target. 8139 * 8140 * All command paths used below MUST issue their commands as 8141 * SD_PATH_DIRECT. This is important as intermediate layers 8142 * are not all initialized yet (such as PM). 8143 */ 8144 8145 /* 8146 * Send a TEST UNIT READY command to the device. This should clear 8147 * any outstanding UNIT ATTENTION that may be present. 
8148 * 8149 * Note: Don't check for success, just track if there is a reservation, 8150 * this is a throw away command to clear any unit attentions. 8151 * 8152 * Note: This MUST be the first command issued to the target during 8153 * attach to ensure power on UNIT ATTENTIONS are cleared. 8154 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated 8155 * with attempts at spinning up a device with no media. 8156 */ 8157 if (sd_send_scsi_TEST_UNIT_READY(un, SD_DONT_RETRY_TUR) == EACCES) { 8158 reservation_flag = SD_TARGET_IS_RESERVED; 8159 } 8160 8161 /* 8162 * If the device is NOT a removable media device, attempt to spin 8163 * it up (using the START_STOP_UNIT command) and read its capacity 8164 * (using the READ CAPACITY command). Note, however, that either 8165 * of these could fail and in some cases we would continue with 8166 * the attach despite the failure (see below). 8167 */ 8168 if (devp->sd_inq->inq_dtype == DTYPE_DIRECT && !ISREMOVABLE(un)) { 8169 switch (sd_spin_up_unit(un)) { 8170 case 0: 8171 /* 8172 * Spin-up was successful; now try to read the 8173 * capacity. If successful then save the results 8174 * and mark the capacity & lbasize as valid. 8175 */ 8176 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8177 "sd_unit_attach: un:0x%p spin-up successful\n", un); 8178 8179 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, 8180 &lbasize, SD_PATH_DIRECT)) { 8181 case 0: { 8182 if (capacity > DK_MAX_BLOCKS) { 8183 #ifdef _LP64 8184 /* 8185 * Enable descriptor format sense data 8186 * so that we can get 64 bit sense 8187 * data fields. 8188 */ 8189 sd_enable_descr_sense(un); 8190 #else 8191 /* 32-bit kernels can't handle this */ 8192 scsi_log(SD_DEVINFO(un), 8193 sd_label, CE_WARN, 8194 "disk has %llu blocks, which " 8195 "is too large for a 32-bit " 8196 "kernel", capacity); 8197 goto spinup_failed; 8198 #endif 8199 } 8200 /* 8201 * The following relies on 8202 * sd_send_scsi_READ_CAPACITY never 8203 * returning 0 for capacity and/or lbasize. 8204 */ 8205 sd_update_block_info(un, lbasize, capacity); 8206 8207 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8208 "sd_unit_attach: un:0x%p capacity = %ld " 8209 "blocks; lbasize= %ld.\n", un, 8210 un->un_blockcount, un->un_tgt_blocksize); 8211 8212 break; 8213 } 8214 case EACCES: 8215 /* 8216 * Should never get here if the spin-up 8217 * succeeded, but code it in anyway. 8218 * From here, just continue with the attach... 8219 */ 8220 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8221 "sd_unit_attach: un:0x%p " 8222 "sd_send_scsi_READ_CAPACITY " 8223 "returned reservation conflict\n", un); 8224 reservation_flag = SD_TARGET_IS_RESERVED; 8225 break; 8226 default: 8227 /* 8228 * Likewise, should never get here if the 8229 * spin-up succeeded. Just continue with 8230 * the attach... 8231 */ 8232 break; 8233 } 8234 break; 8235 case EACCES: 8236 /* 8237 * Device is reserved by another host. In this case 8238 * we could not spin it up or read the capacity, but 8239 * we continue with the attach anyway. 8240 */ 8241 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8242 "sd_unit_attach: un:0x%p spin-up reservation " 8243 "conflict.\n", un); 8244 reservation_flag = SD_TARGET_IS_RESERVED; 8245 break; 8246 default: 8247 /* Fail the attach if the spin-up failed. */ 8248 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8249 "sd_unit_attach: un:0x%p spin-up failed.", un); 8250 goto spinup_failed; 8251 } 8252 } 8253 8254 /* 8255 * Check to see if this is a MMC drive 8256 */ 8257 if (ISCD(un)) { 8258 sd_set_mmc_caps(un); 8259 } 8260 8261 /* 8262 * Create the minor nodes for the device. 
* Note: If we want to support fdisk on both sparc and intel, this will 8264 * have to separate out the notion that VTOC8 is always sparc, and 8265 * VTOC16 is always intel (though these can be the defaults). The vtoc 8266 * type will have to be determined at run-time, and the fdisk 8267 * partitioning will have to have been read & set up before we 8268 * create the minor nodes. (any other inits (such as kstats) that 8269 * also ought to be done before creating the minor nodes?) (Doesn't 8270 * setting up the minor nodes kind of imply that we're ready to 8271 * handle an open from userland?) 8272 */ 8273 if (sd_create_minor_nodes(un, devi) != DDI_SUCCESS) { 8274 goto create_minor_nodes_failed; 8275 } 8276 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8277 "sd_unit_attach: un:0x%p minor nodes created\n", un); 8278 8279 /* 8280 * Add a zero-length attribute to tell the world we support 8281 * kernel ioctls (for layered drivers) 8282 */ 8283 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8284 DDI_KERNEL_IOCTL, NULL, 0); 8285 8286 /* 8287 * Add a boolean property to tell the world we support 8288 * the B_FAILFAST flag (for layered drivers) 8289 */ 8290 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP, 8291 "ddi-failfast-supported", NULL, 0); 8292 8293 /* 8294 * Initialize power management 8295 */ 8296 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL); 8297 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL); 8298 sd_setup_pm(un, devi); 8299 if (un->un_f_pm_is_enabled == FALSE) { 8300 /* 8301 * For performance, point to a jump table that does 8302 * not include pm. 8303 * The direct and priority chains don't change with PM. 8304 * 8305 * Note: this is currently done based on individual device 8306 * capabilities. When an interface for determining system 8307 * power enabled state becomes available, or when additional 8308 * layers are added to the command chain, these values will 8309 * have to be re-evaluated for correctness. 8310 */ 8311 if (ISREMOVABLE(un)) { 8312 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM; 8313 } else { 8314 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM; 8315 } 8316 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 8317 } 8318 8319 /* 8320 * This property is set to 0 by HA software to avoid retries 8321 * on a reserved disk. (The preferred property name is 8322 * "retry-on-reservation-conflict") (1189689) 8323 * 8324 * Note: The use of a global here can have unintended consequences. A 8325 * per-instance variable is preferable to match the capabilities of 8326 * different underlying hba's (4402600) 8327 */ 8328 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi, 8329 DDI_PROP_DONTPASS, "retry-on-reservation-conflict", 8330 sd_retry_on_reservation_conflict); 8331 if (sd_retry_on_reservation_conflict != 0) { 8332 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, 8333 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name, 8334 sd_retry_on_reservation_conflict); 8335 } 8336 8337 /* Set up options for QFULL handling. */ 8338 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8339 "qfull-retries", -1)) != -1) { 8340 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries", 8341 rval, 1); 8342 } 8343 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0, 8344 "qfull-retry-interval", -1)) != -1) { 8345 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval", 8346 rval, 1); 8347 } 8348 8349 /* 8350 * This just prints a message that announces the existence of the 8351 * device.
The message is always printed in the system logfile, but 8352 * only appears on the console if the system is booted with the 8353 * -v (verbose) argument. 8354 */ 8355 ddi_report_dev(devi); 8356 8357 /* 8358 * The framework calls driver attach routines single-threaded 8359 * for a given instance. However we still acquire SD_MUTEX here 8360 * because this is required for calling the sd_validate_geometry() 8361 * and sd_register_devid() functions. 8362 */ 8363 mutex_enter(SD_MUTEX(un)); 8364 un->un_f_geometry_is_valid = FALSE; 8365 un->un_mediastate = DKIO_NONE; 8366 un->un_reserved = -1; 8367 if (!ISREMOVABLE(un)) { 8368 /* 8369 * Read and validate the device's geometry (ie, disk label). 8370 * A new unformatted drive will not have a valid geometry, but 8371 * the driver needs to successfully attach to this device so 8372 * the drive can be formatted via ioctls. 8373 */ 8374 if (((sd_validate_geometry(un, SD_PATH_DIRECT) == 8375 ENOTSUP)) && 8376 (un->un_blockcount < DK_MAX_BLOCKS)) { 8377 /* 8378 * We found a small disk with an EFI label on it; 8379 * we need to fix up the minor nodes accordingly. 8380 */ 8381 ddi_remove_minor_node(devi, "h"); 8382 ddi_remove_minor_node(devi, "h,raw"); 8383 (void) ddi_create_minor_node(devi, "wd", 8384 S_IFBLK, 8385 (instance << SDUNIT_SHIFT) | WD_NODE, 8386 un->un_node_type, NULL); 8387 (void) ddi_create_minor_node(devi, "wd,raw", 8388 S_IFCHR, 8389 (instance << SDUNIT_SHIFT) | WD_NODE, 8390 un->un_node_type, NULL); 8391 } 8392 } 8393 8394 /* 8395 * Read and initialize the devid for the unit. 8396 */ 8397 ASSERT(un->un_errstats != NULL); 8398 if (!ISREMOVABLE(un)) { 8399 sd_register_devid(un, devi, reservation_flag); 8400 } 8401 mutex_exit(SD_MUTEX(un)); 8402 8403 #if (defined(__fibre)) 8404 /* 8405 * Register callbacks for fibre only. You can't do this solely 8406 * on the basis of the devid_type because this is HBA specific. 8407 * We need to query our HBA capabilities to find out whether to 8408 * register or not. 8409 */ 8410 if (un->un_f_is_fibre) { 8411 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) { 8412 sd_init_event_callbacks(un); 8413 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8414 "sd_unit_attach: un:0x%p event callbacks inserted", un); 8415 } 8416 } 8417 #endif 8418 8419 if (un->un_f_opt_disable_cache == TRUE) { 8420 if (sd_disable_caching(un) != 0) { 8421 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8422 "sd_unit_attach: un:0x%p Could not disable " 8423 "caching", un); 8424 goto devid_failed; 8425 } 8426 } 8427 8428 /* 8429 * Set the pstat and error stat values here, so data obtained by the 8430 * previous attach-time routines is available. 8431 * 8432 * Note: This is a critical sequence that needs to be maintained: 8433 * 1) Instantiate the kstats before any routines using the iopath 8434 * (i.e. sd_send_scsi_cmd). 8435 * 2) Initialize the error stats (sd_set_errstats) and partition 8436 * stats (sd_set_pstats) here, following sd_validate_geometry(), 8437 * sd_register_devid(), and sd_disable_caching(). 8438 */ 8439 if (!ISREMOVABLE(un) && (un->un_f_pkstats_enabled == TRUE)) { 8440 sd_set_pstats(un); 8441 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8442 "sd_unit_attach: un:0x%p pstats created and set\n", un); 8443 } 8444 8445 sd_set_errstats(un); 8446 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8447 "sd_unit_attach: un:0x%p errstats set\n", un); 8448 8449 /* 8450 * Find out what type of reservation this disk supports. 8451 */ 8452 switch (sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 0, NULL)) { 8453 case 0: 8454 /* 8455 * SCSI-3 reservations are supported.
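 * (A successful PERSISTENT RESERVE IN with the READ KEYS service
 * action implies the target implements SCSI-3 persistent
 * reservations; a SCSI-2-only target is expected to reject the
 * command, which is reported back here as ENOTSUP and handled in
 * the next case.)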
8456 */ 8457 un->un_reservation_type = SD_SCSI3_RESERVATION; 8458 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8459 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un); 8460 break; 8461 case ENOTSUP: 8462 /* 8463 * The PERSISTENT RESERVE IN command would not be recognized by 8464 * a SCSI-2 device, so assume the reservation type is SCSI-2. 8465 */ 8466 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8467 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un); 8468 un->un_reservation_type = SD_SCSI2_RESERVATION; 8469 break; 8470 default: 8471 /* 8472 * default to SCSI-3 reservations 8473 */ 8474 SD_INFO(SD_LOG_ATTACH_DETACH, un, 8475 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un); 8476 un->un_reservation_type = SD_SCSI3_RESERVATION; 8477 break; 8478 } 8479 8480 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 8481 "sd_unit_attach: un:0x%p exit success\n", un); 8482 8483 return (DDI_SUCCESS); 8484 8485 /* 8486 * An error occurred during the attach; clean up & return failure. 8487 */ 8488 8489 devid_failed: 8490 8491 setup_pm_failed: 8492 ddi_remove_minor_node(devi, NULL); 8493 8494 create_minor_nodes_failed: 8495 /* 8496 * Cleanup from the scsi_ifsetcap() calls (437868) 8497 */ 8498 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1); 8499 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1); 8500 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 8501 8502 if (un->un_f_is_fibre == FALSE) { 8503 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1); 8504 } 8505 8506 spinup_failed: 8507 8508 mutex_enter(SD_MUTEX(un)); 8509 8510 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */ 8511 if (un->un_direct_priority_timeid != NULL) { 8512 timeout_id_t temp_id = un->un_direct_priority_timeid; 8513 un->un_direct_priority_timeid = NULL; 8514 mutex_exit(SD_MUTEX(un)); 8515 (void) untimeout(temp_id); 8516 mutex_enter(SD_MUTEX(un)); 8517 } 8518 8519 /* Cancel any pending start/stop timeouts */ 8520 if (un->un_startstop_timeid != NULL) { 8521 timeout_id_t temp_id = un->un_startstop_timeid; 8522 un->un_startstop_timeid = NULL; 8523 mutex_exit(SD_MUTEX(un)); 8524 (void) untimeout(temp_id); 8525 mutex_enter(SD_MUTEX(un)); 8526 } 8527 8528 mutex_exit(SD_MUTEX(un)); 8529 8530 /* There should not be any in-progress I/O so ASSERT this check */ 8531 ASSERT(un->un_ncmds_in_transport == 0); 8532 ASSERT(un->un_ncmds_in_driver == 0); 8533 8534 /* Do not free the softstate if the callback routine is active */ 8535 sd_sync_with_callback(un); 8536 8537 /* 8538 * Partition stats apparently are not used with removables. These would 8539 * not have been created during attach, so no need to clean them up... 8540 */ 8541 if (un->un_stats != NULL) { 8542 kstat_delete(un->un_stats); 8543 un->un_stats = NULL; 8544 } 8545 if (un->un_errstats != NULL) { 8546 kstat_delete(un->un_errstats); 8547 un->un_errstats = NULL; 8548 } 8549 8550 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 8551 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 8552 8553 ddi_prop_remove_all(devi); 8554 sema_destroy(&un->un_semoclose); 8555 cv_destroy(&un->un_state_cv); 8556 8557 getrbuf_failed: 8558 8559 sd_free_rqs(un); 8560 8561 alloc_rqs_failed: 8562 8563 devp->sd_private = NULL; 8564 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */ 8565 8566 get_softstate_failed: 8567 /* 8568 * Note: the man pages are unclear as to whether or not doing a 8569 * ddi_soft_state_free(sd_state, instance) is the right way to 8570 * clean up after the ddi_soft_state_zalloc() if the subsequent 8571 * ddi_get_soft_state() fails. 
The implication seems to be 8572 * that the get_soft_state cannot fail if the zalloc succeeds. 8573 */ 8574 ddi_soft_state_free(sd_state, instance); 8575 8576 probe_failed: 8577 scsi_unprobe(devp); 8578 #ifdef SDDEBUG 8579 if ((sd_component_mask & SD_LOG_ATTACH_DETACH) && 8580 (sd_level_mask & SD_LOGMASK_TRACE)) { 8581 cmn_err(CE_CONT, "sd_unit_attach: un:0x%p exit failure\n", 8582 (void *)un); 8583 } 8584 #endif 8585 return (DDI_FAILURE); 8586 } 8587 8588 8589 /* 8590 * Function: sd_unit_detach 8591 * 8592 * Description: Performs DDI_DETACH processing for sddetach(). 8593 * 8594 * Return Code: DDI_SUCCESS 8595 * DDI_FAILURE 8596 * 8597 * Context: Kernel thread context 8598 */ 8599 8600 static int 8601 sd_unit_detach(dev_info_t *devi) 8602 { 8603 struct scsi_device *devp; 8604 struct sd_lun *un; 8605 int i; 8606 dev_t dev; 8607 #if !(defined(__i386) || defined(__amd64)) && !defined(__fibre) 8608 int reset_retval; 8609 #endif 8610 int instance = ddi_get_instance(devi); 8611 8612 mutex_enter(&sd_detach_mutex); 8613 8614 /* 8615 * Fail the detach for any of the following: 8616 * - Unable to get the sd_lun struct for the instance 8617 * - A layered driver has an outstanding open on the instance 8618 * - Another thread is already detaching this instance 8619 * - Another thread is currently performing an open 8620 */ 8621 devp = ddi_get_driver_private(devi); 8622 if ((devp == NULL) || 8623 ((un = (struct sd_lun *)devp->sd_private) == NULL) || 8624 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) || 8625 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) { 8626 mutex_exit(&sd_detach_mutex); 8627 return (DDI_FAILURE); 8628 } 8629 8630 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un); 8631 8632 /* 8633 * Mark this instance as currently in a detach, to inhibit any 8634 * opens from a layered driver. 8635 */ 8636 un->un_detach_count++; 8637 mutex_exit(&sd_detach_mutex); 8638 8639 dev = sd_make_device(SD_DEVINFO(un)); 8640 8641 _NOTE(COMPETING_THREADS_NOW); 8642 8643 mutex_enter(SD_MUTEX(un)); 8644 8645 /* 8646 * Fail the detach if there are any outstanding layered 8647 * opens on this device. 8648 */ 8649 for (i = 0; i < NDKMAP; i++) { 8650 if (un->un_ocmap.lyropen[i] != 0) { 8651 goto err_notclosed; 8652 } 8653 } 8654 8655 /* 8656 * Verify there are NO outstanding commands issued to this device. 8657 * ie, un_ncmds_in_transport == 0. 8658 * It's possible to have outstanding commands through the physio 8659 * code path, even though everything's closed. 8660 */ 8661 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) || 8662 (un->un_direct_priority_timeid != NULL) || 8663 (un->un_state == SD_STATE_RWAIT)) { 8664 mutex_exit(SD_MUTEX(un)); 8665 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8666 "sd_dr_detach: Detach failure due to outstanding cmds\n"); 8667 goto err_stillbusy; 8668 } 8669 8670 /* 8671 * If we have the device reserved, release the reservation. 8672 */ 8673 if ((un->un_resvd_status & SD_RESERVE) && 8674 !(un->un_resvd_status & SD_LOST_RESERVE)) { 8675 mutex_exit(SD_MUTEX(un)); 8676 /* 8677 * Note: sd_reserve_release sends a command to the device 8678 * via the sd_ioctlcmd() path, and can sleep. 8679 */ 8680 if (sd_reserve_release(dev, SD_RELEASE) != 0) { 8681 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8682 "sd_dr_detach: Cannot release reservation \n"); 8683 } 8684 } else { 8685 mutex_exit(SD_MUTEX(un)); 8686 } 8687 8688 /* 8689 * Untimeout any reserve recover, throttle reset, restart unit 8690 * and delayed broadcast timeout threads. 
Protect the timeout pointer 8691 * from getting nulled by their callback functions. 8692 */ 8693 mutex_enter(SD_MUTEX(un)); 8694 if (un->un_resvd_timeid != NULL) { 8695 timeout_id_t temp_id = un->un_resvd_timeid; 8696 un->un_resvd_timeid = NULL; 8697 mutex_exit(SD_MUTEX(un)); 8698 (void) untimeout(temp_id); 8699 mutex_enter(SD_MUTEX(un)); 8700 } 8701 8702 if (un->un_reset_throttle_timeid != NULL) { 8703 timeout_id_t temp_id = un->un_reset_throttle_timeid; 8704 un->un_reset_throttle_timeid = NULL; 8705 mutex_exit(SD_MUTEX(un)); 8706 (void) untimeout(temp_id); 8707 mutex_enter(SD_MUTEX(un)); 8708 } 8709 8710 if (un->un_startstop_timeid != NULL) { 8711 timeout_id_t temp_id = un->un_startstop_timeid; 8712 un->un_startstop_timeid = NULL; 8713 mutex_exit(SD_MUTEX(un)); 8714 (void) untimeout(temp_id); 8715 mutex_enter(SD_MUTEX(un)); 8716 } 8717 8718 if (un->un_dcvb_timeid != NULL) { 8719 timeout_id_t temp_id = un->un_dcvb_timeid; 8720 un->un_dcvb_timeid = NULL; 8721 mutex_exit(SD_MUTEX(un)); 8722 (void) untimeout(temp_id); 8723 } else { 8724 mutex_exit(SD_MUTEX(un)); 8725 } 8726 8727 /* Remove any pending reservation reclaim requests for this device */ 8728 sd_rmv_resv_reclaim_req(dev); 8729 8730 mutex_enter(SD_MUTEX(un)); 8731 8732 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */ 8733 if (un->un_direct_priority_timeid != NULL) { 8734 timeout_id_t temp_id = un->un_direct_priority_timeid; 8735 un->un_direct_priority_timeid = NULL; 8736 mutex_exit(SD_MUTEX(un)); 8737 (void) untimeout(temp_id); 8738 mutex_enter(SD_MUTEX(un)); 8739 } 8740 8741 /* Cancel any active multi-host disk watch thread requests */ 8742 if (un->un_mhd_token != NULL) { 8743 mutex_exit(SD_MUTEX(un)); 8744 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token)); 8745 if (scsi_watch_request_terminate(un->un_mhd_token, 8746 SCSI_WATCH_TERMINATE_NOWAIT)) { 8747 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8748 "sd_dr_detach: Cannot cancel mhd watch request\n"); 8749 /* 8750 * Note: We are returning here after having removed 8751 * some driver timeouts above. This is consistent with 8752 * the legacy implementation but perhaps the watch 8753 * terminate call should be made with the wait flag set. 8754 */ 8755 goto err_stillbusy; 8756 } 8757 mutex_enter(SD_MUTEX(un)); 8758 un->un_mhd_token = NULL; 8759 } 8760 8761 if (un->un_swr_token != NULL) { 8762 mutex_exit(SD_MUTEX(un)); 8763 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token)); 8764 if (scsi_watch_request_terminate(un->un_swr_token, 8765 SCSI_WATCH_TERMINATE_NOWAIT)) { 8766 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 8767 "sd_dr_detach: Cannot cancel swr watch request\n"); 8768 /* 8769 * Note: We are returning here after having removed 8770 * some driver timeouts above. This is consistent with 8771 * the legacy implementation but perhaps the watch 8772 * terminate call should be made with the wait flag set. 8773 */ 8774 goto err_stillbusy; 8775 } 8776 mutex_enter(SD_MUTEX(un)); 8777 un->un_swr_token = NULL; 8778 } 8779 8780 mutex_exit(SD_MUTEX(un)); 8781 8782 /* 8783 * Clear any scsi_reset_notifies. We clear the reset notifies 8784 * if we have not registered one. 8785 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX! 8786 */ 8787 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 8788 sd_mhd_reset_notify_cb, (caddr_t)un); 8789 8790 8791 8792 #if defined(__i386) || defined(__amd64) 8793 /* 8794 * Gratuitous bus resets sometimes cause an otherwise 8795 * okay ATA/ATAPI bus to hang. 
This is due to the lack of
8796 * a clear spec of how resets should be implemented by ATA
8797 * disk drives.
8798 */
8799 #elif !defined(__fibre) /* "#else if" does NOT work! */
8800 /*
8801 * Reset target/bus.
8802 *
8803 * Note: This is a legacy workaround for Elite III dual-port drives that
8804 * will not come online after an aborted detach and subsequent re-attach.
8805 * It should be removed when the Elite III FW is fixed, or the drives
8806 * are no longer supported.
8807 */
8808 if (un->un_f_cfg_is_atapi == FALSE) {
8809 reset_retval = 0;
8810 
8811 /* If the device is in low power mode don't reset it */
8812 
8813 mutex_enter(&un->un_pm_mutex);
8814 if (!SD_DEVICE_IS_IN_LOW_POWER(un)) {
8815 /*
8816 * First try a LUN reset if we can, then move on to a
8817 * target reset if needed; swat the bus as a last
8818 * resort.
8819 */
8820 mutex_exit(&un->un_pm_mutex);
8821 if (un->un_f_allow_bus_device_reset == TRUE) {
8822 if (un->un_f_lun_reset_enabled == TRUE) {
8823 reset_retval =
8824 scsi_reset(SD_ADDRESS(un),
8825 RESET_LUN);
8826 }
8827 if (reset_retval == 0) {
8828 reset_retval =
8829 scsi_reset(SD_ADDRESS(un),
8830 RESET_TARGET);
8831 }
8832 }
8833 if (reset_retval == 0) {
8834 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
8835 }
8836 } else {
8837 mutex_exit(&un->un_pm_mutex);
8838 }
8839 }
8840 #endif
8841 
8842 /*
8843 * Protect the timeout pointers from getting nulled by
8844 * their callback functions during the cancellation process.
8845 * In such a scenario untimeout can be invoked with a null value.
8846 */
8847 _NOTE(NO_COMPETING_THREADS_NOW);
8848 
8849 mutex_enter(&un->un_pm_mutex);
8850 if (un->un_pm_idle_timeid != NULL) {
8851 timeout_id_t temp_id = un->un_pm_idle_timeid;
8852 un->un_pm_idle_timeid = NULL;
8853 mutex_exit(&un->un_pm_mutex);
8854 
8855 /*
8856 * Timeout is active; cancel it.
8857 * Note that it'll never be active on a device
8858 * that does not support PM therefore we don't
8859 * have to check before calling pm_idle_component.
8860 */
8861 (void) untimeout(temp_id);
8862 (void) pm_idle_component(SD_DEVINFO(un), 0);
8863 mutex_enter(&un->un_pm_mutex);
8864 }
8865 
8866 /*
8867 * Check whether there is already a timeout scheduled for power
8868 * management. If yes then don't lower the power here, that's
8869 * the timeout handler's job.
8870 */
8871 if (un->un_pm_timeid != NULL) {
8872 timeout_id_t temp_id = un->un_pm_timeid;
8873 un->un_pm_timeid = NULL;
8874 mutex_exit(&un->un_pm_mutex);
8875 /*
8876 * Timeout is active; cancel it.
8877 * Note that it'll never be active on a device
8878 * that does not support PM therefore we don't
8879 * have to check before calling pm_idle_component.
8880 */
8881 (void) untimeout(temp_id);
8882 (void) pm_idle_component(SD_DEVINFO(un), 0);
8883 
8884 } else {
8885 mutex_exit(&un->un_pm_mutex);
8886 if ((un->un_f_pm_is_enabled == TRUE) &&
8887 (pm_lower_power(SD_DEVINFO(un), 0, SD_SPINDLE_OFF) !=
8888 DDI_SUCCESS)) {
8889 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8890 "sd_dr_detach: Lower power request failed, ignoring.\n");
8891 /*
8892 * Fix for bug: 4297749, item # 13
8893 * The above test now includes a check to see if PM is
8894 * supported by this device before calling
8895 * pm_lower_power().
8896 * Note, the following is not dead code. The call to
8897 * pm_lower_power above will generate a call back into
8898 * our sdpower routine which might result in a timeout
8899 * handler getting activated. Therefore the following
8900 * code is valid and necessary.
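*
* Illustrative call sequence (a sketch for the reader, not a
* trace of actual execution):
*
*	pm_lower_power()
*	  -> sdpower()		(may arm a PM timeout)
*	       -> timeout(...)	(sets un->un_pm_timeid)
*
* which is why un_pm_timeid is re-checked and cancelled below.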
8901 */
8902 mutex_enter(&un->un_pm_mutex);
8903 if (un->un_pm_timeid != NULL) {
8904 timeout_id_t temp_id = un->un_pm_timeid;
8905 un->un_pm_timeid = NULL;
8906 mutex_exit(&un->un_pm_mutex);
8907 (void) untimeout(temp_id);
8908 (void) pm_idle_component(SD_DEVINFO(un), 0);
8909 } else {
8910 mutex_exit(&un->un_pm_mutex);
8911 }
8912 }
8913 }
8914 
8915 /*
8916 * Cleanup from the scsi_ifsetcap() calls (437868)
8917 * Relocated here from above to be after the call to
8918 * pm_lower_power, which was getting errors.
8919 */
8920 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
8921 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
8922 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
8923 
8924 if (un->un_f_is_fibre == FALSE) {
8925 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
8926 }
8927 
8928 /*
8929 * Remove any event callbacks, fibre only
8930 */
8931 if (un->un_f_is_fibre == TRUE) {
8932 if ((un->un_insert_event != NULL) &&
8933 (ddi_remove_event_handler(un->un_insert_cb_id) !=
8934 DDI_SUCCESS)) {
8935 /*
8936 * Note: We are returning here after having done
8937 * substantial cleanup above. This is consistent
8938 * with the legacy implementation but this may not
8939 * be the right thing to do.
8940 */
8941 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8942 "sd_dr_detach: Cannot cancel insert event\n");
8943 goto err_remove_event;
8944 }
8945 un->un_insert_event = NULL;
8946 
8947 if ((un->un_remove_event != NULL) &&
8948 (ddi_remove_event_handler(un->un_remove_cb_id) !=
8949 DDI_SUCCESS)) {
8950 /*
8951 * Note: We are returning here after having done
8952 * substantial cleanup above. This is consistent
8953 * with the legacy implementation but this may not
8954 * be the right thing to do.
8955 */
8956 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8957 "sd_dr_detach: Cannot cancel remove event\n");
8958 goto err_remove_event;
8959 }
8960 un->un_remove_event = NULL;
8961 }
8962 
8963 /* Do not free the softstate if the callback routine is active */
8964 sd_sync_with_callback(un);
8965 
8966 /*
8967 * Hold the detach mutex here, to make sure that no other threads ever
8968 * can access a (partially) freed soft state structure.
8969 */
8970 mutex_enter(&sd_detach_mutex);
8971 
8972 /*
8973 * Clean up the soft state struct.
8974 * Cleanup is done in reverse order of allocs/inits.
8975 * At this point there should be no competing threads anymore.
8976 */
8977 
8978 /* Unregister and free device id. */
8979 ddi_devid_unregister(devi);
8980 if (un->un_devid) {
8981 ddi_devid_free(un->un_devid);
8982 un->un_devid = NULL;
8983 }
8984 
8985 /*
8986 * Destroy wmap cache if it exists.
8987 */
8988 if (un->un_wm_cache != NULL) {
8989 kmem_cache_destroy(un->un_wm_cache);
8990 un->un_wm_cache = NULL;
8991 }
8992 
8993 /* Remove minor nodes */
8994 ddi_remove_minor_node(devi, NULL);
8995 
8996 /*
8997 * kstat cleanup is done in detach for all device types (4363169).
8998 * We do not want to fail detach if the device kstats are not deleted
8999 * since there is confusion about the devo_refcnt for the device.
9000 * We just delete the kstats and let detach complete successfully.
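*
* (Descriptive note: the NULL checks below also keep the deletion
* single-shot -- each pointer is cleared as soon as its kstat is
* deleted, so kstat_delete() is never handed a stale pointer.)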
9001 */ 9002 if (un->un_stats != NULL) { 9003 kstat_delete(un->un_stats); 9004 un->un_stats = NULL; 9005 } 9006 if (un->un_errstats != NULL) { 9007 kstat_delete(un->un_errstats); 9008 un->un_errstats = NULL; 9009 } 9010 9011 /* Remove partition stats (not created for removables) */ 9012 if (!ISREMOVABLE(un)) { 9013 for (i = 0; i < NSDMAP; i++) { 9014 if (un->un_pstats[i] != NULL) { 9015 kstat_delete(un->un_pstats[i]); 9016 un->un_pstats[i] = NULL; 9017 } 9018 } 9019 } 9020 9021 /* Remove xbuf registration */ 9022 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi); 9023 ddi_xbuf_attr_destroy(un->un_xbuf_attr); 9024 9025 /* Remove driver properties */ 9026 ddi_prop_remove_all(devi); 9027 9028 mutex_destroy(&un->un_pm_mutex); 9029 cv_destroy(&un->un_pm_busy_cv); 9030 9031 /* Open/close semaphore */ 9032 sema_destroy(&un->un_semoclose); 9033 9034 /* Removable media condvar. */ 9035 cv_destroy(&un->un_state_cv); 9036 9037 /* Suspend/resume condvar. */ 9038 cv_destroy(&un->un_suspend_cv); 9039 cv_destroy(&un->un_disk_busy_cv); 9040 9041 sd_free_rqs(un); 9042 9043 /* Free up soft state */ 9044 devp->sd_private = NULL; 9045 bzero(un, sizeof (struct sd_lun)); 9046 ddi_soft_state_free(sd_state, instance); 9047 9048 mutex_exit(&sd_detach_mutex); 9049 9050 /* This frees up the INQUIRY data associated with the device. */ 9051 scsi_unprobe(devp); 9052 9053 return (DDI_SUCCESS); 9054 9055 err_notclosed: 9056 mutex_exit(SD_MUTEX(un)); 9057 9058 err_stillbusy: 9059 _NOTE(NO_COMPETING_THREADS_NOW); 9060 9061 err_remove_event: 9062 mutex_enter(&sd_detach_mutex); 9063 un->un_detach_count--; 9064 mutex_exit(&sd_detach_mutex); 9065 9066 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n"); 9067 return (DDI_FAILURE); 9068 } 9069 9070 9071 /* 9072 * Driver minor node structure and data table 9073 */ 9074 struct driver_minor_data { 9075 char *name; 9076 minor_t minor; 9077 int type; 9078 }; 9079 9080 static struct driver_minor_data sd_minor_data[] = { 9081 {"a", 0, S_IFBLK}, 9082 {"b", 1, S_IFBLK}, 9083 {"c", 2, S_IFBLK}, 9084 {"d", 3, S_IFBLK}, 9085 {"e", 4, S_IFBLK}, 9086 {"f", 5, S_IFBLK}, 9087 {"g", 6, S_IFBLK}, 9088 {"h", 7, S_IFBLK}, 9089 #if defined(_SUNOS_VTOC_16) 9090 {"i", 8, S_IFBLK}, 9091 {"j", 9, S_IFBLK}, 9092 {"k", 10, S_IFBLK}, 9093 {"l", 11, S_IFBLK}, 9094 {"m", 12, S_IFBLK}, 9095 {"n", 13, S_IFBLK}, 9096 {"o", 14, S_IFBLK}, 9097 {"p", 15, S_IFBLK}, 9098 #endif /* defined(_SUNOS_VTOC_16) */ 9099 #if defined(_FIRMWARE_NEEDS_FDISK) 9100 {"q", 16, S_IFBLK}, 9101 {"r", 17, S_IFBLK}, 9102 {"s", 18, S_IFBLK}, 9103 {"t", 19, S_IFBLK}, 9104 {"u", 20, S_IFBLK}, 9105 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9106 {"a,raw", 0, S_IFCHR}, 9107 {"b,raw", 1, S_IFCHR}, 9108 {"c,raw", 2, S_IFCHR}, 9109 {"d,raw", 3, S_IFCHR}, 9110 {"e,raw", 4, S_IFCHR}, 9111 {"f,raw", 5, S_IFCHR}, 9112 {"g,raw", 6, S_IFCHR}, 9113 {"h,raw", 7, S_IFCHR}, 9114 #if defined(_SUNOS_VTOC_16) 9115 {"i,raw", 8, S_IFCHR}, 9116 {"j,raw", 9, S_IFCHR}, 9117 {"k,raw", 10, S_IFCHR}, 9118 {"l,raw", 11, S_IFCHR}, 9119 {"m,raw", 12, S_IFCHR}, 9120 {"n,raw", 13, S_IFCHR}, 9121 {"o,raw", 14, S_IFCHR}, 9122 {"p,raw", 15, S_IFCHR}, 9123 #endif /* defined(_SUNOS_VTOC_16) */ 9124 #if defined(_FIRMWARE_NEEDS_FDISK) 9125 {"q,raw", 16, S_IFCHR}, 9126 {"r,raw", 17, S_IFCHR}, 9127 {"s,raw", 18, S_IFCHR}, 9128 {"t,raw", 19, S_IFCHR}, 9129 {"u,raw", 20, S_IFCHR}, 9130 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9131 {0} 9132 }; 9133 9134 static struct driver_minor_data sd_minor_data_efi[] = { 9135 {"a", 0, S_IFBLK}, 9136 {"b", 1, 
S_IFBLK}, 9137 {"c", 2, S_IFBLK}, 9138 {"d", 3, S_IFBLK}, 9139 {"e", 4, S_IFBLK}, 9140 {"f", 5, S_IFBLK}, 9141 {"g", 6, S_IFBLK}, 9142 {"wd", 7, S_IFBLK}, 9143 #if defined(_FIRMWARE_NEEDS_FDISK) 9144 {"q", 16, S_IFBLK}, 9145 {"r", 17, S_IFBLK}, 9146 {"s", 18, S_IFBLK}, 9147 {"t", 19, S_IFBLK}, 9148 {"u", 20, S_IFBLK}, 9149 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9150 {"a,raw", 0, S_IFCHR}, 9151 {"b,raw", 1, S_IFCHR}, 9152 {"c,raw", 2, S_IFCHR}, 9153 {"d,raw", 3, S_IFCHR}, 9154 {"e,raw", 4, S_IFCHR}, 9155 {"f,raw", 5, S_IFCHR}, 9156 {"g,raw", 6, S_IFCHR}, 9157 {"wd,raw", 7, S_IFCHR}, 9158 #if defined(_FIRMWARE_NEEDS_FDISK) 9159 {"q,raw", 16, S_IFCHR}, 9160 {"r,raw", 17, S_IFCHR}, 9161 {"s,raw", 18, S_IFCHR}, 9162 {"t,raw", 19, S_IFCHR}, 9163 {"u,raw", 20, S_IFCHR}, 9164 #endif /* defined(_FIRMWARE_NEEDS_FDISK) */ 9165 {0} 9166 }; 9167 9168 9169 /* 9170 * Function: sd_create_minor_nodes 9171 * 9172 * Description: Create the minor device nodes for the instance. 9173 * 9174 * Arguments: un - driver soft state (unit) structure 9175 * devi - pointer to device info structure 9176 * 9177 * Return Code: DDI_SUCCESS 9178 * DDI_FAILURE 9179 * 9180 * Context: Kernel thread context 9181 */ 9182 9183 static int 9184 sd_create_minor_nodes(struct sd_lun *un, dev_info_t *devi) 9185 { 9186 struct driver_minor_data *dmdp; 9187 struct scsi_device *devp; 9188 int instance; 9189 char name[48]; 9190 9191 ASSERT(un != NULL); 9192 devp = ddi_get_driver_private(devi); 9193 instance = ddi_get_instance(devp->sd_dev); 9194 9195 /* 9196 * Create all the minor nodes for this target. 9197 */ 9198 if (un->un_blockcount > DK_MAX_BLOCKS) 9199 dmdp = sd_minor_data_efi; 9200 else 9201 dmdp = sd_minor_data; 9202 while (dmdp->name != NULL) { 9203 9204 (void) sprintf(name, "%s", dmdp->name); 9205 9206 if (ddi_create_minor_node(devi, name, dmdp->type, 9207 (instance << SDUNIT_SHIFT) | dmdp->minor, 9208 un->un_node_type, NULL) == DDI_FAILURE) { 9209 /* 9210 * Clean up any nodes that may have been created, in 9211 * case this fails in the middle of the loop. 9212 */ 9213 ddi_remove_minor_node(devi, NULL); 9214 return (DDI_FAILURE); 9215 } 9216 dmdp++; 9217 } 9218 9219 return (DDI_SUCCESS); 9220 } 9221 9222 9223 /* 9224 * Function: sd_create_errstats 9225 * 9226 * Description: This routine instantiates the device error stats. 9227 * 9228 * Note: During attach the stats are instantiated first so they are 9229 * available for attach-time routines that utilize the driver 9230 * iopath to send commands to the device. The stats are initialized 9231 * separately so data obtained during some attach-time routines is 9232 * available. 
(4362483) 9233 * 9234 * Arguments: un - driver soft state (unit) structure 9235 * instance - driver instance 9236 * 9237 * Context: Kernel thread context 9238 */ 9239 9240 static void 9241 sd_create_errstats(struct sd_lun *un, int instance) 9242 { 9243 struct sd_errstats *stp; 9244 char kstatmodule_err[KSTAT_STRLEN]; 9245 char kstatname[KSTAT_STRLEN]; 9246 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t)); 9247 9248 ASSERT(un != NULL); 9249 9250 if (un->un_errstats != NULL) { 9251 return; 9252 } 9253 9254 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err), 9255 "%serr", sd_label); 9256 (void) snprintf(kstatname, sizeof (kstatname), 9257 "%s%d,err", sd_label, instance); 9258 9259 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname, 9260 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT); 9261 9262 if (un->un_errstats == NULL) { 9263 SD_ERROR(SD_LOG_ATTACH_DETACH, un, 9264 "sd_create_errstats: Failed kstat_create\n"); 9265 return; 9266 } 9267 9268 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9269 kstat_named_init(&stp->sd_softerrs, "Soft Errors", 9270 KSTAT_DATA_UINT32); 9271 kstat_named_init(&stp->sd_harderrs, "Hard Errors", 9272 KSTAT_DATA_UINT32); 9273 kstat_named_init(&stp->sd_transerrs, "Transport Errors", 9274 KSTAT_DATA_UINT32); 9275 kstat_named_init(&stp->sd_vid, "Vendor", 9276 KSTAT_DATA_CHAR); 9277 kstat_named_init(&stp->sd_pid, "Product", 9278 KSTAT_DATA_CHAR); 9279 kstat_named_init(&stp->sd_revision, "Revision", 9280 KSTAT_DATA_CHAR); 9281 kstat_named_init(&stp->sd_serial, "Serial No", 9282 KSTAT_DATA_CHAR); 9283 kstat_named_init(&stp->sd_capacity, "Size", 9284 KSTAT_DATA_ULONGLONG); 9285 kstat_named_init(&stp->sd_rq_media_err, "Media Error", 9286 KSTAT_DATA_UINT32); 9287 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready", 9288 KSTAT_DATA_UINT32); 9289 kstat_named_init(&stp->sd_rq_nodev_err, "No Device", 9290 KSTAT_DATA_UINT32); 9291 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable", 9292 KSTAT_DATA_UINT32); 9293 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request", 9294 KSTAT_DATA_UINT32); 9295 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis", 9296 KSTAT_DATA_UINT32); 9297 9298 un->un_errstats->ks_private = un; 9299 un->un_errstats->ks_update = nulldev; 9300 9301 kstat_install(un->un_errstats); 9302 } 9303 9304 9305 /* 9306 * Function: sd_set_errstats 9307 * 9308 * Description: This routine sets the value of the vendor id, product id, 9309 * revision, serial number, and capacity device error stats. 9310 * 9311 * Note: During attach the stats are instantiated first so they are 9312 * available for attach-time routines that utilize the driver 9313 * iopath to send commands to the device. The stats are initialized 9314 * separately so data obtained during some attach-time routines is 9315 * available. 
(4362483) 9316 * 9317 * Arguments: un - driver soft state (unit) structure 9318 * 9319 * Context: Kernel thread context 9320 */ 9321 9322 static void 9323 sd_set_errstats(struct sd_lun *un) 9324 { 9325 struct sd_errstats *stp; 9326 9327 ASSERT(un != NULL); 9328 ASSERT(un->un_errstats != NULL); 9329 stp = (struct sd_errstats *)un->un_errstats->ks_data; 9330 ASSERT(stp != NULL); 9331 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8); 9332 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16); 9333 (void) strncpy(stp->sd_revision.value.c, 9334 un->un_sd->sd_inq->inq_revision, 4); 9335 9336 /* 9337 * Set the "Serial No" kstat for Sun qualified drives (indicated by 9338 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid) 9339 * (4376302)) 9340 */ 9341 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) { 9342 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c, 9343 sizeof (SD_INQUIRY(un)->inq_serial)); 9344 } 9345 9346 if (un->un_f_blockcount_is_valid != TRUE) { 9347 /* 9348 * Set capacity error stat to 0 for no media. This ensures 9349 * a valid capacity is displayed in response to 'iostat -E' 9350 * when no media is present in the device. 9351 */ 9352 stp->sd_capacity.value.ui64 = 0; 9353 } else { 9354 /* 9355 * Multiply un_blockcount by un->un_sys_blocksize to get 9356 * capacity. 9357 * 9358 * Note: for non-512 blocksize devices "un_blockcount" has been 9359 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by 9360 * (un_tgt_blocksize / un->un_sys_blocksize). 9361 */ 9362 stp->sd_capacity.value.ui64 = (uint64_t) 9363 ((uint64_t)un->un_blockcount * un->un_sys_blocksize); 9364 } 9365 } 9366 9367 9368 /* 9369 * Function: sd_set_pstats 9370 * 9371 * Description: This routine instantiates and initializes the partition 9372 * stats for each partition with more than zero blocks. 9373 * (4363169) 9374 * 9375 * Arguments: un - driver soft state (unit) structure 9376 * 9377 * Context: Kernel thread context 9378 */ 9379 9380 static void 9381 sd_set_pstats(struct sd_lun *un) 9382 { 9383 char kstatname[KSTAT_STRLEN]; 9384 int instance; 9385 int i; 9386 9387 ASSERT(un != NULL); 9388 9389 instance = ddi_get_instance(SD_DEVINFO(un)); 9390 9391 /* Note:x86: is this a VTOC8/VTOC16 difference? */ 9392 for (i = 0; i < NSDMAP; i++) { 9393 if ((un->un_pstats[i] == NULL) && 9394 (un->un_map[i].dkl_nblk != 0)) { 9395 (void) snprintf(kstatname, sizeof (kstatname), 9396 "%s%d,%s", sd_label, instance, 9397 sd_minor_data[i].name); 9398 un->un_pstats[i] = kstat_create(sd_label, 9399 instance, kstatname, "partition", KSTAT_TYPE_IO, 9400 1, KSTAT_FLAG_PERSISTENT); 9401 if (un->un_pstats[i] != NULL) { 9402 un->un_pstats[i]->ks_lock = SD_MUTEX(un); 9403 kstat_install(un->un_pstats[i]); 9404 } 9405 } 9406 } 9407 } 9408 9409 9410 #if (defined(__fibre)) 9411 /* 9412 * Function: sd_init_event_callbacks 9413 * 9414 * Description: This routine initializes the insertion and removal event 9415 * callbacks. 
(fibre only)
9416 *
9417 * Arguments: un - driver soft state (unit) structure
9418 *
9419 * Context: Kernel thread context
9420 */
9421 
9422 static void
9423 sd_init_event_callbacks(struct sd_lun *un)
9424 {
9425 ASSERT(un != NULL);
9426 
9427 if ((un->un_insert_event == NULL) &&
9428 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
9429 &un->un_insert_event) == DDI_SUCCESS)) {
9430 /*
9431 * Add the callback for an insertion event
9432 */
9433 (void) ddi_add_event_handler(SD_DEVINFO(un),
9434 un->un_insert_event, sd_event_callback, (void *)un,
9435 &(un->un_insert_cb_id));
9436 }
9437 
9438 if ((un->un_remove_event == NULL) &&
9439 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
9440 &un->un_remove_event) == DDI_SUCCESS)) {
9441 /*
9442 * Add the callback for a removal event
9443 */
9444 (void) ddi_add_event_handler(SD_DEVINFO(un),
9445 un->un_remove_event, sd_event_callback, (void *)un,
9446 &(un->un_remove_cb_id));
9447 }
9448 }
9449 
9450 
9451 /*
9452 * Function: sd_event_callback
9453 *
9454 * Description: This routine handles insert/remove events (photon). The
9455 * state is changed to OFFLINE which can be used to suppress
9456 * error msgs. (fibre only)
9457 *
9458 * Arguments: un - driver soft state (unit) structure
9459 *
9460 * Context: Callout thread context
9461 */
9462 /* ARGSUSED */
9463 static void
9464 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
9465 void *bus_impldata)
9466 {
9467 struct sd_lun *un = (struct sd_lun *)arg;
9468 
9469 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
9470 if (event == un->un_insert_event) {
9471 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
9472 mutex_enter(SD_MUTEX(un));
9473 if (un->un_state == SD_STATE_OFFLINE) {
9474 if (un->un_last_state != SD_STATE_SUSPENDED) {
9475 un->un_state = un->un_last_state;
9476 } else {
9477 /*
9478 * We have gone through SUSPEND/RESUME while
9479 * we were offline. Restore the last state.
9480 */
9481 un->un_state = un->un_save_state;
9482 }
9483 }
9484 mutex_exit(SD_MUTEX(un));
9485 
9486 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
9487 } else if (event == un->un_remove_event) {
9488 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
9489 mutex_enter(SD_MUTEX(un));
9490 /*
9491 * We need to handle an event callback that occurs during
9492 * the suspend operation, since we don't prevent it.
9493 */
9494 if (un->un_state != SD_STATE_OFFLINE) {
9495 if (un->un_state != SD_STATE_SUSPENDED) {
9496 New_state(un, SD_STATE_OFFLINE);
9497 } else {
9498 un->un_last_state = SD_STATE_OFFLINE;
9499 }
9500 }
9501 mutex_exit(SD_MUTEX(un));
9502 } else {
9503 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
9504 "!Unknown event\n");
9505 }
9506 
9507 }
9508 #endif
9509 
9510 
9511 /*
9512 * Function: sd_disable_caching()
9513 *
9514 * Description: This routine is the driver entry point for disabling
9515 * read and write caching by modifying the WCE (write cache
9516 * enable) and RCD (read cache disable) bits of mode
9517 * page 8 (MODEPAGE_CACHING).
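*
* As an aid to the reader (a sketch, not additional driver
* code), the settings this routine applies to the caching
* page (page code 0x08) are:
*
*	mode_caching_page->wce = 0;	-- write cache off
*	mode_caching_page->rcd = 1;	-- read cache off
*
* RCD is a "disable" bit, so setting it to 1 turns read
* caching off.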
9518 * 9519 * Arguments: un - driver soft state (unit) structure 9520 * 9521 * Return Code: EIO 9522 * code returned by sd_send_scsi_MODE_SENSE and 9523 * sd_send_scsi_MODE_SELECT 9524 * 9525 * Context: Kernel Thread 9526 */ 9527 9528 static int 9529 sd_disable_caching(struct sd_lun *un) 9530 { 9531 struct mode_caching *mode_caching_page; 9532 uchar_t *header; 9533 size_t buflen; 9534 int hdrlen; 9535 int bd_len; 9536 int rval = 0; 9537 9538 ASSERT(un != NULL); 9539 9540 /* 9541 * Do a test unit ready, otherwise a mode sense may not work if this 9542 * is the first command sent to the device after boot. 9543 */ 9544 (void) sd_send_scsi_TEST_UNIT_READY(un, 0); 9545 9546 if (un->un_f_cfg_is_atapi == TRUE) { 9547 hdrlen = MODE_HEADER_LENGTH_GRP2; 9548 } else { 9549 hdrlen = MODE_HEADER_LENGTH; 9550 } 9551 9552 /* 9553 * Allocate memory for the retrieved mode page and its headers. Set 9554 * a pointer to the page itself. 9555 */ 9556 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching); 9557 header = kmem_zalloc(buflen, KM_SLEEP); 9558 9559 /* Get the information from the device. */ 9560 if (un->un_f_cfg_is_atapi == TRUE) { 9561 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, header, buflen, 9562 MODEPAGE_CACHING, SD_PATH_DIRECT); 9563 } else { 9564 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, header, buflen, 9565 MODEPAGE_CACHING, SD_PATH_DIRECT); 9566 } 9567 if (rval != 0) { 9568 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 9569 "sd_disable_caching: Mode Sense Failed\n"); 9570 kmem_free(header, buflen); 9571 return (rval); 9572 } 9573 9574 /* 9575 * Determine size of Block Descriptors in order to locate 9576 * the mode page data. ATAPI devices return 0, SCSI devices 9577 * should return MODE_BLK_DESC_LENGTH. 9578 */ 9579 if (un->un_f_cfg_is_atapi == TRUE) { 9580 struct mode_header_grp2 *mhp; 9581 mhp = (struct mode_header_grp2 *)header; 9582 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo; 9583 } else { 9584 bd_len = ((struct mode_header *)header)->bdesc_length; 9585 } 9586 9587 if (bd_len > MODE_BLK_DESC_LENGTH) { 9588 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 9589 "sd_disable_caching: Mode Sense returned invalid " 9590 "block descriptor length\n"); 9591 kmem_free(header, buflen); 9592 return (EIO); 9593 } 9594 9595 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len); 9596 9597 /* Check the relevant bits on successful mode sense. */ 9598 if ((mode_caching_page->wce) || !(mode_caching_page->rcd)) { 9599 /* 9600 * Read or write caching is enabled. Disable both of them. 9601 */ 9602 mode_caching_page->wce = 0; 9603 mode_caching_page->rcd = 1; 9604 9605 /* Clear reserved bits before mode select. */ 9606 mode_caching_page->mode_page.ps = 0; 9607 9608 /* 9609 * Clear out mode header for mode select. 9610 * The rest of the retrieved page will be reused. 9611 */ 9612 bzero(header, hdrlen); 9613 9614 /* Change the cache page to disable all caching. */ 9615 if (un->un_f_cfg_is_atapi == TRUE) { 9616 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, header, 9617 buflen, SD_SAVE_PAGE, SD_PATH_DIRECT); 9618 } else { 9619 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, header, 9620 buflen, SD_SAVE_PAGE, SD_PATH_DIRECT); 9621 } 9622 } 9623 9624 kmem_free(header, buflen); 9625 return (rval); 9626 } 9627 9628 9629 /* 9630 * Function: sd_make_device 9631 * 9632 * Description: Utility routine to return the Solaris device number from 9633 * the data in the device's dev_info structure. 
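*
* For illustration (values assume the VTOC-8 case, where
* SDUNIT_SHIFT is 3): instance 2 owns minor numbers
* (2 << 3) through (2 << 3) + 7, i.e. 16..23, and the
* SDUNIT()/SDPART() macros recover the instance and
* partition from such a dev_t again.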
9634 *
9635 * Return Code: The Solaris device number
9636 *
9637 * Context: Any
9638 */
9639 
9640 static dev_t
9641 sd_make_device(dev_info_t *devi)
9642 {
9643 return (makedevice(ddi_name_to_major(ddi_get_name(devi)),
9644 ddi_get_instance(devi) << SDUNIT_SHIFT));
9645 }
9646 
9647 
9648 /*
9649 * Function: sd_pm_entry
9650 *
9651 * Description: Called at the start of a new command to manage power
9652 * and busy status of a device. This includes determining whether
9653 * the current power state of the device is sufficient for
9654 * performing the command or whether it must be changed.
9655 * The PM framework is notified appropriately.
9656 * Only with a return status of DDI_SUCCESS will the
9657 * component be marked busy to the framework.
9658 *
9659 * All callers of sd_pm_entry must check the return status
9660 * and only call sd_pm_exit if it was DDI_SUCCESS. A status
9661 * of DDI_FAILURE indicates the device failed to power up.
9662 * In this case un_pm_count has been adjusted so the result
9663 * on exit is still powered down, i.e. count is less than 0.
9664 * Calling sd_pm_exit with this count value hits an ASSERT.
9665 *
9666 * Return Code: DDI_SUCCESS or DDI_FAILURE
9667 *
9668 * Context: Kernel thread context.
9669 */
9670 
9671 static int
9672 sd_pm_entry(struct sd_lun *un)
9673 {
9674 int return_status = DDI_SUCCESS;
9675 
9676 ASSERT(!mutex_owned(SD_MUTEX(un)));
9677 ASSERT(!mutex_owned(&un->un_pm_mutex));
9678 
9679 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");
9680 
9681 if (un->un_f_pm_is_enabled == FALSE) {
9682 SD_TRACE(SD_LOG_IO_PM, un,
9683 "sd_pm_entry: exiting, PM not enabled\n");
9684 return (return_status);
9685 }
9686 
9687 /*
9688 * Just increment a counter if PM is enabled. On the transition from
9689 * 0 ==> 1, mark the device as busy. The iodone side will decrement
9690 * the count with each IO and mark the device as idle when the count
9691 * hits 0.
9692 *
9693 * If the count is less than 0 the device is powered down. If a powered
9694 * down device is successfully powered up then the count must be
9695 * incremented to reflect the power up. Note that it'll get incremented
9696 * a second time to become busy.
9697 *
9698 * Because the following has the potential to change the device state
9699 * and must release the un_pm_mutex to do so, only one thread can be
9700 * allowed through at a time.
9701 */
9702 
9703 mutex_enter(&un->un_pm_mutex);
9704 while (un->un_pm_busy == TRUE) {
9705 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
9706 }
9707 un->un_pm_busy = TRUE;
9708 
9709 if (un->un_pm_count < 1) {
9710 
9711 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");
9712 
9713 /*
9714 * Indicate we are now busy so the framework won't attempt to
9715 * power down the device. This call will only fail if either
9716 * we passed a bad component number or the device has no
9717 * components. Neither of these should ever happen.
9718 */
9719 mutex_exit(&un->un_pm_mutex);
9720 return_status = pm_busy_component(SD_DEVINFO(un), 0);
9721 ASSERT(return_status == DDI_SUCCESS);
9722 
9723 mutex_enter(&un->un_pm_mutex);
9724 
9725 if (un->un_pm_count < 0) {
9726 mutex_exit(&un->un_pm_mutex);
9727 
9728 SD_TRACE(SD_LOG_IO_PM, un,
9729 "sd_pm_entry: power up component\n");
9730 
9731 /*
9732 * pm_raise_power will cause sdpower to be called
9733 * which brings the device power level to the
9734 * desired state, ON in this case. If successful,
9735 * un_pm_count and un_power_level will be updated
9736 * appropriately.
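*
* For reference, the un_pm_count convention used by this
* routine and sd_pm_exit (descriptive only, derived from
* the logic here):
*
*	count <  0	device is powered down
*	count == 0	powered up, device idle
*	count >  0	powered up, commands in flight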
9737 */ 9738 return_status = pm_raise_power(SD_DEVINFO(un), 0, 9739 SD_SPINDLE_ON); 9740 9741 mutex_enter(&un->un_pm_mutex); 9742 9743 if (return_status != DDI_SUCCESS) { 9744 /* 9745 * Power up failed. 9746 * Idle the device and adjust the count 9747 * so the result on exit is that we're 9748 * still powered down, ie. count is less than 0. 9749 */ 9750 SD_TRACE(SD_LOG_IO_PM, un, 9751 "sd_pm_entry: power up failed," 9752 " idle the component\n"); 9753 9754 (void) pm_idle_component(SD_DEVINFO(un), 0); 9755 un->un_pm_count--; 9756 } else { 9757 /* 9758 * Device is powered up, verify the 9759 * count is non-negative. 9760 * This is debug only. 9761 */ 9762 ASSERT(un->un_pm_count == 0); 9763 } 9764 } 9765 9766 if (return_status == DDI_SUCCESS) { 9767 /* 9768 * For performance, now that the device has been tagged 9769 * as busy, and it's known to be powered up, update the 9770 * chain types to use jump tables that do not include 9771 * pm. This significantly lowers the overhead and 9772 * therefore improves performance. 9773 */ 9774 9775 mutex_exit(&un->un_pm_mutex); 9776 mutex_enter(SD_MUTEX(un)); 9777 SD_TRACE(SD_LOG_IO_PM, un, 9778 "sd_pm_entry: changing uscsi_chain_type from %d\n", 9779 un->un_uscsi_chain_type); 9780 9781 if (ISREMOVABLE(un)) { 9782 un->un_buf_chain_type = 9783 SD_CHAIN_INFO_RMMEDIA_NO_PM; 9784 } else { 9785 un->un_buf_chain_type = 9786 SD_CHAIN_INFO_DISK_NO_PM; 9787 } 9788 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM; 9789 9790 SD_TRACE(SD_LOG_IO_PM, un, 9791 " changed uscsi_chain_type to %d\n", 9792 un->un_uscsi_chain_type); 9793 mutex_exit(SD_MUTEX(un)); 9794 mutex_enter(&un->un_pm_mutex); 9795 9796 if (un->un_pm_idle_timeid == NULL) { 9797 /* 300 ms. */ 9798 un->un_pm_idle_timeid = 9799 timeout(sd_pm_idletimeout_handler, un, 9800 (drv_usectohz((clock_t)300000))); 9801 /* 9802 * Include an extra call to busy which keeps the 9803 * device busy with-respect-to the PM layer 9804 * until the timer fires, at which time it'll 9805 * get the extra idle call. 9806 */ 9807 (void) pm_busy_component(SD_DEVINFO(un), 0); 9808 } 9809 } 9810 } 9811 un->un_pm_busy = FALSE; 9812 /* Next... */ 9813 cv_signal(&un->un_pm_busy_cv); 9814 9815 un->un_pm_count++; 9816 9817 SD_TRACE(SD_LOG_IO_PM, un, 9818 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count); 9819 9820 mutex_exit(&un->un_pm_mutex); 9821 9822 return (return_status); 9823 } 9824 9825 9826 /* 9827 * Function: sd_pm_exit 9828 * 9829 * Description: Called at the completion of a command to manage busy 9830 * status for the device. If the device becomes idle the 9831 * PM framework is notified. 9832 * 9833 * Context: Kernel thread context 9834 */ 9835 9836 static void 9837 sd_pm_exit(struct sd_lun *un) 9838 { 9839 ASSERT(!mutex_owned(SD_MUTEX(un))); 9840 ASSERT(!mutex_owned(&un->un_pm_mutex)); 9841 9842 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n"); 9843 9844 /* 9845 * After attach the following flag is only read, so don't 9846 * take the penalty of acquiring a mutex for it. 
9847 */
9848 if (un->un_f_pm_is_enabled == TRUE) {
9849 
9850 mutex_enter(&un->un_pm_mutex);
9851 un->un_pm_count--;
9852 
9853 SD_TRACE(SD_LOG_IO_PM, un,
9854 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);
9855 
9856 ASSERT(un->un_pm_count >= 0);
9857 if (un->un_pm_count == 0) {
9858 mutex_exit(&un->un_pm_mutex);
9859 
9860 SD_TRACE(SD_LOG_IO_PM, un,
9861 "sd_pm_exit: idle component\n");
9862 
9863 (void) pm_idle_component(SD_DEVINFO(un), 0);
9864 
9865 } else {
9866 mutex_exit(&un->un_pm_mutex);
9867 }
9868 }
9869 
9870 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
9871 }
9872 
9873 
9874 /*
9875 * Function: sdopen
9876 *
9877 * Description: Driver's open(9e) entry point function.
9878 *
9879 * Arguments: dev_p - pointer to device number
9880 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
9881 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
9882 * cred_p - user credential pointer
9883 *
9884 * Return Code: EINVAL
9885 * ENXIO
9886 * EIO
9887 * EROFS
9888 * EBUSY
9889 *
9890 * Context: Kernel thread context
9891 */
9892 /* ARGSUSED */
9893 static int
9894 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
9895 {
9896 struct sd_lun *un;
9897 int nodelay;
9898 int part;
9899 int partmask;
9900 int instance;
9901 dev_t dev;
9902 int rval = EIO;
9903 
9904 /* Validate the open type */
9905 if (otyp >= OTYPCNT) {
9906 return (EINVAL);
9907 }
9908 
9909 dev = *dev_p;
9910 instance = SDUNIT(dev);
9911 mutex_enter(&sd_detach_mutex);
9912 
9913 /*
9914 * Fail the open if there is no softstate for the instance, or
9915 * if another thread somewhere is trying to detach the instance.
9916 */
9917 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
9918 (un->un_detach_count != 0)) {
9919 mutex_exit(&sd_detach_mutex);
9920 /*
9921 * The probe cache only needs to be cleared when open (9e) fails
9922 * with ENXIO (4238046).
9923 */
9924 /*
9925 * Unconditionally clearing the probe cache is ok with
9926 * separate sd/ssd binaries; on the x86 platform it can
9927 * be an issue with both parallel and fibre in one
9928 * binary.
9929 */
9930 sd_scsi_clear_probe_cache();
9931 return (ENXIO);
9932 }
9933 
9934 /*
9935 * The un_layer_count is to prevent another thread in specfs from
9936 * trying to detach the instance, which can happen when we are
9937 * called from a higher-layer driver instead of thru specfs.
9938 * This will not be needed when DDI provides a layered driver
9939 * interface that allows specfs to know that an instance is in
9940 * use by a layered driver & should not be detached.
9941 *
9942 * Note: the semantics for layered driver opens are exactly one
9943 * close for every open.
9944 */
9945 if (otyp == OTYP_LYR) {
9946 un->un_layer_count++;
9947 }
9948 
9949 /*
9950 * Keep a count of the current # of opens in progress. This is because
9951 * some layered drivers try to call us as a regular open. This can
9952 * cause problems that we cannot prevent; however, by keeping this count
9953 * we can at least keep our open and detach routines from racing against
9954 * each other under such conditions.
9955 */
9956 un->un_opens_in_progress++;
9957 mutex_exit(&sd_detach_mutex);
9958 
9959 nodelay = (flag & (FNDELAY | FNONBLOCK));
9960 part = SDPART(dev);
9961 partmask = 1 << part;
9962 
9963 /*
9964 * We use a semaphore here in order to serialize
9965 * open and close requests on the device.
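*
* (A mutex would not be a good fit here: un_semoclose is held
* across calls that can block for a long time, e.g.
* sd_ready_and_valid() and sd_pm_entry(), which is the usual
* reason to prefer a semaphore for open/close serialization.)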
9966 */ 9967 sema_p(&un->un_semoclose); 9968 9969 mutex_enter(SD_MUTEX(un)); 9970 9971 /* 9972 * All device accesses go thru sdstrategy() where we check 9973 * on suspend status but there could be a scsi_poll command, 9974 * which bypasses sdstrategy(), so we need to check pm 9975 * status. 9976 */ 9977 9978 if (!nodelay) { 9979 while ((un->un_state == SD_STATE_SUSPENDED) || 9980 (un->un_state == SD_STATE_PM_CHANGING)) { 9981 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 9982 } 9983 9984 mutex_exit(SD_MUTEX(un)); 9985 if (sd_pm_entry(un) != DDI_SUCCESS) { 9986 rval = EIO; 9987 SD_ERROR(SD_LOG_OPEN_CLOSE, un, 9988 "sdopen: sd_pm_entry failed\n"); 9989 goto open_failed_with_pm; 9990 } 9991 mutex_enter(SD_MUTEX(un)); 9992 } 9993 9994 /* check for previous exclusive open */ 9995 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un); 9996 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 9997 "sdopen: exclopen=%x, flag=%x, regopen=%x\n", 9998 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); 9999 10000 if (un->un_exclopen & (partmask)) { 10001 goto excl_open_fail; 10002 } 10003 10004 if (flag & FEXCL) { 10005 int i; 10006 if (un->un_ocmap.lyropen[part]) { 10007 goto excl_open_fail; 10008 } 10009 for (i = 0; i < (OTYPCNT - 1); i++) { 10010 if (un->un_ocmap.regopen[i] & (partmask)) { 10011 goto excl_open_fail; 10012 } 10013 } 10014 } 10015 10016 /* 10017 * Check the write permission if this is a removable media device, 10018 * NDELAY has not been set, and writable permission is requested. 10019 * 10020 * Note: If NDELAY was set and this is write-protected media the WRITE 10021 * attempt will fail with EIO as part of the I/O processing. This is a 10022 * more permissive implementation that allows the open to succeed and 10023 * WRITE attempts to fail when appropriate. 10024 */ 10025 if (ISREMOVABLE(un)) { 10026 if ((flag & FWRITE) && (!nodelay)) { 10027 mutex_exit(SD_MUTEX(un)); 10028 /* 10029 * Defer the check for write permission on writable 10030 * DVD drive till sdstrategy and will not fail open even 10031 * if FWRITE is set as the device can be writable 10032 * depending upon the media and the media can change 10033 * after the call to open(). 10034 */ 10035 if (un->un_f_dvdram_writable_device == FALSE) { 10036 if (ISCD(un) || sr_check_wp(dev)) { 10037 rval = EROFS; 10038 mutex_enter(SD_MUTEX(un)); 10039 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10040 "write to cd or write protected media\n"); 10041 goto open_fail; 10042 } 10043 } 10044 mutex_enter(SD_MUTEX(un)); 10045 } 10046 } 10047 10048 /* 10049 * If opening in NDELAY/NONBLOCK mode, just return. 10050 * Check if disk is ready and has a valid geometry later. 10051 */ 10052 if (!nodelay) { 10053 mutex_exit(SD_MUTEX(un)); 10054 rval = sd_ready_and_valid(un); 10055 mutex_enter(SD_MUTEX(un)); 10056 /* 10057 * Fail if device is not ready or if the number of disk 10058 * blocks is zero or negative for non CD devices. 10059 */ 10060 if ((rval != SD_READY_VALID) || 10061 (!ISCD(un) && un->un_map[part].dkl_nblk <= 0)) { 10062 if (ISREMOVABLE(un)) { 10063 rval = ENXIO; 10064 } else { 10065 rval = EIO; 10066 } 10067 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10068 "device not ready or invalid disk block value\n"); 10069 goto open_fail; 10070 } 10071 #if defined(__i386) || defined(__amd64) 10072 } else { 10073 uchar_t *cp; 10074 /* 10075 * x86 requires special nodelay handling, so that p0 is 10076 * always defined and accessible. 10077 * Invalidate geometry only if device is not already open. 
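*
* (The un_ocmap.chkd[] bytes scanned below provide a byte-wise
* view of the regular and layered open maps, so a non-zero byte
* anywhere means some partition is still open and the geometry
* must be left alone.)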
10078 */ 10079 cp = &un->un_ocmap.chkd[0]; 10080 while (cp < &un->un_ocmap.chkd[OCSIZE]) { 10081 if (*cp != (uchar_t)0) { 10082 break; 10083 } 10084 cp++; 10085 } 10086 if (cp == &un->un_ocmap.chkd[OCSIZE]) { 10087 un->un_f_geometry_is_valid = FALSE; 10088 } 10089 10090 #endif 10091 } 10092 10093 if (otyp == OTYP_LYR) { 10094 un->un_ocmap.lyropen[part]++; 10095 } else { 10096 un->un_ocmap.regopen[otyp] |= partmask; 10097 } 10098 10099 /* Set up open and exclusive open flags */ 10100 if (flag & FEXCL) { 10101 un->un_exclopen |= (partmask); 10102 } 10103 10104 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: " 10105 "open of part %d type %d\n", part, otyp); 10106 10107 mutex_exit(SD_MUTEX(un)); 10108 if (!nodelay) { 10109 sd_pm_exit(un); 10110 } 10111 10112 sema_v(&un->un_semoclose); 10113 10114 mutex_enter(&sd_detach_mutex); 10115 un->un_opens_in_progress--; 10116 mutex_exit(&sd_detach_mutex); 10117 10118 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n"); 10119 return (DDI_SUCCESS); 10120 10121 excl_open_fail: 10122 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n"); 10123 rval = EBUSY; 10124 10125 open_fail: 10126 mutex_exit(SD_MUTEX(un)); 10127 10128 /* 10129 * On a failed open we must exit the pm management. 10130 */ 10131 if (!nodelay) { 10132 sd_pm_exit(un); 10133 } 10134 open_failed_with_pm: 10135 sema_v(&un->un_semoclose); 10136 10137 mutex_enter(&sd_detach_mutex); 10138 un->un_opens_in_progress--; 10139 if (otyp == OTYP_LYR) { 10140 un->un_layer_count--; 10141 } 10142 mutex_exit(&sd_detach_mutex); 10143 10144 return (rval); 10145 } 10146 10147 10148 /* 10149 * Function: sdclose 10150 * 10151 * Description: Driver's close(9e) entry point function. 10152 * 10153 * Arguments: dev - device number 10154 * flag - file status flag, informational only 10155 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR) 10156 * cred_p - user credential pointer 10157 * 10158 * Return Code: ENXIO 10159 * 10160 * Context: Kernel thread context 10161 */ 10162 /* ARGSUSED */ 10163 static int 10164 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p) 10165 { 10166 struct sd_lun *un; 10167 uchar_t *cp; 10168 int part; 10169 int nodelay; 10170 int rval = 0; 10171 10172 /* Validate the open type */ 10173 if (otyp >= OTYPCNT) { 10174 return (ENXIO); 10175 } 10176 10177 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10178 return (ENXIO); 10179 } 10180 10181 part = SDPART(dev); 10182 nodelay = flag & (FNDELAY | FNONBLOCK); 10183 10184 SD_TRACE(SD_LOG_OPEN_CLOSE, un, 10185 "sdclose: close of part %d type %d\n", part, otyp); 10186 10187 /* 10188 * We use a semaphore here in order to serialize 10189 * open and close requests on the device. 10190 */ 10191 sema_p(&un->un_semoclose); 10192 10193 mutex_enter(SD_MUTEX(un)); 10194 10195 /* Don't proceed if power is being changed. 
*/
10196 while (un->un_state == SD_STATE_PM_CHANGING) {
10197 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10198 }
10199 
10200 if (un->un_exclopen & (1 << part)) {
10201 un->un_exclopen &= ~(1 << part);
10202 }
10203 
10204 /* Update the open partition map */
10205 if (otyp == OTYP_LYR) {
10206 un->un_ocmap.lyropen[part] -= 1;
10207 } else {
10208 un->un_ocmap.regopen[otyp] &= ~(1 << part);
10209 }
10210 
10211 cp = &un->un_ocmap.chkd[0];
10212 while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10213 if (*cp != NULL) {
10214 break;
10215 }
10216 cp++;
10217 }
10218 
10219 if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10220 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
10221 
10222 /*
10223 * We avoid persistence upon the last close, and set
10224 * the throttle back to the maximum.
10225 */
10226 un->un_throttle = un->un_saved_throttle;
10227 
10228 if (un->un_state == SD_STATE_OFFLINE) {
10229 if (un->un_f_is_fibre == FALSE) {
10230 scsi_log(SD_DEVINFO(un), sd_label,
10231 CE_WARN, "offline\n");
10232 }
10233 un->un_f_geometry_is_valid = FALSE;
10234 
10235 } else {
10236 /*
10237 * Flush any outstanding writes in NVRAM cache.
10238 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
10239 * cmd; it may not work for non-Pluto devices.
10240 * SYNCHRONIZE CACHE is not required for removables,
10241 * except DVD-RAM drives.
10242 *
10243 * Also note: because SYNCHRONIZE CACHE is currently
10244 * the only command issued here that requires the
10245 * drive be powered up, only do the power up before
10246 * sending the Sync Cache command. If additional
10247 * commands are added which require a powered up
10248 * drive, the following sequence may have to change.
10249 *
10250 * And finally, note that parallel SCSI on SPARC
10251 * only issues a Sync Cache to DVD-RAM, a newly
10252 * supported device.
10253 */
10254 #if defined(__i386) || defined(__amd64)
10255 if (!ISREMOVABLE(un) ||
10256 un->un_f_dvdram_writable_device == TRUE) {
10257 #else
10258 if (un->un_f_dvdram_writable_device == TRUE) {
10259 #endif
10260 mutex_exit(SD_MUTEX(un));
10261 if (sd_pm_entry(un) == DDI_SUCCESS) {
10262 if (sd_send_scsi_SYNCHRONIZE_CACHE(un)
10263 != 0) {
10264 rval = EIO;
10265 }
10266 sd_pm_exit(un);
10267 } else {
10268 rval = EIO;
10269 }
10270 mutex_enter(SD_MUTEX(un));
10271 }
10272 
10273 /*
10274 * For removable media devices, send an ALLOW MEDIA
10275 * REMOVAL command, but don't get upset if it fails.
10276 * Also invalidate the geometry. We need to raise
10277 * the power of the drive before we can call
10278 * sd_send_scsi_DOORLOCK().
10279 */
10280 if (ISREMOVABLE(un)) {
10281 mutex_exit(SD_MUTEX(un));
10282 if (sd_pm_entry(un) == DDI_SUCCESS) {
10283 rval = sd_send_scsi_DOORLOCK(un,
10284 SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
10285 
10286 sd_pm_exit(un);
10287 if (ISCD(un) && (rval != 0) &&
10288 (nodelay != 0)) {
10289 rval = ENXIO;
10290 }
10291 } else {
10292 rval = EIO;
10293 }
10294 mutex_enter(SD_MUTEX(un));
10295 
10296 sr_ejected(un);
10297 /*
10298 * Destroy the cache (if it exists) which was
10299 * allocated for the write maps since this is
10300 * the last close for this media.
10301 */
10302 if (un->un_wm_cache) {
10303 /*
10304 * Check if there are pending commands,
10305 * and if there are, give a warning and
10306 * do not destroy the cache.
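*
* (Destroying a kmem cache that still has
* outstanding allocations would panic the
* system, which is why pending I/O must
* block the destroy.)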
10307 */ 10308 if (un->un_ncmds_in_driver > 0) { 10309 scsi_log(SD_DEVINFO(un), 10310 sd_label, CE_WARN, 10311 "Unable to clean up memory " 10312 "because of pending I/O\n"); 10313 } else { 10314 kmem_cache_destroy( 10315 un->un_wm_cache); 10316 un->un_wm_cache = NULL; 10317 } 10318 } 10319 } 10320 } 10321 } 10322 10323 mutex_exit(SD_MUTEX(un)); 10324 sema_v(&un->un_semoclose); 10325 10326 if (otyp == OTYP_LYR) { 10327 mutex_enter(&sd_detach_mutex); 10328 /* 10329 * The detach routine may run when the layer count 10330 * drops to zero. 10331 */ 10332 un->un_layer_count--; 10333 mutex_exit(&sd_detach_mutex); 10334 } 10335 10336 return (rval); 10337 } 10338 10339 10340 /* 10341 * Function: sd_ready_and_valid 10342 * 10343 * Description: Test if device is ready and has a valid geometry. 10344 * 10345 * Arguments: dev - device number 10346 * un - driver soft state (unit) structure 10347 * 10348 * Return Code: SD_READY_VALID ready and valid label 10349 * SD_READY_NOT_VALID ready, geom ops never applicable 10350 * SD_NOT_READY_VALID not ready, no label 10351 * 10352 * Context: Never called at interrupt context. 10353 */ 10354 10355 static int 10356 sd_ready_and_valid(struct sd_lun *un) 10357 { 10358 struct sd_errstats *stp; 10359 uint64_t capacity; 10360 uint_t lbasize; 10361 int rval = SD_READY_VALID; 10362 char name_str[48]; 10363 10364 ASSERT(un != NULL); 10365 ASSERT(!mutex_owned(SD_MUTEX(un))); 10366 10367 mutex_enter(SD_MUTEX(un)); 10368 if (ISREMOVABLE(un)) { 10369 mutex_exit(SD_MUTEX(un)); 10370 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 10371 rval = SD_NOT_READY_VALID; 10372 mutex_enter(SD_MUTEX(un)); 10373 goto done; 10374 } 10375 10376 mutex_enter(SD_MUTEX(un)); 10377 if ((un->un_f_geometry_is_valid == FALSE) || 10378 (un->un_f_blockcount_is_valid == FALSE) || 10379 (un->un_f_tgt_blocksize_is_valid == FALSE)) { 10380 10381 /* capacity has to be read every open. */ 10382 mutex_exit(SD_MUTEX(un)); 10383 if (sd_send_scsi_READ_CAPACITY(un, &capacity, 10384 &lbasize, SD_PATH_DIRECT) != 0) { 10385 mutex_enter(SD_MUTEX(un)); 10386 un->un_f_geometry_is_valid = FALSE; 10387 rval = SD_NOT_READY_VALID; 10388 goto done; 10389 } else { 10390 mutex_enter(SD_MUTEX(un)); 10391 sd_update_block_info(un, lbasize, capacity); 10392 } 10393 } 10394 10395 /* 10396 * If this is a non 512 block device, allocate space for 10397 * the wmap cache. This is being done here since every time 10398 * a media is changed this routine will be called and the 10399 * block size is a function of media rather than device. 10400 */ 10401 if (NOT_DEVBSIZE(un)) { 10402 if (!(un->un_wm_cache)) { 10403 (void) snprintf(name_str, sizeof (name_str), 10404 "%s%d_cache", 10405 ddi_driver_name(SD_DEVINFO(un)), 10406 ddi_get_instance(SD_DEVINFO(un))); 10407 un->un_wm_cache = kmem_cache_create( 10408 name_str, sizeof (struct sd_w_map), 10409 8, sd_wm_cache_constructor, 10410 sd_wm_cache_destructor, NULL, 10411 (void *)un, NULL, 0); 10412 if (!(un->un_wm_cache)) { 10413 rval = ENOMEM; 10414 goto done; 10415 } 10416 } 10417 } 10418 10419 /* 10420 * Check if the media in the device is writable or not. 10421 */ 10422 if ((un->un_f_geometry_is_valid == FALSE) && ISCD(un)) { 10423 sd_check_for_writable_cd(un); 10424 } 10425 10426 } else { 10427 /* 10428 * Do a test unit ready to clear any unit attention from non-cd 10429 * devices. 
10430 */
10431 mutex_exit(SD_MUTEX(un));
10432 (void) sd_send_scsi_TEST_UNIT_READY(un, 0);
10433 mutex_enter(SD_MUTEX(un));
10434 }
10435 
10436 
10437 if (un->un_state == SD_STATE_NORMAL) {
10438 /*
10439 * If the target is not yet ready here (defined by a TUR
10440 * failure), invalidate the geometry and print an 'offline'
10441 * message. This is a legacy message, as the state of the
10442 * target is not actually changed to SD_STATE_OFFLINE.
10443 *
10444 * If the TUR fails for EACCES (Reservation Conflict), it
10445 * means there actually is nothing wrong with the target that
10446 * would require invalidating the geometry, so continue in
10447 * that case as if the TUR was successful.
10448 */
10449 int err;
10450 
10451 mutex_exit(SD_MUTEX(un));
10452 err = sd_send_scsi_TEST_UNIT_READY(un, 0);
10453 mutex_enter(SD_MUTEX(un));
10454 
10455 if ((err != 0) && (err != EACCES)) {
10456 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
10457 "offline\n");
10458 un->un_f_geometry_is_valid = FALSE;
10459 rval = SD_NOT_READY_VALID;
10460 goto done;
10461 }
10462 }
10463 
10464 if (un->un_f_format_in_progress == FALSE) {
10465 /*
10466 * Note: sd_validate_geometry may return TRUE, but that does
10467 * not necessarily mean un_f_geometry_is_valid == TRUE!
10468 */
10469 rval = sd_validate_geometry(un, SD_PATH_DIRECT);
10470 if (rval == ENOTSUP) {
10471 if (un->un_f_geometry_is_valid == TRUE)
10472 rval = 0;
10473 else {
10474 rval = SD_READY_NOT_VALID;
10475 goto done;
10476 }
10477 }
10478 if (rval != 0) {
10479 /*
10480 * We don't check the validity of geometry for
10481 * CDROMs. Also we assume we have a good label
10482 * even if sd_validate_geometry returned ENOMEM.
10483 */
10484 if (!ISCD(un) && rval != ENOMEM) {
10485 rval = SD_NOT_READY_VALID;
10486 goto done;
10487 }
10488 }
10489 }
10490 
10491 #ifdef DOESNTWORK /* on eliteII, see 1118607 */
10492 /*
10493 * Check to see if this disk is write protected; if it is and we have
10494 * not set read-only, then fail.
10495 */
10496 if ((flag & FWRITE) && (sr_check_wp(dev))) {
10497 New_state(un, SD_STATE_CLOSED);
10498 goto done;
10499 }
10500 #endif
10501 
10502 /*
10503 * If this is a removable media device, try and send
10504 * a PREVENT MEDIA REMOVAL command, but don't get upset
10505 * if it fails. For a CD, however, it is an error.
10506 */
10507 if (ISREMOVABLE(un)) {
10508 mutex_exit(SD_MUTEX(un));
10509 if ((sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
10510 SD_PATH_DIRECT) != 0) && ISCD(un)) {
10511 rval = SD_NOT_READY_VALID;
10512 mutex_enter(SD_MUTEX(un));
10513 goto done;
10514 }
10515 mutex_enter(SD_MUTEX(un));
10516 }
10517 
10518 /* The state has changed, inform the media watch routines */
10519 un->un_mediastate = DKIO_INSERTED;
10520 cv_broadcast(&un->un_state_cv);
10521 rval = SD_READY_VALID;
10522 
10523 done:
10524 
10525 /*
10526 * Initialize the capacity kstat value, if no media previously
10527 * (capacity kstat is 0) and media has been inserted
10528 * (un_blockcount > 0).
10529 * This is a more generic way than checking for ISREMOVABLE.
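*
* As a worked example (illustrative values only): a device with
* 2097152 blocks and a 512-byte system block size yields
* 2097152 * 512 = 1073741824 bytes for the capacity kstat.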
*/
10531 if (un->un_errstats != NULL) {
10532 stp = (struct sd_errstats *)un->un_errstats->ks_data;
10533 if ((stp->sd_capacity.value.ui64 == 0) &&
10534 (un->un_f_blockcount_is_valid == TRUE)) {
10535 stp->sd_capacity.value.ui64 =
10536 (uint64_t)((uint64_t)un->un_blockcount *
10537 un->un_sys_blocksize);
10538 }
10539 }
10540 
10541 mutex_exit(SD_MUTEX(un));
10542 return (rval);
10543 }
10544 
10545 
10546 /*
10547 * Function: sdmin
10548 *
10549 * Description: Routine to limit the size of a data transfer. Used in
10550 * conjunction with physio(9F).
10551 *
10552 * Arguments: bp - pointer to the indicated buf(9S) struct.
10553 *
10554 * Context: Kernel thread context.
10555 */
10556 
10557 static void
10558 sdmin(struct buf *bp)
10559 {
10560 struct sd_lun *un;
10561 int instance;
10562 
10563 instance = SDUNIT(bp->b_edev);
10564 
10565 un = ddi_get_soft_state(sd_state, instance);
10566 ASSERT(un != NULL);
10567 
10568 if (bp->b_bcount > un->un_max_xfer_size) {
10569 bp->b_bcount = un->un_max_xfer_size;
10570 }
10571 }
10572 
10573 
10574 /*
10575 * Function: sdread
10576 *
10577 * Description: Driver's read(9e) entry point function.
10578 *
10579 * Arguments: dev - device number
10580 * uio - structure pointer describing where data is to be stored
10581 * in user's space
10582 * cred_p - user credential pointer
10583 *
10584 * Return Code: ENXIO
10585 * EIO
10586 * EINVAL
10587 * value returned by physio
10588 *
10589 * Context: Kernel thread context.
10590 */
10591 /* ARGSUSED */
10592 static int
10593 sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
10594 {
10595 struct sd_lun *un = NULL;
10596 int secmask;
10597 int err;
10598 
10599 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10600 return (ENXIO);
10601 }
10602 
10603 ASSERT(!mutex_owned(SD_MUTEX(un)));
10604 
10605 if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) {
10606 mutex_enter(SD_MUTEX(un));
10607 /*
10608 * Because the call to sd_ready_and_valid will issue I/O we
10609 * must wait here if either the device is suspended or
10610 * if its power level is changing.
10611 */
10612 while ((un->un_state == SD_STATE_SUSPENDED) ||
10613 (un->un_state == SD_STATE_PM_CHANGING)) {
10614 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10615 }
10616 un->un_ncmds_in_driver++;
10617 mutex_exit(SD_MUTEX(un));
10618 if ((sd_ready_and_valid(un)) != SD_READY_VALID) {
10619 mutex_enter(SD_MUTEX(un));
10620 un->un_ncmds_in_driver--;
10621 ASSERT(un->un_ncmds_in_driver >= 0);
10622 mutex_exit(SD_MUTEX(un));
10623 return (EIO);
10624 }
10625 mutex_enter(SD_MUTEX(un));
10626 un->un_ncmds_in_driver--;
10627 ASSERT(un->un_ncmds_in_driver >= 0);
10628 mutex_exit(SD_MUTEX(un));
10629 }
10630 
10631 /*
10632 * Read requests are restricted to multiples of the system block size.
10633 */
10634 secmask = un->un_sys_blocksize - 1;
10635 
10636 if (uio->uio_loffset & ((offset_t)(secmask))) {
10637 SD_ERROR(SD_LOG_READ_WRITE, un,
10638 "sdread: file offset not modulo %d\n",
10639 un->un_sys_blocksize);
10640 err = EINVAL;
10641 } else if (uio->uio_iov->iov_len & (secmask)) {
10642 SD_ERROR(SD_LOG_READ_WRITE, un,
10643 "sdread: transfer length not modulo %d\n",
10644 un->un_sys_blocksize);
10645 err = EINVAL;
10646 } else {
10647 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
10648 }
10649 return (err);
10650 }
10651 
10652 
10653 /*
10654 * Function: sdwrite
10655 *
10656 * Description: Driver's write(9e) entry point function.
10657 * 10658 * Arguments: dev - device number 10659 * uio - structure pointer describing where data is stored in 10660 * user's space 10661 * cred_p - user credential pointer 10662 * 10663 * Return Code: ENXIO 10664 * EIO 10665 * EINVAL 10666 * value returned by physio 10667 * 10668 * Context: Kernel thread context. 10669 */ 10670 /* ARGSUSED */ 10671 static int 10672 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p) 10673 { 10674 struct sd_lun *un = NULL; 10675 int secmask; 10676 int err; 10677 10678 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10679 return (ENXIO); 10680 } 10681 10682 ASSERT(!mutex_owned(SD_MUTEX(un))); 10683 10684 if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) { 10685 mutex_enter(SD_MUTEX(un)); 10686 /* 10687 * Because the call to sd_ready_and_valid will issue I/O, we 10688 * must wait here if either the device is suspended or 10689 * if its power level is changing. 10690 */ 10691 while ((un->un_state == SD_STATE_SUSPENDED) || 10692 (un->un_state == SD_STATE_PM_CHANGING)) { 10693 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10694 } 10695 un->un_ncmds_in_driver++; 10696 mutex_exit(SD_MUTEX(un)); 10697 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10698 mutex_enter(SD_MUTEX(un)); 10699 un->un_ncmds_in_driver--; 10700 ASSERT(un->un_ncmds_in_driver >= 0); 10701 mutex_exit(SD_MUTEX(un)); 10702 return (EIO); 10703 } 10704 mutex_enter(SD_MUTEX(un)); 10705 un->un_ncmds_in_driver--; 10706 ASSERT(un->un_ncmds_in_driver >= 0); 10707 mutex_exit(SD_MUTEX(un)); 10708 } 10709 10710 /* 10711 * Write requests are restricted to multiples of the system block size. 10712 */ 10713 secmask = un->un_sys_blocksize - 1; 10714 10715 if (uio->uio_loffset & ((offset_t)(secmask))) { 10716 SD_ERROR(SD_LOG_READ_WRITE, un, 10717 "sdwrite: file offset not modulo %d\n", 10718 un->un_sys_blocksize); 10719 err = EINVAL; 10720 } else if (uio->uio_iov->iov_len & (secmask)) { 10721 SD_ERROR(SD_LOG_READ_WRITE, un, 10722 "sdwrite: transfer length not modulo %d\n", 10723 un->un_sys_blocksize); 10724 err = EINVAL; 10725 } else { 10726 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio); 10727 } 10728 return (err); 10729 } 10730 10731 10732 /* 10733 * Function: sdaread 10734 * 10735 * Description: Driver's aread(9e) entry point function. 10736 * 10737 * Arguments: dev - device number 10738 * aio - structure pointer describing where data is to be stored 10739 * cred_p - user credential pointer 10740 * 10741 * Return Code: ENXIO 10742 * EIO 10743 * EINVAL 10744 * value returned by aphysio 10745 * 10746 * Context: Kernel thread context. 10747 */ 10748 /* ARGSUSED */ 10749 static int 10750 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10751 { 10752 struct sd_lun *un = NULL; 10753 struct uio *uio = aio->aio_uio; 10754 int secmask; 10755 int err; 10756 10757 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10758 return (ENXIO); 10759 } 10760 10761 ASSERT(!mutex_owned(SD_MUTEX(un))); 10762 10763 if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) { 10764 mutex_enter(SD_MUTEX(un)); 10765 /* 10766 * Because the call to sd_ready_and_valid will issue I/O, we 10767 * must wait here if either the device is suspended or 10768 * if its power level is changing.
10769 */ 10770 while ((un->un_state == SD_STATE_SUSPENDED) || 10771 (un->un_state == SD_STATE_PM_CHANGING)) { 10772 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10773 } 10774 un->un_ncmds_in_driver++; 10775 mutex_exit(SD_MUTEX(un)); 10776 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10777 mutex_enter(SD_MUTEX(un)); 10778 un->un_ncmds_in_driver--; 10779 ASSERT(un->un_ncmds_in_driver >= 0); 10780 mutex_exit(SD_MUTEX(un)); 10781 return (EIO); 10782 } 10783 mutex_enter(SD_MUTEX(un)); 10784 un->un_ncmds_in_driver--; 10785 ASSERT(un->un_ncmds_in_driver >= 0); 10786 mutex_exit(SD_MUTEX(un)); 10787 } 10788 10789 /* 10790 * Read requests are restricted to multiples of the system block size. 10791 */ 10792 secmask = un->un_sys_blocksize - 1; 10793 10794 if (uio->uio_loffset & ((offset_t)(secmask))) { 10795 SD_ERROR(SD_LOG_READ_WRITE, un, 10796 "sdaread: file offset not modulo %d\n", 10797 un->un_sys_blocksize); 10798 err = EINVAL; 10799 } else if (uio->uio_iov->iov_len & (secmask)) { 10800 SD_ERROR(SD_LOG_READ_WRITE, un, 10801 "sdaread: transfer length not modulo %d\n", 10802 un->un_sys_blocksize); 10803 err = EINVAL; 10804 } else { 10805 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio); 10806 } 10807 return (err); 10808 } 10809 10810 10811 /* 10812 * Function: sdawrite 10813 * 10814 * Description: Driver's awrite(9e) entry point function. 10815 * 10816 * Arguments: dev - device number 10817 * aio - structure pointer describing where data is stored 10818 * cred_p - user credential pointer 10819 * 10820 * Return Code: ENXIO 10821 * EIO 10822 * EINVAL 10823 * value returned by aphysio 10824 * 10825 * Context: Kernel thread context. 10826 */ 10827 /* ARGSUSED */ 10828 static int 10829 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p) 10830 { 10831 struct sd_lun *un = NULL; 10832 struct uio *uio = aio->aio_uio; 10833 int secmask; 10834 int err; 10835 10836 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 10837 return (ENXIO); 10838 } 10839 10840 ASSERT(!mutex_owned(SD_MUTEX(un))); 10841 10842 if ((un->un_f_geometry_is_valid == FALSE) && !ISCD(un)) { 10843 mutex_enter(SD_MUTEX(un)); 10844 /* 10845 * Because the call to sd_ready_and_valid will issue I/O, we 10846 * must wait here if either the device is suspended or 10847 * if its power level is changing. 10848 */ 10849 while ((un->un_state == SD_STATE_SUSPENDED) || 10850 (un->un_state == SD_STATE_PM_CHANGING)) { 10851 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 10852 } 10853 un->un_ncmds_in_driver++; 10854 mutex_exit(SD_MUTEX(un)); 10855 if ((sd_ready_and_valid(un)) != SD_READY_VALID) { 10856 mutex_enter(SD_MUTEX(un)); 10857 un->un_ncmds_in_driver--; 10858 ASSERT(un->un_ncmds_in_driver >= 0); 10859 mutex_exit(SD_MUTEX(un)); 10860 return (EIO); 10861 } 10862 mutex_enter(SD_MUTEX(un)); 10863 un->un_ncmds_in_driver--; 10864 ASSERT(un->un_ncmds_in_driver >= 0); 10865 mutex_exit(SD_MUTEX(un)); 10866 } 10867 10868 /* 10869 * Write requests are restricted to multiples of the system block size.
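* * As an illustrative instance of the alignment tests below (assumed values): with un_sys_blocksize == 512, secmask == 0x1FF. A request at uio_loffset 4096 for 8192 bytes passes both tests, while an offset of 4100 leaves low-order bits set (4100 & 0x1FF == 4) and fails with EINVAL.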
10870 */ 10871 secmask = un->un_sys_blocksize - 1; 10872 10873 if (uio->uio_loffset & ((offset_t)(secmask))) { 10874 SD_ERROR(SD_LOG_READ_WRITE, un, 10875 "sdawrite: file offset not modulo %d\n", 10876 un->un_sys_blocksize); 10877 err = EINVAL; 10878 } else if (uio->uio_iov->iov_len & (secmask)) { 10879 SD_ERROR(SD_LOG_READ_WRITE, un, 10880 "sdawrite: transfer length not modulo %d\n", 10881 un->un_sys_blocksize); 10882 err = EINVAL; 10883 } else { 10884 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio); 10885 } 10886 return (err); 10887 } 10888 10889 10890 10891 10892 10893 /* 10894 * Driver IO processing follows this sequence: 10895 * 10896 * sdioctl(9E) sdstrategy(9E) biodone(9F) 10897 * | | ^ 10898 * v v | 10899 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+ 10900 * | | | | 10901 * v | | | 10902 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone() 10903 * | | ^ ^ 10904 * v v | | 10905 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | | 10906 * | | | | 10907 * +---+ | +------------+ +-------+ 10908 * | | | | 10909 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10910 * | v | | 10911 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() | 10912 * | | ^ | 10913 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10914 * | v | | 10915 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() | 10916 * | | ^ | 10917 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| | 10918 * | v | | 10919 * | sd_checksum_iostart() sd_checksum_iodone() | 10920 * | | ^ | 10921 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+ 10922 * | v | | 10923 * | sd_pm_iostart() sd_pm_iodone() | 10924 * | | ^ | 10925 * | | | | 10926 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+ 10927 * | ^ 10928 * v | 10929 * sd_core_iostart() | 10930 * | | 10931 * | +------>(*destroypkt)() 10932 * +-> sd_start_cmds() <-+ | | 10933 * | | | v 10934 * | | | scsi_destroy_pkt(9F) 10935 * | | | 10936 * +->(*initpkt)() +- sdintr() 10937 * | | | | 10938 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx() 10939 * | +-> scsi_setup_cdb(9F) | 10940 * | | 10941 * +--> scsi_transport(9F) | 10942 * | | 10943 * +----> SCSA ---->+ 10944 * 10945 * 10946 * This code is based upon the following presumptions: 10947 * 10948 * - iostart and iodone functions operate on buf(9S) structures. These 10949 * functions perform the necessary operations on the buf(9S) and pass 10950 * them along to the next function in the chain by using the macros 10951 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE() 10952 * (for iodone side functions). 10953 * 10954 * - The iostart side functions may sleep. The iodone side functions 10955 * are called under interrupt context and may NOT sleep. Therefore 10956 * iodone side functions also may not call iostart side functions. 10957 * (NOTE: iostart side functions should NOT sleep for memory, as 10958 * this could result in deadlock.) 10959 * 10960 * - An iostart side function may call its corresponding iodone side 10961 * function directly (if necessary). 10962 * 10963 * - In the event of an error, an iostart side function can return a buf(9S) 10964 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and 10965 * b_error in the usual way of course). 10966 * 10967 * - The taskq mechanism may be used by the iodone side functions to dispatch 10968 * requests to the iostart side functions. The iostart side functions in 10969 * this case would be called under the context of a taskq thread, so it's 10970 * OK for them to block/sleep/spin in this case.
10971 * 10972 * - iostart side functions may allocate "shadow" buf(9S) structs and 10973 * pass them along to the next function in the chain. The corresponding 10974 * iodone side functions must coalesce the "shadow" bufs and return 10975 * the "original" buf to the next higher layer. 10976 * 10977 * - The b_private field of the buf(9S) struct holds a pointer to 10978 * an sd_xbuf struct, which contains information needed to 10979 * construct the scsi_pkt for the command. 10980 * 10981 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each 10982 * layer must acquire & release the SD_MUTEX(un) as needed. 10983 */ 10984 10985 10986 /* 10987 * Create taskq for all targets in the system. This is created at 10988 * _init(9E) and destroyed at _fini(9E). 10989 * 10990 * Note: here we set the minalloc to a reasonably high number to ensure that 10991 * we will have an adequate supply of task entries available at interrupt time. 10992 * This is used in conjunction with the TASKQ_PREPOPULATE flag in 10993 * sd_taskq_create(). Since we do not want to sleep for allocations at 10994 * interrupt time, set maxalloc equal to minalloc. That way we will just fail 10995 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq 10996 * requests at any one instant in time. 10997 */ 10998 #define SD_TASKQ_NUMTHREADS 8 10999 #define SD_TASKQ_MINALLOC 256 11000 #define SD_TASKQ_MAXALLOC 256 11001 11002 static taskq_t *sd_tq = NULL; 11003 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC; 11004 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC; 11005 11006 /* 11007 * The following task queue is being created for the write part of 11008 * read-modify-write of non-512 block size devices. 11009 * Limit the number of threads to 1 for now. This number has been chosen 11010 * considering that it currently applies only to DVD-RAM and MO drives, 11011 * for which performance is not the main criterion at this stage. 11012 * Note: whether a single taskq could be used instead remains to be explored. 11013 */ 11014 #define SD_WMR_TASKQ_NUMTHREADS 1 11015 static taskq_t *sd_wmr_tq = NULL; 11016 11017 /* 11018 * Function: sd_taskq_create 11019 * 11020 * Description: Create taskq thread(s) and preallocate task entries 11021 * 11022 * Return Code: None; the allocated taskqs are stored in sd_tq and sd_wmr_tq. 11023 * 11024 * Context: Can sleep. Requires blockable context. 11025 * 11026 * Notes: - The taskq() facility currently is NOT part of the DDI. 11027 * (definitely NOT recommended for 3rd-party drivers!) :-) 11028 * - taskq_create() will block for memory; it will also panic 11029 * if it cannot create the requested number of threads. 11030 * - Currently taskq_create() creates threads that cannot be 11031 * swapped.
11032 * - We use TASKQ_PREPOPULATE to ensure we have an adequate 11033 * supply of taskq entries at interrupt time (ie, so that we 11034 * do not have to sleep for memory) 11035 */ 11036 11037 static void 11038 sd_taskq_create(void) 11039 { 11040 char taskq_name[TASKQ_NAMELEN]; 11041 11042 ASSERT(sd_tq == NULL); 11043 ASSERT(sd_wmr_tq == NULL); 11044 11045 (void) snprintf(taskq_name, sizeof (taskq_name), 11046 "%s_drv_taskq", sd_label); 11047 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS, 11048 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11049 TASKQ_PREPOPULATE)); 11050 11051 (void) snprintf(taskq_name, sizeof (taskq_name), 11052 "%s_rmw_taskq", sd_label); 11053 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS, 11054 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc, 11055 TASKQ_PREPOPULATE)); 11056 } 11057 11058 11059 /* 11060 * Function: sd_taskq_delete 11061 * 11062 * Description: Complementary cleanup routine for sd_taskq_create(). 11063 * 11064 * Context: Kernel thread context. 11065 */ 11066 11067 static void 11068 sd_taskq_delete(void) 11069 { 11070 ASSERT(sd_tq != NULL); 11071 ASSERT(sd_wmr_tq != NULL); 11072 taskq_destroy(sd_tq); 11073 taskq_destroy(sd_wmr_tq); 11074 sd_tq = NULL; 11075 sd_wmr_tq = NULL; 11076 } 11077 11078 11079 /* 11080 * Function: sdstrategy 11081 * 11082 * Description: Driver's strategy (9E) entry point function. 11083 * 11084 * Arguments: bp - pointer to buf(9S) 11085 * 11086 * Return Code: Always returns zero 11087 * 11088 * Context: Kernel thread context. 11089 */ 11090 11091 static int 11092 sdstrategy(struct buf *bp) 11093 { 11094 struct sd_lun *un; 11095 11096 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11097 if (un == NULL) { 11098 bioerror(bp, EIO); 11099 bp->b_resid = bp->b_bcount; 11100 biodone(bp); 11101 return (0); 11102 } 11103 /* As was done in the past, fail new commands if the state is dumping. */ 11104 if (un->un_state == SD_STATE_DUMPING) { 11105 bioerror(bp, ENXIO); 11106 bp->b_resid = bp->b_bcount; 11107 biodone(bp); 11108 return (0); 11109 } 11110 11111 ASSERT(!mutex_owned(SD_MUTEX(un))); 11112 11113 /* 11114 * Commands may sneak in while we release the mutex in 11115 * DDI_SUSPEND, so we should block new commands. However, old 11116 * commands that are still in the driver at this point should 11117 * still be allowed to drain. 11118 */ 11119 mutex_enter(SD_MUTEX(un)); 11120 /* 11121 * Must wait here if either the device is suspended or 11122 * its power level is changing. 11123 */ 11124 while ((un->un_state == SD_STATE_SUSPENDED) || 11125 (un->un_state == SD_STATE_PM_CHANGING)) { 11126 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 11127 } 11128 11129 un->un_ncmds_in_driver++; 11130 11131 /* 11132 * atapi: since we currently run the CD in PIO mode, we need to 11133 * call bp_mapin here to avoid having bp_mapin called in interrupt 11134 * context under the HBA's init_pkt routine. 11135 */ 11136 if (un->un_f_cfg_is_atapi == TRUE) { 11137 mutex_exit(SD_MUTEX(un)); 11138 bp_mapin(bp); 11139 mutex_enter(SD_MUTEX(un)); 11140 } 11141 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n", 11142 un->un_ncmds_in_driver); 11143 11144 mutex_exit(SD_MUTEX(un)); 11145 11146 /* 11147 * This will (eventually) allocate the sd_xbuf area and 11148 * call sd_xbuf_strategy(). We just want to return the 11149 * result of ddi_xbuf_qstrategy so that we have an 11150 * optimized tail call which saves us a stack frame.
11151 */ 11152 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr)); 11153 } 11154 11155 11156 /* 11157 * Function: sd_xbuf_strategy 11158 * 11159 * Description: Function for initiating IO operations via the 11160 * ddi_xbuf_qstrategy() mechanism. 11161 * 11162 * Context: Kernel thread context. 11163 */ 11164 11165 static void 11166 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg) 11167 { 11168 struct sd_lun *un = arg; 11169 11170 ASSERT(bp != NULL); 11171 ASSERT(xp != NULL); 11172 ASSERT(un != NULL); 11173 ASSERT(!mutex_owned(SD_MUTEX(un))); 11174 11175 /* 11176 * Initialize the fields in the xbuf and save a pointer to the 11177 * xbuf in bp->b_private. 11178 */ 11179 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL); 11180 11181 /* Send the buf down the iostart chain */ 11182 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp); 11183 } 11184 11185 11186 /* 11187 * Function: sd_xbuf_init 11188 * 11189 * Description: Prepare the given sd_xbuf struct for use. 11190 * 11191 * Arguments: un - ptr to softstate 11192 * bp - ptr to associated buf(9S) 11193 * xp - ptr to associated sd_xbuf 11194 * chain_type - IO chain type to use: 11195 * SD_CHAIN_NULL 11196 * SD_CHAIN_BUFIO 11197 * SD_CHAIN_USCSI 11198 * SD_CHAIN_DIRECT 11199 * SD_CHAIN_DIRECT_PRIORITY 11200 * pktinfop - ptr to private data struct for scsi_pkt(9S) 11201 * initialization; may be NULL if none. 11202 * 11203 * Context: Kernel thread context 11204 */ 11205 11206 static void 11207 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 11208 uchar_t chain_type, void *pktinfop) 11209 { 11210 int index; 11211 11212 ASSERT(un != NULL); 11213 ASSERT(bp != NULL); 11214 ASSERT(xp != NULL); 11215 11216 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n", 11217 bp, chain_type); 11218 11219 xp->xb_un = un; 11220 xp->xb_pktp = NULL; 11221 xp->xb_pktinfo = pktinfop; 11222 xp->xb_private = bp->b_private; 11223 xp->xb_blkno = (daddr_t)bp->b_blkno; 11224 11225 /* 11226 * Set up the iostart and iodone chain indexes in the xbuf, based 11227 * upon the specified chain type to use. 11228 */ 11229 switch (chain_type) { 11230 case SD_CHAIN_NULL: 11231 /* 11232 * Fall through to just use the values for the buf type, even 11233 * though for the NULL chain these values will never be used. 11234 */ 11235 /* FALLTHRU */ 11236 case SD_CHAIN_BUFIO: 11237 index = un->un_buf_chain_type; 11238 break; 11239 case SD_CHAIN_USCSI: 11240 index = un->un_uscsi_chain_type; 11241 break; 11242 case SD_CHAIN_DIRECT: 11243 index = un->un_direct_chain_type; 11244 break; 11245 case SD_CHAIN_DIRECT_PRIORITY: 11246 index = un->un_priority_chain_type; 11247 break; 11248 default: 11249 /* We're really broken if we ever get here... */ 11250 panic("sd_xbuf_init: illegal chain type!"); 11251 /*NOTREACHED*/ 11252 } 11253 11254 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index; 11255 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index; 11256 11257 /* 11258 * It might be a bit easier to simply bzero the entire xbuf above, 11259 * but it turns out that since we init a fair number of members anyway, 11260 * we save a fair number of cycles by doing explicit assignment of zero.
11261 */ 11262 xp->xb_pkt_flags = 0; 11263 xp->xb_dma_resid = 0; 11264 xp->xb_retry_count = 0; 11265 xp->xb_victim_retry_count = 0; 11266 xp->xb_ua_retry_count = 0; 11267 xp->xb_sense_bp = NULL; 11268 xp->xb_sense_status = 0; 11269 xp->xb_sense_state = 0; 11270 xp->xb_sense_resid = 0; 11271 11272 bp->b_private = xp; 11273 bp->b_flags &= ~(B_DONE | B_ERROR); 11274 bp->b_resid = 0; 11275 bp->av_forw = NULL; 11276 bp->av_back = NULL; 11277 bioerror(bp, 0); 11278 11279 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n"); 11280 } 11281 11282 11283 /* 11284 * Function: sd_uscsi_strategy 11285 * 11286 * Description: Wrapper for calling into the USCSI chain via physio(9F) 11287 * 11288 * Arguments: bp - buf struct ptr 11289 * 11290 * Return Code: Always returns 0 11291 * 11292 * Context: Kernel thread context 11293 */ 11294 11295 static int 11296 sd_uscsi_strategy(struct buf *bp) 11297 { 11298 struct sd_lun *un; 11299 struct sd_uscsi_info *uip; 11300 struct sd_xbuf *xp; 11301 uchar_t chain_type; 11302 11303 ASSERT(bp != NULL); 11304 11305 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp)); 11306 if (un == NULL) { 11307 bioerror(bp, EIO); 11308 bp->b_resid = bp->b_bcount; 11309 biodone(bp); 11310 return (0); 11311 } 11312 11313 ASSERT(!mutex_owned(SD_MUTEX(un))); 11314 11315 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp); 11316 11317 mutex_enter(SD_MUTEX(un)); 11318 /* 11319 * atapi: since we currently run the CD in PIO mode, we need to 11320 * call bp_mapin here to avoid having bp_mapin called in interrupt 11321 * context under the HBA's init_pkt routine. 11322 */ 11323 if (un->un_f_cfg_is_atapi == TRUE) { 11324 mutex_exit(SD_MUTEX(un)); 11325 bp_mapin(bp); 11326 mutex_enter(SD_MUTEX(un)); 11327 } 11328 un->un_ncmds_in_driver++; 11329 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n", 11330 un->un_ncmds_in_driver); 11331 mutex_exit(SD_MUTEX(un)); 11332 11333 /* 11334 * A pointer to a struct sd_uscsi_info is expected in bp->b_private 11335 */ 11336 ASSERT(bp->b_private != NULL); 11337 uip = (struct sd_uscsi_info *)bp->b_private; 11338 11339 switch (uip->ui_flags) { 11340 case SD_PATH_DIRECT: 11341 chain_type = SD_CHAIN_DIRECT; 11342 break; 11343 case SD_PATH_DIRECT_PRIORITY: 11344 chain_type = SD_CHAIN_DIRECT_PRIORITY; 11345 break; 11346 default: 11347 chain_type = SD_CHAIN_USCSI; 11348 break; 11349 } 11350 11351 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 11352 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp); 11353 11354 /* Use the index obtained within xbuf_init */ 11355 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp); 11356 11357 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp); 11358 11359 return (0); 11360 } 11361 11362 11363 /* 11364 * These routines perform raw i/o operations. 11365 */ 11366 /*ARGSUSED*/ 11367 static void 11368 sduscsimin(struct buf *bp) 11369 { 11370 /* 11371 * Do not break up the transfer: the CDB count would then be 11372 * incorrect and data underruns would result (incomplete 11373 * reads/writes which would be retried and then failed; see 11374 * sdintr()).
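* * For example (hypothetical numbers): a USCSI READ(10) whose CDB specifies a transfer length of 16 blocks must be transported as a single request; if a minphys routine were to split it, the CDB would still claim 16 blocks, the target would report a data underrun, and sdintr() would retry and ultimately fail the command.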
11375 */ 11376 } 11377 11378 11379 11380 /* 11381 * Function: sd_send_scsi_cmd 11382 * 11383 * Description: Runs a USCSI command for a user (when called through 11384 * sdioctl) or for the driver. 11385 * 11386 * Arguments: dev - the dev_t for the device 11387 * incmd - ptr to a valid uscsi_cmd struct 11388 * cdbspace - UIO_USERSPACE or UIO_SYSSPACE 11389 * dataspace - UIO_USERSPACE or UIO_SYSSPACE 11390 * rqbufspace - UIO_USERSPACE or UIO_SYSSPACE 11391 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 11392 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 11393 * to use the USCSI "direct" chain and bypass the normal 11394 * command waitq. 11395 * 11396 * Return Code: 0 - successful completion of the given command 11397 * EIO - scsi_reset() failed, or see biowait()/physio() codes. 11398 * ENXIO - soft state not found for specified dev 11399 * EINVAL 11400 * EFAULT - copyin/copyout error 11401 * return code of biowait(9F) or physio(9F): 11402 * EIO - IO error, caller may check incmd->uscsi_status 11403 * ENXIO 11404 * EACCES - reservation conflict 11405 * 11406 * Context: Waits for command to complete. Can sleep. 11407 */ 11408 11409 static int 11410 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, 11411 enum uio_seg cdbspace, enum uio_seg dataspace, enum uio_seg rqbufspace, 11412 int path_flag) 11413 { 11414 struct sd_uscsi_info *uip; 11415 struct uscsi_cmd *uscmd; 11416 struct sd_lun *un; 11417 struct buf *bp; 11418 int rval; 11419 int flags; 11420 11421 un = ddi_get_soft_state(sd_state, SDUNIT(dev)); 11422 if (un == NULL) { 11423 return (ENXIO); 11424 } 11425 11426 ASSERT(!mutex_owned(SD_MUTEX(un))); 11427 11428 #ifdef SDDEBUG 11429 switch (dataspace) { 11430 case UIO_USERSPACE: 11431 SD_TRACE(SD_LOG_IO, un, 11432 "sd_send_scsi_cmd: entry: un:0x%p UIO_USERSPACE\n", un); 11433 break; 11434 case UIO_SYSSPACE: 11435 SD_TRACE(SD_LOG_IO, un, 11436 "sd_send_scsi_cmd: entry: un:0x%p UIO_SYSSPACE\n", un); 11437 break; 11438 default: 11439 SD_TRACE(SD_LOG_IO, un, 11440 "sd_send_scsi_cmd: entry: un:0x%p UNEXPECTED SPACE\n", un); 11441 break; 11442 } 11443 #endif 11444 11445 /* 11446 * Perform resets directly; no need to generate a command to do it. 11447 */ 11448 if (incmd->uscsi_flags & (USCSI_RESET | USCSI_RESET_ALL)) { 11449 flags = ((incmd->uscsi_flags & USCSI_RESET_ALL) != 0) ? 11450 RESET_ALL : RESET_TARGET; 11451 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: Issuing reset\n"); 11452 if (scsi_reset(SD_ADDRESS(un), flags) == 0) { 11453 /* Reset attempt was unsuccessful */ 11454 SD_TRACE(SD_LOG_IO, un, 11455 "sd_send_scsi_cmd: reset: failure\n"); 11456 return (EIO); 11457 } 11458 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: reset: success\n"); 11459 return (0); 11460 } 11461 11462 /* Perfunctory sanity check... */ 11463 if (incmd->uscsi_cdblen <= 0) { 11464 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11465 "invalid uscsi_cdblen, returning EINVAL\n"); 11466 return (EINVAL); 11467 } 11468 11469 /* 11470 * In order not to worry about where the uscsi structure came from 11471 * (or where the cdb it points to came from) we're going to make 11472 * kmem_alloc'd copies of them here. This will also allow reference 11473 * to the data they contain long after this process has gone to 11474 * sleep and its kernel stack has been unmapped, etc. 11475 * 11476 * First get some memory for the uscsi_cmd struct and copy the 11477 * contents of the given uscsi_cmd struct into it.
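* * As a sketch of a typical in-kernel caller (illustrative only; this is not code from this driver), the incoming uscsi_cmd might be built as follows:
*
*	union scsi_cdb cdb;
*	struct uscsi_cmd ucmd;
*
*	bzero(&cdb, sizeof (cdb));
*	bzero(&ucmd, sizeof (ucmd));
*	cdb.scc_cmd = SCMD_TEST_UNIT_READY;
*	ucmd.uscsi_cdb = (caddr_t)&cdb;
*	ucmd.uscsi_cdblen = CDB_GROUP0;
*	ucmd.uscsi_flags = USCSI_SILENT;
*	(void) sd_send_scsi_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE,
*	    UIO_SYSSPACE, SD_PATH_DIRECT);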
11478 */ 11479 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 11480 bcopy(incmd, uscmd, sizeof (struct uscsi_cmd)); 11481 11482 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_cmd: uscsi_cmd", 11483 (uchar_t *)uscmd, sizeof (struct uscsi_cmd), SD_LOG_HEX); 11484 11485 /* 11486 * Now get some space for the CDB, and copy the given CDB into 11487 * it. Use ddi_copyin() in case the data is in user space. 11488 */ 11489 uscmd->uscsi_cdb = kmem_zalloc((size_t)incmd->uscsi_cdblen, KM_SLEEP); 11490 flags = (cdbspace == UIO_SYSSPACE) ? FKIOCTL : 0; 11491 if (ddi_copyin(incmd->uscsi_cdb, uscmd->uscsi_cdb, 11492 (uint_t)incmd->uscsi_cdblen, flags) != 0) { 11493 kmem_free(uscmd->uscsi_cdb, (size_t)incmd->uscsi_cdblen); 11494 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 11495 return (EFAULT); 11496 } 11497 11498 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_cmd: CDB", 11499 (uchar_t *)uscmd->uscsi_cdb, incmd->uscsi_cdblen, SD_LOG_HEX); 11500 11501 bp = getrbuf(KM_SLEEP); 11502 11503 /* 11504 * Allocate an sd_uscsi_info struct and fill it with the info 11505 * needed by sd_initpkt_for_uscsi(). Then put the pointer into 11506 * b_private in the buf for sd_initpkt_for_uscsi(). Note that 11507 * since we allocate the buf here in this function, we do not 11508 * need to preserve the prior contents of b_private. 11509 * The sd_uscsi_info struct is also used by sd_uscsi_strategy() 11510 */ 11511 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP); 11512 uip->ui_flags = path_flag; 11513 uip->ui_cmdp = uscmd; 11514 bp->b_private = uip; 11515 11516 /* 11517 * Initialize Request Sense buffering, if requested. 11518 */ 11519 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 11520 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 11521 /* 11522 * Here uscmd->uscsi_rqbuf currently points to the caller's 11523 * buffer, but we replace this with a kernel buffer that 11524 * we allocate to use with the sense data. The sense data 11525 * (if present) gets copied into this new buffer before the 11526 * command is completed. Then we copy the sense data from 11527 * our allocated buf into the caller's buffer below. Note 11528 * that incmd->uscsi_rqbuf and incmd->uscsi_rqlen are used 11529 * below to perform the copy back to the caller's buf. 11530 */ 11531 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 11532 if (rqbufspace == UIO_USERSPACE) { 11533 uscmd->uscsi_rqlen = SENSE_LENGTH; 11534 uscmd->uscsi_rqresid = SENSE_LENGTH; 11535 } else { 11536 uchar_t rlen = min(SENSE_LENGTH, uscmd->uscsi_rqlen); 11537 uscmd->uscsi_rqlen = rlen; 11538 uscmd->uscsi_rqresid = rlen; 11539 } 11540 } else { 11541 uscmd->uscsi_rqbuf = NULL; 11542 uscmd->uscsi_rqlen = 0; 11543 uscmd->uscsi_rqresid = 0; 11544 } 11545 11546 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: rqbuf:0x%p rqlen:%d\n", 11547 uscmd->uscsi_rqbuf, uscmd->uscsi_rqlen); 11548 11549 if (un->un_f_is_fibre == FALSE) { 11550 /* 11551 * Force asynchronous mode, if necessary. Doing this here 11552 * has the unfortunate effect of running other queued 11553 * commands async also, but since the main purpose of this 11554 * capability is downloading new drive firmware, we can 11555 * probably live with it. 
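* * The capability handshake below works as follows (semantics per scsi_ifgetcap(9F)/scsi_ifsetcap(9F)): scsi_ifgetcap(..., "synchronous", 1) returns 1 when synchronous data transfer is currently enabled, and scsi_ifsetcap(..., "synchronous", 0, 1) then forces the link into asynchronous mode; re-enabling is the symmetric call with a value of 1.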
11556 */ 11557 if ((uscmd->uscsi_flags & USCSI_ASYNC) != 0) { 11558 if (scsi_ifgetcap(SD_ADDRESS(un), "synchronous", 1) 11559 == 1) { 11560 if (scsi_ifsetcap(SD_ADDRESS(un), 11561 "synchronous", 0, 1) == 1) { 11562 SD_TRACE(SD_LOG_IO, un, 11563 "sd_send_scsi_cmd: forced async ok\n"); 11564 } else { 11565 SD_TRACE(SD_LOG_IO, un, 11566 "sd_send_scsi_cmd:\ 11567 forced async failed\n"); 11568 rval = EINVAL; 11569 goto done; 11570 } 11571 } 11572 } 11573 11574 /* 11575 * Re-enable synchronous mode, if requested 11576 */ 11577 if (uscmd->uscsi_flags & USCSI_SYNC) { 11578 if (scsi_ifgetcap(SD_ADDRESS(un), "synchronous", 1) 11579 == 0) { 11580 int i = scsi_ifsetcap(SD_ADDRESS(un), 11581 "synchronous", 1, 1); 11582 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11583 "re-enabled sync %s\n", 11584 (i == 1) ? "ok" : "failed"); 11585 } 11586 } 11587 } 11588 11589 /* 11590 * Commands sent with priority are intended for error recovery 11591 * situations, and do not have retries performed. 11592 */ 11593 if (path_flag == SD_PATH_DIRECT_PRIORITY) { 11594 uscmd->uscsi_flags |= USCSI_DIAGNOSE; 11595 } 11596 11597 /* 11598 * If we're going to do actual I/O, let physio do all the right things 11599 */ 11600 if (uscmd->uscsi_buflen != 0) { 11601 struct iovec aiov; 11602 struct uio auio; 11603 struct uio *uio = &auio; 11604 11605 bzero(&auio, sizeof (struct uio)); 11606 bzero(&aiov, sizeof (struct iovec)); 11607 aiov.iov_base = uscmd->uscsi_bufaddr; 11608 aiov.iov_len = uscmd->uscsi_buflen; 11609 uio->uio_iov = &aiov; 11610 11611 uio->uio_iovcnt = 1; 11612 uio->uio_resid = uscmd->uscsi_buflen; 11613 uio->uio_segflg = dataspace; 11614 11615 /* 11616 * physio() will block here until the command completes.... 11617 */ 11618 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: calling physio.\n"); 11619 11620 rval = physio(sd_uscsi_strategy, bp, dev, 11621 ((uscmd->uscsi_flags & USCSI_READ) ? B_READ : B_WRITE), 11622 sduscsimin, uio); 11623 11624 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11625 "returned from physio with 0x%x\n", rval); 11626 11627 } else { 11628 /* 11629 * We have to mimic what physio would do here! Argh! 11630 */ 11631 bp->b_flags = B_BUSY | 11632 ((uscmd->uscsi_flags & USCSI_READ) ? B_READ : B_WRITE); 11633 bp->b_edev = dev; 11634 bp->b_dev = cmpdev(dev); /* maybe unnecessary? */ 11635 bp->b_bcount = 0; 11636 bp->b_blkno = 0; 11637 11638 SD_TRACE(SD_LOG_IO, un, 11639 "sd_send_scsi_cmd: calling sd_uscsi_strategy...\n"); 11640 11641 (void) sd_uscsi_strategy(bp); 11642 11643 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: calling biowait\n"); 11644 11645 rval = biowait(bp); 11646 11647 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11648 "returned from biowait with 0x%x\n", rval); 11649 } 11650 11651 done: 11652 11653 #ifdef SDDEBUG 11654 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11655 "uscsi_status: 0x%02x uscsi_resid:0x%x\n", 11656 uscmd->uscsi_status, uscmd->uscsi_resid); 11657 if (uscmd->uscsi_bufaddr != NULL) { 11658 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11659 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n", 11660 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen); 11661 if (dataspace == UIO_SYSSPACE) { 11662 SD_DUMP_MEMORY(un, SD_LOG_IO, 11663 "data", (uchar_t *)uscmd->uscsi_bufaddr, 11664 uscmd->uscsi_buflen, SD_LOG_HEX); 11665 } 11666 } 11667 #endif 11668 11669 /* 11670 * Get the status and residual to return to the caller. 
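* * Worked example for the sense copy-back below (illustrative numbers; suppose SENSE_LENGTH were 20 on this build): if the target returned 18 bytes of sense data then uscsi_rqresid == 2, so rqlen == 20 - 2 == 18, and the amount copied out is further clamped to the caller's own uscsi_rqlen.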
11671 */ 11672 incmd->uscsi_status = uscmd->uscsi_status; 11673 incmd->uscsi_resid = uscmd->uscsi_resid; 11674 11675 /* 11676 * If the caller wants sense data, copy back whatever sense data 11677 * we may have gotten, and update the relevant rqsense info. 11678 */ 11679 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 11680 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 11681 11682 int rqlen = uscmd->uscsi_rqlen - uscmd->uscsi_rqresid; 11683 rqlen = min(((int)incmd->uscsi_rqlen), rqlen); 11684 11685 /* Update the Request Sense status and resid */ 11686 incmd->uscsi_rqresid = incmd->uscsi_rqlen - rqlen; 11687 incmd->uscsi_rqstatus = uscmd->uscsi_rqstatus; 11688 11689 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11690 "uscsi_rqstatus: 0x%02x uscsi_rqresid:0x%x\n", 11691 incmd->uscsi_rqstatus, incmd->uscsi_rqresid); 11692 11693 /* Copy out the sense data for user processes */ 11694 if ((incmd->uscsi_rqbuf != NULL) && (rqlen != 0)) { 11695 int flags = 11696 (rqbufspace == UIO_USERSPACE) ? 0 : FKIOCTL; 11697 if (ddi_copyout(uscmd->uscsi_rqbuf, incmd->uscsi_rqbuf, 11698 rqlen, flags) != 0) { 11699 rval = EFAULT; 11700 } 11701 /* 11702 * Note: Can't touch incmd->uscsi_rqbuf so use 11703 * uscmd->uscsi_rqbuf instead. They're the same. 11704 */ 11705 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_cmd: " 11706 "incmd->uscsi_rqbuf: 0x%p rqlen:%d\n", 11707 incmd->uscsi_rqbuf, rqlen); 11708 SD_DUMP_MEMORY(un, SD_LOG_IO, "rq", 11709 (uchar_t *)uscmd->uscsi_rqbuf, rqlen, SD_LOG_HEX); 11710 } 11711 } 11712 11713 /* 11714 * Free allocated resources and return; mapout the buf in case it was 11715 * mapped in by a lower layer. 11716 */ 11717 bp_mapout(bp); 11718 freerbuf(bp); 11719 kmem_free(uip, sizeof (struct sd_uscsi_info)); 11720 if (uscmd->uscsi_rqbuf != NULL) { 11721 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH); 11722 } 11723 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen); 11724 kmem_free(uscmd, sizeof (struct uscsi_cmd)); 11725 11726 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_cmd: exit\n"); 11727 11728 return (rval); 11729 } 11730 11731 11732 /* 11733 * Function: sd_buf_iodone 11734 * 11735 * Description: Frees the sd_xbuf & returns the buf to its originator. 11736 * 11737 * Context: May be called from interrupt context. 11738 */ 11739 /* ARGSUSED */ 11740 static void 11741 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp) 11742 { 11743 struct sd_xbuf *xp; 11744 11745 ASSERT(un != NULL); 11746 ASSERT(bp != NULL); 11747 ASSERT(!mutex_owned(SD_MUTEX(un))); 11748 11749 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n"); 11750 11751 xp = SD_GET_XBUF(bp); 11752 ASSERT(xp != NULL); 11753 11754 mutex_enter(SD_MUTEX(un)); 11755 11756 /* 11757 * Record the time at which the command completed. 11758 * This is used to determine whether the system has been 11759 * idle long enough to be declared idle to the PM framework, 11760 * which lowers overhead and therefore improves 11761 * performance per I/O operation.
11762 */ 11763 un->un_pm_idle_time = ddi_get_time(); 11764 11765 un->un_ncmds_in_driver--; 11766 ASSERT(un->un_ncmds_in_driver >= 0); 11767 SD_INFO(SD_LOG_IO, un, "sd_buf_iodone: un_ncmds_in_driver = %ld\n", 11768 un->un_ncmds_in_driver); 11769 11770 mutex_exit(SD_MUTEX(un)); 11771 11772 ddi_xbuf_done(bp, un->un_xbuf_attr); /* xbuf is gone after this */ 11773 biodone(bp); /* bp is gone after this */ 11774 11775 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n"); 11776 } 11777 11778 11779 /* 11780 * Function: sd_uscsi_iodone 11781 * 11782 * Description: Frees the sd_xbuf & returns the buf to its originator. 11783 * 11784 * Context: May be called from interrupt context. 11785 */ 11786 /* ARGSUSED */ 11787 static void 11788 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 11789 { 11790 struct sd_xbuf *xp; 11791 11792 ASSERT(un != NULL); 11793 ASSERT(bp != NULL); 11794 11795 xp = SD_GET_XBUF(bp); 11796 ASSERT(xp != NULL); 11797 ASSERT(!mutex_owned(SD_MUTEX(un))); 11798 11799 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n"); 11800 11801 mutex_enter(SD_MUTEX(un)); 11802 11803 /* 11804 * Record the time at which the command completed. 11805 * This is used to determine whether the system has been 11806 * idle long enough to be declared idle to the PM framework, 11807 * which lowers overhead and therefore improves 11808 * performance per I/O operation. 11809 */ 11810 un->un_pm_idle_time = ddi_get_time(); 11811 11812 un->un_ncmds_in_driver--; 11813 ASSERT(un->un_ncmds_in_driver >= 0); 11814 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n", 11815 un->un_ncmds_in_driver); 11816 11817 mutex_exit(SD_MUTEX(un)); 11818 11819 kmem_free(xp, sizeof (struct sd_xbuf)); 11820 biodone(bp); 11821 11822 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n"); 11823 } 11824 11825 11826 /* 11827 * Function: sd_mapblockaddr_iostart 11828 * 11829 * Description: Verify the request lies within the partition limits for 11830 * the indicated minor device. Issue an "overrun" buf if the 11831 * request would exceed the partition range. Converts 11832 * partition-relative block address to absolute. 11833 * 11834 * Context: Can sleep 11835 * 11836 * Issues: This follows what the old code did, in terms of accessing 11837 * some of the partition info in the unit struct without holding 11838 * the mutex. This is a general issue: if the partition info 11839 * can be altered while IO is in progress, then as soon as we send 11840 * a buf, its partitioning can be invalid before it gets to the 11841 * device. Probably the right fix is to move partitioning out 11842 * of the driver entirely. 11843 */ 11844 11845 static void 11846 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp) 11847 { 11848 daddr_t nblocks; /* #blocks in the given partition */ 11849 daddr_t blocknum; /* Block number specified by the buf */ 11850 size_t requested_nblocks; 11851 size_t available_nblocks; 11852 int partition; 11853 diskaddr_t partition_offset; 11854 struct sd_xbuf *xp; 11855 11856 11857 ASSERT(un != NULL); 11858 ASSERT(bp != NULL); 11859 ASSERT(!mutex_owned(SD_MUTEX(un))); 11860 11861 SD_TRACE(SD_LOG_IO_PARTITION, un, 11862 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp); 11863 11864 xp = SD_GET_XBUF(bp); 11865 ASSERT(xp != NULL); 11866 11867 /* 11868 * If the geometry is not indicated as valid, attempt to access 11869 * the unit & verify the geometry/label. This can be the case for 11870 * removable-media devices, or if the device was opened in 11871 * NDELAY/NONBLOCK mode.
11872 */ 11873 if ((un->un_f_geometry_is_valid != TRUE) && 11874 (sd_ready_and_valid(un) != SD_READY_VALID)) { 11875 /* 11876 * For removable devices it is possible to start an I/O 11877 * without media by opening the device in nodelay mode. 11878 * Also for writable CDs there can be many scenarios where 11879 * there is no geometry yet but volume manager is trying to 11880 * issue a read() just because it can see TOC on the CD. So 11881 * do not print a message for removables. 11882 */ 11883 if (!ISREMOVABLE(un)) { 11884 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 11885 "i/o to invalid geometry\n"); 11886 } 11887 bioerror(bp, EIO); 11888 bp->b_resid = bp->b_bcount; 11889 SD_BEGIN_IODONE(index, un, bp); 11890 return; 11891 } 11892 11893 partition = SDPART(bp->b_edev); 11894 11895 /* #blocks in partition */ 11896 nblocks = un->un_map[partition].dkl_nblk; 11897 11898 /* Use of a local variable potentially improves performance slightly */ 11899 partition_offset = un->un_offset[partition]; 11900 11901 /* 11902 * blocknum is the starting block number of the request. At this 11903 * point it is still relative to the start of the minor device. 11904 */ 11905 blocknum = xp->xb_blkno; 11906 11907 /* 11908 * Legacy: If the starting block number is one past the last block 11909 * in the partition, do not set B_ERROR in the buf. 11910 */ 11911 if (blocknum == nblocks) { 11912 goto error_exit; 11913 } 11914 11915 /* 11916 * Confirm that the first block of the request lies within the 11917 * partition limits. Also the requested number of bytes must be 11918 * a multiple of the system block size. 11919 */ 11920 if ((blocknum < 0) || (blocknum >= nblocks) || 11921 ((bp->b_bcount & (un->un_sys_blocksize - 1)) != 0)) { 11922 bp->b_flags |= B_ERROR; 11923 goto error_exit; 11924 } 11925 11926 /* 11927 * If the requested # blocks exceeds the available # blocks, that 11928 * is an overrun of the partition. 11929 */ 11930 requested_nblocks = SD_BYTES2SYSBLOCKS(un, bp->b_bcount); 11931 available_nblocks = (size_t)(nblocks - blocknum); 11932 ASSERT(nblocks >= blocknum); 11933 11934 if (requested_nblocks > available_nblocks) { 11935 /* 11936 * Allocate an "overrun" buf to allow the request to proceed 11937 * for the amount of space available in the partition. The 11938 * amount not transferred will be added into the b_resid 11939 * when the operation is complete. The overrun buf 11940 * replaces the original buf here, and the original buf 11941 * is saved inside the overrun buf, for later use. 11942 */ 11943 size_t resid = SD_SYSBLOCKS2BYTES(un, 11944 (offset_t)(requested_nblocks - available_nblocks)); 11945 size_t count = bp->b_bcount - resid; 11946 /* 11947 * Note: count is unsigned and thus can never be less 11948 * than 0, so ASSERT that the original values are 11949 * correct. 11950 */ 11951 ASSERT(bp->b_bcount >= resid); 11952 11953 bp = sd_bioclone_alloc(bp, count, blocknum, 11954 (int (*)(struct buf *)) sd_mapblockaddr_iodone); 11955 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */ 11956 ASSERT(xp != NULL); 11957 } 11958 11959 /* At this point there should be no residual for this buf. */ 11960 ASSERT(bp->b_resid == 0); 11961 11962 /* Convert the block number to an absolute address.
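(Illustrative: with un_offset[partition] == 1024 and a partition-relative xb_blkno of 10, the absolute address becomes 1034.)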
*/ 11963 xp->xb_blkno += partition_offset; 11964 11965 SD_NEXT_IOSTART(index, un, bp); 11966 11967 SD_TRACE(SD_LOG_IO_PARTITION, un, 11968 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp); 11969 11970 return; 11971 11972 error_exit: 11973 bp->b_resid = bp->b_bcount; 11974 SD_BEGIN_IODONE(index, un, bp); 11975 SD_TRACE(SD_LOG_IO_PARTITION, un, 11976 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp); 11977 } 11978 11979 11980 /* 11981 * Function: sd_mapblockaddr_iodone 11982 * 11983 * Description: Completion-side processing for partition management. 11984 * 11985 * Context: May be called under interrupt context 11986 */ 11987 11988 static void 11989 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp) 11990 { 11991 /* int partition; */ /* Not used, see below. */ 11992 ASSERT(un != NULL); 11993 ASSERT(bp != NULL); 11994 ASSERT(!mutex_owned(SD_MUTEX(un))); 11995 11996 SD_TRACE(SD_LOG_IO_PARTITION, un, 11997 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp); 11998 11999 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) { 12000 /* 12001 * We have an "overrun" buf to deal with... 12002 */ 12003 struct sd_xbuf *xp; 12004 struct buf *obp; /* ptr to the original buf */ 12005 12006 xp = SD_GET_XBUF(bp); 12007 ASSERT(xp != NULL); 12008 12009 /* Retrieve the pointer to the original buf */ 12010 obp = (struct buf *)xp->xb_private; 12011 ASSERT(obp != NULL); 12012 12013 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid); 12014 bioerror(obp, bp->b_error); 12015 12016 sd_bioclone_free(bp); 12017 12018 /* 12019 * Get back the original buf. 12020 * Note that since the restoration of xb_blkno below 12021 * was removed, the sd_xbuf is not needed. 12022 */ 12023 bp = obp; 12024 /* 12025 * xp = SD_GET_XBUF(bp); 12026 * ASSERT(xp != NULL); 12027 */ 12028 } 12029 12030 /* 12031 * Convert xp->xb_blkno back to a minor-device relative value. 12032 * Note: this has been commented out, as it is not needed in the 12033 * current implementation of the driver (i.e., this function is at 12034 * the top of the layering chain, so the info would be 12035 * discarded) and it is in the "hot" IO path. 12036 * 12037 * partition = getminor(bp->b_edev) & SDPART_MASK; 12038 * xp->xb_blkno -= un->un_offset[partition]; 12039 */ 12040 12041 SD_NEXT_IODONE(index, un, bp); 12042 12043 SD_TRACE(SD_LOG_IO_PARTITION, un, 12044 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp); 12045 } 12046 12047 12048 /* 12049 * Function: sd_mapblocksize_iostart 12050 * 12051 * Description: Convert between system block size (un->un_sys_blocksize) 12052 * and target block size (un->un_tgt_blocksize). 12053 * 12054 * Context: Can sleep to allocate resources. 12055 * 12056 * Assumptions: A higher layer has already performed any partition validation, 12057 * and converted the xp->xb_blkno to an absolute value relative 12058 * to the start of the device. 12059 * 12060 * It is also assumed that the higher layer has implemented 12061 * an "overrun" mechanism for the case where the request would 12062 * read/write beyond the end of a partition. In this case we 12063 * assume (and ASSERT) that bp->b_resid == 0. 12064 * 12065 * Note: The implementation for this routine assumes the target 12066 * block size remains constant between allocation and transport.
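* * The canonical case (an assumed example, not the only one) is CD/DVD media: the system addresses the device in 512-byte un_sys_blocksize blocks while the media uses un_tgt_blocksize == 2048, so unaligned requests must be expanded to whole 2 KB target blocks; a worked example accompanies the computations below.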
12067 */ 12068 12069 static void 12070 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp) 12071 { 12072 struct sd_mapblocksize_info *bsp; 12073 struct sd_xbuf *xp; 12074 offset_t first_byte; 12075 daddr_t start_block, end_block; 12076 daddr_t request_bytes; 12077 ushort_t is_aligned = FALSE; 12078 12079 ASSERT(un != NULL); 12080 ASSERT(bp != NULL); 12081 ASSERT(!mutex_owned(SD_MUTEX(un))); 12082 ASSERT(bp->b_resid == 0); 12083 12084 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12085 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp); 12086 12087 /* 12088 * For a non-writable CD, a write request is an error 12089 */ 12090 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) && 12091 (un->un_f_mmc_writable_media == FALSE)) { 12092 bioerror(bp, EIO); 12093 bp->b_resid = bp->b_bcount; 12094 SD_BEGIN_IODONE(index, un, bp); 12095 return; 12096 } 12097 12098 /* 12099 * We do not need a shadow buf if the device is using 12100 * un->un_sys_blocksize as its block size or if bcount == 0. 12101 * In this case there is no layer-private data block allocated. 12102 */ 12103 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12104 (bp->b_bcount == 0)) { 12105 goto done; 12106 } 12107 12108 #if defined(__i386) || defined(__amd64) 12109 /* We do not support non-block-aligned transfers for ROD devices */ 12110 ASSERT(!ISROD(un)); 12111 #endif 12112 12113 xp = SD_GET_XBUF(bp); 12114 ASSERT(xp != NULL); 12115 12116 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12117 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n", 12118 un->un_tgt_blocksize, un->un_sys_blocksize); 12119 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12120 "request start block:0x%x\n", xp->xb_blkno); 12121 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: " 12122 "request len:0x%x\n", bp->b_bcount); 12123 12124 /* 12125 * Allocate the layer-private data area for the mapblocksize layer. 12126 * Layers are allowed to use the xp_private member of the sd_xbuf 12127 * struct to store the pointer to their layer-private data block, but 12128 * each layer also has the responsibility of restoring the prior 12129 * contents of xb_private before returning the buf/xbuf to the 12130 * higher layer that sent it. 12131 * 12132 * Here we save the prior contents of xp->xb_private into the 12133 * bsp->mbs_oprivate field of our layer-private data area. This value 12134 * is restored by sd_mapblocksize_iodone() just prior to freeing up 12135 * the layer-private area and returning the buf/xbuf to the layer 12136 * that sent it. 12137 * 12138 * Note that here we use kmem_zalloc for the allocation as there are 12139 * parts of the mapblocksize code that expect certain fields to be 12140 * zero unless explicitly set to a required value. 12141 */ 12142 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12143 bsp->mbs_oprivate = xp->xb_private; 12144 xp->xb_private = bsp; 12145 12146 /* 12147 * This treats the data on the disk (target) as an array of bytes. 12148 * first_byte is the byte offset, from the beginning of the device, 12149 * to the location of the request. This is converted from a 12150 * un->un_sys_blocksize block address to a byte offset, and then back 12151 * to a block address based upon a un->un_tgt_blocksize block size. 12152 * 12153 * xp->xb_blkno should be absolute upon entry into this function, 12154 * but it is based upon partitions that use the "system" 12155 * block size. It must be adjusted to reflect the block size of 12156 * the target.
12157 * 12158 * Note that end_block is actually the block that follows the last 12159 * block of the request, but that's what is needed for the computation. 12160 */ 12161 first_byte = SD_SYSBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12162 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize; 12163 end_block = (first_byte + bp->b_bcount + un->un_tgt_blocksize - 1) / 12164 un->un_tgt_blocksize; 12165 12166 /* request_bytes is rounded up to a multiple of the target block size */ 12167 request_bytes = (end_block - start_block) * un->un_tgt_blocksize; 12168 12169 /* 12170 * See if the starting address of the request and the request 12171 * length are aligned on a un->un_tgt_blocksize boundary. If aligned 12172 * then we do not need to allocate a shadow buf to handle the request. 12173 */ 12174 if (((first_byte % un->un_tgt_blocksize) == 0) && 12175 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) { 12176 is_aligned = TRUE; 12177 } 12178 12179 if ((bp->b_flags & B_READ) == 0) { 12180 /* 12181 * Lock the range for a write operation. An aligned request is 12182 * considered a simple write; otherwise the request must be a 12183 * read-modify-write. 12184 */ 12185 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1, 12186 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW); 12187 } 12188 12189 /* 12190 * Alloc a shadow buf if the request is not aligned. Also, this is 12191 * where the READ command is generated for a read-modify-write. (The 12192 * write phase is deferred until after the read completes.) 12193 */ 12194 if (is_aligned == FALSE) { 12195 12196 struct sd_mapblocksize_info *shadow_bsp; 12197 struct sd_xbuf *shadow_xp; 12198 struct buf *shadow_bp; 12199 12200 /* 12201 * Allocate the shadow buf and its associated xbuf. Note that 12202 * after this call the xb_blkno value in both the original 12203 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the 12204 * same: absolute (relative to the start of the device), and 12205 * adjusted for the target block size. The b_blkno in the 12206 * shadow buf will also be set to this value. We should never 12207 * change b_blkno in the original bp however. 12208 * 12209 * Note also that the shadow buf will always need to be a 12210 * READ command, regardless of whether the incoming command 12211 * is a READ or a WRITE. 12212 */ 12213 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ, 12214 xp->xb_blkno, 12215 (int (*)(struct buf *)) sd_mapblocksize_iodone); 12216 12217 shadow_xp = SD_GET_XBUF(shadow_bp); 12218 12219 /* 12220 * Allocate the layer-private data for the shadow buf. 12221 * (No need to preserve xb_private in the shadow xbuf.) 12222 */ 12223 shadow_xp->xb_private = shadow_bsp = 12224 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP); 12225 12226 /* 12227 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone 12228 * to figure out where the start of the user data is (based upon 12229 * the system block size) in the data returned by the READ 12230 * command (which will be based upon the target blocksize). Note 12231 * that this is only really used if the request is unaligned.
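* * Worked example (illustrative values): with un_sys_blocksize == 512, un_tgt_blocksize == 2048, xb_blkno == 3 and b_bcount == 1024: first_byte == 1536; start_block == 1536 / 2048 == 0; end_block == (1536 + 1024 + 2047) / 2048 == 2; request_bytes == 2 * 2048 == 4096. Since 1536 % 2048 != 0 the request is unaligned, so a 4096-byte shadow buf is allocated and mbs_copy_offset == 1536 - (0 * 2048) == 1536.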
12232 */ 12233 bsp->mbs_copy_offset = (ssize_t)(first_byte - 12234 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize)); 12235 ASSERT((bsp->mbs_copy_offset >= 0) && 12236 (bsp->mbs_copy_offset < un->un_tgt_blocksize)); 12237 12238 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset; 12239 12240 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index; 12241 12242 /* Transfer the wmap (if any) to the shadow buf */ 12243 shadow_bsp->mbs_wmp = bsp->mbs_wmp; 12244 bsp->mbs_wmp = NULL; 12245 12246 /* 12247 * The shadow buf goes on from here in place of the 12248 * original buf. 12249 */ 12250 shadow_bsp->mbs_orig_bp = bp; 12251 bp = shadow_bp; 12252 } 12253 12254 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12255 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno); 12256 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12257 "sd_mapblocksize_iostart: tgt request len:0x%x\n", 12258 request_bytes); 12259 SD_INFO(SD_LOG_IO_RMMEDIA, un, 12260 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp); 12261 12262 done: 12263 SD_NEXT_IOSTART(index, un, bp); 12264 12265 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12266 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp); 12267 } 12268 12269 12270 /* 12271 * Function: sd_mapblocksize_iodone 12272 * 12273 * Description: Completion side processing for block-size mapping. 12274 * 12275 * Context: May be called under interrupt context 12276 */ 12277 12278 static void 12279 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp) 12280 { 12281 struct sd_mapblocksize_info *bsp; 12282 struct sd_xbuf *xp; 12283 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */ 12284 struct buf *orig_bp; /* ptr to the original buf */ 12285 offset_t shadow_end; 12286 offset_t request_end; 12287 offset_t shadow_start; 12288 ssize_t copy_offset; 12289 size_t copy_length; 12290 size_t shortfall; 12291 uint_t is_write; /* TRUE if this bp is a WRITE */ 12292 uint_t has_wmap; /* TRUE is this bp has a wmap */ 12293 12294 ASSERT(un != NULL); 12295 ASSERT(bp != NULL); 12296 12297 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 12298 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp); 12299 12300 /* 12301 * There is no shadow buf or layer-private data if the target is 12302 * using un->un_sys_blocksize as its block size or if bcount == 0. 12303 */ 12304 if ((un->un_tgt_blocksize == un->un_sys_blocksize) || 12305 (bp->b_bcount == 0)) { 12306 goto exit; 12307 } 12308 12309 xp = SD_GET_XBUF(bp); 12310 ASSERT(xp != NULL); 12311 12312 /* Retrieve the pointer to the layer-private data area from the xbuf. */ 12313 bsp = xp->xb_private; 12314 12315 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE; 12316 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE; 12317 12318 if (is_write) { 12319 /* 12320 * For a WRITE request we must free up the block range that 12321 * we have locked up. This holds regardless of whether this is 12322 * an aligned write request or a read-modify-write request. 12323 */ 12324 sd_range_unlock(un, bsp->mbs_wmp); 12325 bsp->mbs_wmp = NULL; 12326 } 12327 12328 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) { 12329 /* 12330 * An aligned read or write command will have no shadow buf; 12331 * there is not much else to do with it. 12332 */ 12333 goto done; 12334 } 12335 12336 orig_bp = bsp->mbs_orig_bp; 12337 ASSERT(orig_bp != NULL); 12338 orig_xp = SD_GET_XBUF(orig_bp); 12339 ASSERT(orig_xp != NULL); 12340 ASSERT(!mutex_owned(SD_MUTEX(un))); 12341 12342 if (!is_write && has_wmap) { 12343 /* 12344 * A READ with a wmap means this is the READ phase of a 12345 * read-modify-write. 
If an error occurred on the READ then 12346 * we do not proceed with the WRITE phase or copy any data. 12347 * Just release the write maps and return with an error. 12348 */ 12349 if ((bp->b_resid != 0) || (bp->b_error != 0)) { 12350 orig_bp->b_resid = orig_bp->b_bcount; 12351 bioerror(orig_bp, bp->b_error); 12352 sd_range_unlock(un, bsp->mbs_wmp); 12353 goto freebuf_done; 12354 } 12355 } 12356 12357 /* 12358 * Here is where we set up to copy the data from the shadow buf 12359 * into the space associated with the original buf. 12360 * 12361 * To deal with the conversion between block sizes, these 12362 * computations treat the data as an array of bytes, with the 12363 * first byte (byte 0) corresponding to the first byte in the 12364 * first block on the disk. 12365 */ 12366 12367 /* 12368 * shadow_start and shadow_end give the byte range of the data 12369 * returned with the shadow IO request. 12370 */ 12371 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno); 12372 shadow_end = shadow_start + bp->b_bcount - bp->b_resid; 12373 12374 /* 12375 * copy_offset gives the offset (in bytes) from the start of the first 12376 * block of the READ request to the beginning of the data. We retrieve 12377 * this value from the mbs_copy_offset saved in the layer-private data 12378 * by sd_mapblocksize_iostart(). copy_length gives the amount of 12379 * data to be copied (in bytes). 12380 */ 12381 copy_offset = bsp->mbs_copy_offset; 12382 ASSERT((copy_offset >= 0) && (copy_offset < un->un_tgt_blocksize)); 12383 copy_length = orig_bp->b_bcount; 12384 request_end = shadow_start + copy_offset + orig_bp->b_bcount; 12385 12386 /* 12387 * Set up the resid and error fields of orig_bp as appropriate. 12388 */ 12389 if (shadow_end >= request_end) { 12390 /* We got all the requested data; set resid to zero */ 12391 orig_bp->b_resid = 0; 12392 } else { 12393 /* 12394 * We failed to get enough data to fully satisfy the original 12395 * request. Just copy back whatever data we got and set 12396 * up the residual and error code as required. 12397 * 12398 * 'shortfall' is the amount by which the data received with the 12399 * shadow buf has "fallen short" of the requested amount. 12400 */ 12401 shortfall = (size_t)(request_end - shadow_end); 12402 12403 if (shortfall > orig_bp->b_bcount) { 12404 /* 12405 * We did not get enough data to even partially 12406 * fulfill the original request. The residual is 12407 * equal to the amount requested. 12408 */ 12409 orig_bp->b_resid = orig_bp->b_bcount; 12410 } else { 12411 /* 12412 * We did not get all the data that we requested 12413 * from the device, but we will try to return what 12414 * portion we did get. 12415 */ 12416 orig_bp->b_resid = shortfall; 12417 } 12418 ASSERT(copy_length >= orig_bp->b_resid); 12419 copy_length -= orig_bp->b_resid; 12420 } 12421 12422 /* Propagate the error code from the shadow buf to the original buf */ 12423 bioerror(orig_bp, bp->b_error); 12424 12425 if (is_write) { 12426 goto freebuf_done; /* No data copying for a WRITE */ 12427 } 12428 12429 if (has_wmap) { 12430 /* 12431 * This is a READ command from the READ phase of a 12432 * read-modify-write request. We have to copy the data given 12433 * by the user OVER the data returned by the READ command, 12434 * then convert the command from a READ to a WRITE and send 12435 * it back to the target.
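* * Continuing the illustrative example above: the 1024 bytes of user data in orig_bp are copied over the 4096-byte shadow image at copy_offset 1536 (replacing bytes 1536..2559 of the image), and the entire two-block image is then written back, preserving the surrounding target-block data.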
12436 */ 12437 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset, 12438 copy_length); 12439 12440 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */ 12441 12442 /* 12443 * Dispatch the WRITE command to the taskq thread, which 12444 * will in turn send the command to the target. When the 12445 * WRITE command completes, we (sd_mapblocksize_iodone()) 12446 * will get called again as part of the iodone chain 12447 * processing for it. Note that we will still be dealing 12448 * with the shadow buf at that point. 12449 */ 12450 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp, 12451 KM_NOSLEEP) != 0) { 12452 /* 12453 * Dispatch was successful so we are done. Return 12454 * without going any higher up the iodone chain. Do 12455 * not free up any layer-private data until after the 12456 * WRITE completes. 12457 */ 12458 return; 12459 } 12460 12461 /* 12462 * Dispatch of the WRITE command failed; set up the error 12463 * condition and send this IO back up the iodone chain. 12464 */ 12465 bioerror(orig_bp, EIO); 12466 orig_bp->b_resid = orig_bp->b_bcount; 12467 12468 } else { 12469 /* 12470 * This is a regular READ request (ie, not a RMW). Copy the 12471 * data from the shadow buf into the original buf. The 12472 * copy_offset compensates for any "misalignment" between the 12473 * shadow buf (with its un->un_tgt_blocksize blocks) and the 12474 * original buf (with its un->un_sys_blocksize blocks). 12475 */ 12476 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr, 12477 copy_length); 12478 } 12479 12480 freebuf_done: 12481 12482 /* 12483 * At this point we still have both the shadow buf AND the original 12484 * buf to deal with, as well as the layer-private data area in each. 12485 * Local variables are as follows: 12486 * 12487 * bp -- points to shadow buf 12488 * xp -- points to xbuf of shadow buf 12489 * bsp -- points to layer-private data area of shadow buf 12490 * orig_bp -- points to original buf 12491 * 12492 * First free the shadow buf and its associated xbuf, then free the 12493 * layer-private data area from the shadow buf. There is no need to 12494 * restore xb_private in the shadow xbuf. 12495 */ 12496 sd_shadow_buf_free(bp); 12497 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12498 12499 /* 12500 * Now update the local variables to point to the original buf, xbuf, 12501 * and layer-private area. 12502 */ 12503 bp = orig_bp; 12504 xp = SD_GET_XBUF(bp); 12505 ASSERT(xp != NULL); 12506 ASSERT(xp == orig_xp); 12507 bsp = xp->xb_private; 12508 ASSERT(bsp != NULL); 12509 12510 done: 12511 /* 12512 * Restore xb_private to whatever it was set to by the next higher 12513 * layer in the chain, then free the layer-private data area. 12514 */ 12515 xp->xb_private = bsp->mbs_oprivate; 12516 kmem_free(bsp, sizeof (struct sd_mapblocksize_info)); 12517 12518 exit: 12519 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp), 12520 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp); 12521 12522 SD_NEXT_IODONE(index, un, bp); 12523 } 12524 12525 12526 /* 12527 * Function: sd_checksum_iostart 12528 * 12529 * Description: A stub function for a layer that's currently not used. 12530 * For now just a placeholder. 
12531 * 12532 * Context: Kernel thread context 12533 */ 12534 12535 static void 12536 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp) 12537 { 12538 ASSERT(un != NULL); 12539 ASSERT(bp != NULL); 12540 ASSERT(!mutex_owned(SD_MUTEX(un))); 12541 SD_NEXT_IOSTART(index, un, bp); 12542 } 12543 12544 12545 /* 12546 * Function: sd_checksum_iodone 12547 * 12548 * Description: A stub function for a layer that's currently not used. 12549 * For now just a placeholder. 12550 * 12551 * Context: May be called under interrupt context 12552 */ 12553 12554 static void 12555 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp) 12556 { 12557 ASSERT(un != NULL); 12558 ASSERT(bp != NULL); 12559 ASSERT(!mutex_owned(SD_MUTEX(un))); 12560 SD_NEXT_IODONE(index, un, bp); 12561 } 12562 12563 12564 /* 12565 * Function: sd_checksum_uscsi_iostart 12566 * 12567 * Description: A stub function for a layer that's currently not used. 12568 * For now just a placeholder. 12569 * 12570 * Context: Kernel thread context 12571 */ 12572 12573 static void 12574 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp) 12575 { 12576 ASSERT(un != NULL); 12577 ASSERT(bp != NULL); 12578 ASSERT(!mutex_owned(SD_MUTEX(un))); 12579 SD_NEXT_IOSTART(index, un, bp); 12580 } 12581 12582 12583 /* 12584 * Function: sd_checksum_uscsi_iodone 12585 * 12586 * Description: A stub function for a layer that's currently not used. 12587 * For now just a placeholder. 12588 * 12589 * Context: May be called under interrupt context 12590 */ 12591 12592 static void 12593 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp) 12594 { 12595 ASSERT(un != NULL); 12596 ASSERT(bp != NULL); 12597 ASSERT(!mutex_owned(SD_MUTEX(un))); 12598 SD_NEXT_IODONE(index, un, bp); 12599 } 12600 12601 12602 /* 12603 * Function: sd_pm_iostart 12604 * 12605 * Description: iostart-side routine for Power mangement. 12606 * 12607 * Context: Kernel thread context 12608 */ 12609 12610 static void 12611 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp) 12612 { 12613 ASSERT(un != NULL); 12614 ASSERT(bp != NULL); 12615 ASSERT(!mutex_owned(SD_MUTEX(un))); 12616 ASSERT(!mutex_owned(&un->un_pm_mutex)); 12617 12618 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n"); 12619 12620 if (sd_pm_entry(un) != DDI_SUCCESS) { 12621 /* 12622 * Set up to return the failed buf back up the 'iodone' 12623 * side of the calling chain. 12624 */ 12625 bioerror(bp, EIO); 12626 bp->b_resid = bp->b_bcount; 12627 12628 SD_BEGIN_IODONE(index, un, bp); 12629 12630 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 12631 return; 12632 } 12633 12634 SD_NEXT_IOSTART(index, un, bp); 12635 12636 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n"); 12637 } 12638 12639 12640 /* 12641 * Function: sd_pm_iodone 12642 * 12643 * Description: iodone-side routine for power mangement. 12644 * 12645 * Context: may be called from interrupt context 12646 */ 12647 12648 static void 12649 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp) 12650 { 12651 ASSERT(un != NULL); 12652 ASSERT(bp != NULL); 12653 ASSERT(!mutex_owned(&un->un_pm_mutex)); 12654 12655 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n"); 12656 12657 /* 12658 * After attach the following flag is only read, so don't 12659 * take the penalty of acquiring a mutex for it. 
12660 */ 12661 if (un->un_f_pm_is_enabled == TRUE) { 12662 sd_pm_exit(un); 12663 } 12664 12665 SD_NEXT_IODONE(index, un, bp); 12666 12667 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n"); 12668 } 12669 12670 12671 /* 12672 * Function: sd_core_iostart 12673 * 12674 * Description: Primary driver function for enqueuing buf(9S) structs from 12675 * the system and initiating IO to the target device 12676 * 12677 * Context: Kernel thread context. Can sleep. 12678 * 12679 * Assumptions: - The given xp->xb_blkno is absolute 12680 * (ie, relative to the start of the device). 12681 * - The IO is to be done using the native blocksize of 12682 * the device, as specified in un->un_tgt_blocksize. 12683 */ 12684 /* ARGSUSED */ 12685 static void 12686 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp) 12687 { 12688 struct sd_xbuf *xp; 12689 12690 ASSERT(un != NULL); 12691 ASSERT(bp != NULL); 12692 ASSERT(!mutex_owned(SD_MUTEX(un))); 12693 ASSERT(bp->b_resid == 0); 12694 12695 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp); 12696 12697 xp = SD_GET_XBUF(bp); 12698 ASSERT(xp != NULL); 12699 12700 mutex_enter(SD_MUTEX(un)); 12701 12702 /* 12703 * If we are currently in the failfast state, fail any new IO 12704 * that has B_FAILFAST set, then return. 12705 */ 12706 if ((bp->b_flags & B_FAILFAST) && 12707 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) { 12708 mutex_exit(SD_MUTEX(un)); 12709 bioerror(bp, EIO); 12710 bp->b_resid = bp->b_bcount; 12711 SD_BEGIN_IODONE(index, un, bp); 12712 return; 12713 } 12714 12715 if (SD_IS_DIRECT_PRIORITY(xp)) { 12716 /* 12717 * Priority command -- transport it immediately. 12718 * 12719 * Note: We may want to assert that USCSI_DIAGNOSE is set, 12720 * because all direct priority commands should be associated 12721 * with error recovery actions which we don't want to retry. 12722 */ 12723 sd_start_cmds(un, bp); 12724 } else { 12725 /* 12726 * Normal command -- add it to the wait queue, then start 12727 * transporting commands from the wait queue. 12728 */ 12729 sd_add_buf_to_waitq(un, bp); 12730 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 12731 sd_start_cmds(un, NULL); 12732 } 12733 12734 mutex_exit(SD_MUTEX(un)); 12735 12736 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp); 12737 } 12738 12739 12740 /* 12741 * Function: sd_init_cdb_limits 12742 * 12743 * Description: This is to handle scsi_pkt initialization differences 12744 * between the driver platforms. 12745 * 12746 * Legacy behaviors: 12747 * 12748 * If the block number or the sector count exceeds the 12749 * capabilities of a Group 0 command, shift over to a 12750 * Group 1 command. We don't blindly use Group 1 12751 * commands because a) some drives (CDC Wren IVs) get a 12752 * bit confused, and b) there is probably a fair amount 12753 * of speed difference for a target to receive and decode 12754 * a 10 byte command instead of a 6 byte command. 12755 * 12756 * The xfer time difference of 6 vs 10 byte CDBs is 12757 * still significant so this code is still worthwhile. 12758 * 10 byte CDBs are very inefficient with the fas HBA driver 12759 * and older disks. Each CDB byte took 1 usec with some 12760 * popular disks. 12761 * 12762 * Context: Must be called at attach time 12763 */ 12764 12765 static void 12766 sd_init_cdb_limits(struct sd_lun *un) 12767 { 12768 /* 12769 * Use CDB_GROUP1 commands for most devices except for 12770 * parallel SCSI fixed drives in which case we get better 12771 * performance using CDB_GROUP0 commands (where applicable). 
12772 */ 12773 un->un_mincdb = SD_CDB_GROUP1; 12774 #if !defined(__fibre) 12775 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) && 12776 !ISREMOVABLE(un)) { 12777 un->un_mincdb = SD_CDB_GROUP0; 12778 } 12779 #endif 12780 12781 /* 12782 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4 12783 * commands for fixed disks unless we are building for a 32 bit 12784 * kernel. 12785 */ 12786 #ifdef _LP64 12787 un->un_maxcdb = (ISREMOVABLE(un)) ? SD_CDB_GROUP5 : SD_CDB_GROUP4; 12788 #else 12789 un->un_maxcdb = (ISREMOVABLE(un)) ? SD_CDB_GROUP5 : SD_CDB_GROUP1; 12790 #endif 12791 12792 /* 12793 * x86 systems require the PKT_DMA_PARTIAL flag 12794 */ 12795 #if defined(__x86) 12796 un->un_pkt_flags = PKT_DMA_PARTIAL; 12797 #else 12798 un->un_pkt_flags = 0; 12799 #endif 12800 12801 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE) 12802 ? sizeof (struct scsi_arq_status) : 1); 12803 un->un_cmd_timeout = (ushort_t)sd_io_time; 12804 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout; 12805 } 12806 12807 12808 /* 12809 * Function: sd_initpkt_for_buf 12810 * 12811 * Description: Allocate and initialize for transport a scsi_pkt struct, 12812 * based upon the info specified in the given buf struct. 12813 * 12814 * Assumes the xb_blkno in the request is absolute (ie, 12815 * relative to the start of the device (NOT partition!). 12816 * Also assumes that the request is using the native block 12817 * size of the device (as returned by the READ CAPACITY 12818 * command). 12819 * 12820 * Return Code: SD_PKT_ALLOC_SUCCESS 12821 * SD_PKT_ALLOC_FAILURE 12822 * SD_PKT_ALLOC_FAILURE_NO_DMA 12823 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL 12824 * 12825 * Context: Kernel thread and may be called from software interrupt context 12826 * as part of a sdrunout callback. This function may not block or 12827 * call routines that block 12828 */ 12829 12830 static int 12831 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp) 12832 { 12833 struct sd_xbuf *xp; 12834 struct scsi_pkt *pktp = NULL; 12835 struct sd_lun *un; 12836 size_t blockcount; 12837 daddr_t startblock; 12838 int rval; 12839 int cmd_flags; 12840 12841 ASSERT(bp != NULL); 12842 ASSERT(pktpp != NULL); 12843 xp = SD_GET_XBUF(bp); 12844 ASSERT(xp != NULL); 12845 un = SD_GET_UN(bp); 12846 ASSERT(un != NULL); 12847 ASSERT(mutex_owned(SD_MUTEX(un))); 12848 ASSERT(bp->b_resid == 0); 12849 12850 SD_TRACE(SD_LOG_IO_CORE, un, 12851 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp); 12852 12853 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 12854 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) { 12855 /* 12856 * Already have a scsi_pkt -- just need DMA resources. 12857 * We must recompute the CDB in case the mapping returns 12858 * a nonzero pkt_resid. 12859 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer 12860 * that is being retried, the unmap/remap of the DMA resouces 12861 * will result in the entire transfer starting over again 12862 * from the very first block. 12863 */ 12864 ASSERT(xp->xb_pktp != NULL); 12865 pktp = xp->xb_pktp; 12866 } else { 12867 pktp = NULL; 12868 } 12869 #endif /* __i386 || __amd64 */ 12870 12871 startblock = xp->xb_blkno; /* Absolute block num. 
 */
	blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */

	cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);

#else

	cmd_flags = un->un_pkt_flags | xp->xb_pkt_flags;

#endif

	/*
	 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
	 * call scsi_init_pkt, and build the CDB.
	 */
	rval = sd_setup_rw_pkt(un, &pktp, bp,
	    cmd_flags, sdrunout, (caddr_t)un,
	    startblock, blockcount);

	if (rval == 0) {
		/*
		 * Success.
		 *
		 * If partial DMA is being used and required for this transfer,
		 * set it up here.
		 */
		if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
		    (pktp->pkt_resid != 0)) {

			/*
			 * Save the pkt_resid for the
			 * next xfer
			 */
			xp->xb_dma_resid = pktp->pkt_resid;

			/* rezero resid */
			pktp->pkt_resid = 0;

		} else {
			xp->xb_dma_resid = 0;
		}

		pktp->pkt_flags = un->un_tagflags;
		pktp->pkt_time  = un->un_cmd_timeout;
		pktp->pkt_comp  = sdintr;

		pktp->pkt_private = bp;
		*pktpp = pktp;

		SD_TRACE(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);

#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
#endif

		return (SD_PKT_ALLOC_SUCCESS);

	}

	/*
	 * SD_PKT_ALLOC_FAILURE is the only expected failure code
	 * from sd_setup_rw_pkt.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	if (rval == SD_PKT_ALLOC_FAILURE) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations.  The driver will not
		 * suspend, pm_suspend, or detach while the state is RWAIT.
		 */
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);

		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	} else {
		/*
		 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
		 *
		 * This should never happen.  Maybe someone messed with the
		 * kernel's minphys?
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Request rejected: too large for CDB: "
		    "lba:0x%08lx  len:0x%08lx\n", startblock, blockcount);
		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);

	}
}


/*
 * Function: sd_destroypkt_for_buf
 *
 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
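 *		This is the teardown counterpart of sd_initpkt_for_buf()
 *		above; uscsi packets are instead torn down by
 *		sd_destroypkt_for_uscsi(), which must also preserve status
 *		and sense data for the caller.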
12977 * 12978 * Context: Kernel thread or interrupt context 12979 */ 12980 12981 static void 12982 sd_destroypkt_for_buf(struct buf *bp) 12983 { 12984 ASSERT(bp != NULL); 12985 ASSERT(SD_GET_UN(bp) != NULL); 12986 12987 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 12988 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp); 12989 12990 ASSERT(SD_GET_PKTP(bp) != NULL); 12991 scsi_destroy_pkt(SD_GET_PKTP(bp)); 12992 12993 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp), 12994 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp); 12995 } 12996 12997 /* 12998 * Function: sd_setup_rw_pkt 12999 * 13000 * Description: Determines appropriate CDB group for the requested LBA 13001 * and transfer length, calls scsi_init_pkt, and builds 13002 * the CDB. Do not use for partial DMA transfers except 13003 * for the initial transfer since the CDB size must 13004 * remain constant. 13005 * 13006 * Context: Kernel thread and may be called from software interrupt 13007 * context as part of a sdrunout callback. This function may not 13008 * block or call routines that block 13009 */ 13010 13011 13012 int 13013 sd_setup_rw_pkt(struct sd_lun *un, 13014 struct scsi_pkt **pktpp, struct buf *bp, int flags, 13015 int (*callback)(caddr_t), caddr_t callback_arg, 13016 diskaddr_t lba, uint32_t blockcount) 13017 { 13018 struct scsi_pkt *return_pktp; 13019 union scsi_cdb *cdbp; 13020 struct sd_cdbinfo *cp = NULL; 13021 int i; 13022 13023 /* 13024 * See which size CDB to use, based upon the request. 13025 */ 13026 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) { 13027 13028 /* 13029 * Check lba and block count against sd_cdbtab limits. 13030 * In the partial DMA case, we have to use the same size 13031 * CDB for all the transfers. Check lba + blockcount 13032 * against the max LBA so we know that segment of the 13033 * transfer can use the CDB we select. 13034 */ 13035 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) && 13036 (blockcount <= sd_cdbtab[i].sc_maxlen)) { 13037 13038 /* 13039 * The command will fit into the CDB type 13040 * specified by sd_cdbtab[i]. 13041 */ 13042 cp = sd_cdbtab + i; 13043 13044 /* 13045 * Call scsi_init_pkt so we can fill in the 13046 * CDB. 13047 */ 13048 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp, 13049 bp, cp->sc_grpcode, un->un_status_len, 0, 13050 flags, callback, callback_arg); 13051 13052 if (return_pktp != NULL) { 13053 13054 /* 13055 * Return new value of pkt 13056 */ 13057 *pktpp = return_pktp; 13058 13059 /* 13060 * To be safe, zero the CDB insuring there is 13061 * no leftover data from a previous command. 13062 */ 13063 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode); 13064 13065 /* 13066 * Handle partial DMA mapping 13067 */ 13068 if (return_pktp->pkt_resid != 0) { 13069 13070 /* 13071 * Not going to xfer as many blocks as 13072 * originally expected 13073 */ 13074 blockcount -= 13075 SD_BYTES2TGTBLOCKS(un, 13076 return_pktp->pkt_resid); 13077 } 13078 13079 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp; 13080 13081 /* 13082 * Set command byte based on the CDB 13083 * type we matched. 13084 */ 13085 cdbp->scc_cmd = cp->sc_grpmask | 13086 ((bp->b_flags & B_READ) ? 
 SCMD_READ : SCMD_WRITE);

				SD_FILL_SCSI1_LUN(un, return_pktp);

				/*
				 * Fill in LBA and length
				 */
				ASSERT((cp->sc_grpcode == CDB_GROUP1) ||
				    (cp->sc_grpcode == CDB_GROUP4) ||
				    (cp->sc_grpcode == CDB_GROUP0) ||
				    (cp->sc_grpcode == CDB_GROUP5));

				if (cp->sc_grpcode == CDB_GROUP1) {
					FORMG1ADDR(cdbp, lba);
					FORMG1COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP4) {
					FORMG4LONGADDR(cdbp, lba);
					FORMG4COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP0) {
					FORMG0ADDR(cdbp, lba);
					FORMG0COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP5) {
					FORMG5ADDR(cdbp, lba);
					FORMG5COUNT(cdbp, blockcount);
					return (0);
				}

				/*
				 * It should be impossible to not match one
				 * of the CDB types above, so we should never
				 * reach this point.  Set the CDB command byte
				 * to test-unit-ready to avoid writing
				 * to somewhere we don't intend.
				 */
				cdbp->scc_cmd = SCMD_TEST_UNIT_READY;
				return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
			} else {
				/*
				 * Couldn't get scsi_pkt
				 */
				return (SD_PKT_ALLOC_FAILURE);
			}
		}
	}

	/*
	 * None of the available CDB types were suitable.  This really
	 * should never happen: on a 64-bit system we support
	 * READ16/WRITE16, which hold an entire 64-bit disk address, and
	 * on a 32-bit system we refuse to bind to a device larger than
	 * 2TB, so addresses will never be larger than 32 bits.
	 */
	return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
}

/*
 * Function: sd_setup_next_rw_pkt
 *
 * Description: Set up the packet for partial DMA transfers, except for the
 *		initial transfer.  sd_setup_rw_pkt should be used for
 *		the initial transfer.
 *
 * Context: Kernel thread and may be called from interrupt context.
 */

int
sd_setup_next_rw_pkt(struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp,
    diskaddr_t lba, uint32_t blockcount)
{
	uchar_t com;
	union scsi_cdb *cdbp;
	uchar_t cdb_group_id;

	ASSERT(pktp != NULL);
	ASSERT(pktp->pkt_cdbp != NULL);

	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
	com = cdbp->scc_cmd;
	cdb_group_id = CDB_GROUPID(com);

	ASSERT((cdb_group_id == CDB_GROUPID_0) ||
	    (cdb_group_id == CDB_GROUPID_1) ||
	    (cdb_group_id == CDB_GROUPID_4) ||
	    (cdb_group_id == CDB_GROUPID_5));

	/*
	 * Move pkt to the next portion of the xfer.
	 * func is NULL_FUNC so we do not have to release
	 * the disk mutex here.
	 */
	if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0,
	    NULL_FUNC, NULL) == pktp) {
		/* Success.
  Handle partial DMA */
		if (pktp->pkt_resid != 0) {
			blockcount -=
			    SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
		}

		cdbp->scc_cmd = com;
		SD_FILL_SCSI1_LUN(un, pktp);
		if (cdb_group_id == CDB_GROUPID_1) {
			FORMG1ADDR(cdbp, lba);
			FORMG1COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_4) {
			FORMG4LONGADDR(cdbp, lba);
			FORMG4COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_0) {
			FORMG0ADDR(cdbp, lba);
			FORMG0COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_5) {
			FORMG5ADDR(cdbp, lba);
			FORMG5COUNT(cdbp, blockcount);
			return (0);
		}

		/* Unreachable */
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
	}

	/*
	 * Error setting up next portion of cmd transfer.
	 * Something is definitely very wrong and this
	 * should not happen.
	 */
	return (SD_PKT_ALLOC_FAILURE);
}

/*
 * Function: sd_initpkt_for_uscsi
 *
 * Description: Allocate and initialize for transport a scsi_pkt struct,
 *		based upon the info specified in the given uscsi_cmd struct.
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *		SD_PKT_ALLOC_FAILURE
 *		SD_PKT_ALLOC_FAILURE_NO_DMA
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *
 * Context: Kernel thread and may be called from software interrupt context
 *		as part of a sdrunout callback. This function may not block or
 *		call routines that block
 */

static int
sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	struct sd_lun	*un;
	uint32_t	flags = 0;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);

	/* Allocate the scsi_pkt for the command. */
	pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
	    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
	    sizeof (struct scsi_arq_status), 0, un->un_pkt_flags,
	    sdrunout, (caddr_t)un);

	if (pktp == NULL) {
		*pktpp = NULL;
		/*
		 * Set the driver state to RWAIT to indicate the driver
		 * is waiting on resource allocations.  The driver will not
		 * suspend, pm_suspend, or detach while the state is RWAIT.
		 */
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);

		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	}

	/*
	 * We do not do DMA breakup for USCSI commands, so return failure
	 * here if all the needed DMA resources were not allocated.
	 */
	if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
	    (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
		scsi_destroy_pkt(pktp);
		SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
		    "No partial DMA for USCSI.
  exit: buf:0x%p\n", bp);
		return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
	}

	/* Init the CDB from the given uscsi struct */
	(void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
	    uscmd->uscsi_cdb[0], 0, 0, 0);

	SD_FILL_SCSI1_LUN(un, pktp);

	/*
	 * Set up the optional USCSI flags.  See the uscsi(7I) man page
	 * for a listing of the supported flags.
	 */

	if (uscmd->uscsi_flags & USCSI_SILENT) {
		flags |= FLAG_SILENT;
	}

	if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
		flags |= FLAG_DIAGNOSE;
	}

	if (uscmd->uscsi_flags & USCSI_ISOLATE) {
		flags |= FLAG_ISOLATE;
	}

	if (un->un_f_is_fibre == FALSE) {
		if (uscmd->uscsi_flags & USCSI_RENEGOT) {
			flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
		}
	}

	/*
	 * Set the pkt flags here so we save time later.
	 * Note: these flags are NOT documented in the uscsi(7I) man page.
	 */
	if (uscmd->uscsi_flags & USCSI_HEAD) {
		flags |= FLAG_HEAD;
	}

	if (uscmd->uscsi_flags & USCSI_NOINTR) {
		flags |= FLAG_NOINTR;
	}

	/*
	 * For tagged queueing, things get a bit complicated.
	 * Check first for head of queue and last for ordered queue.
	 * If neither head nor ordered, use the default driver tag flags.
	 */
	if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
		if (uscmd->uscsi_flags & USCSI_HTAG) {
			flags |= FLAG_HTAG;
		} else if (uscmd->uscsi_flags & USCSI_OTAG) {
			flags |= FLAG_OTAG;
		} else {
			flags |= un->un_tagflags & FLAG_TAGMASK;
		}
	}

	if (uscmd->uscsi_flags & USCSI_NODISCON) {
		flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
	}

	pktp->pkt_flags = flags;

	/* Copy the caller's CDB into the pkt... */
	bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);

	if (uscmd->uscsi_timeout == 0) {
		pktp->pkt_time = un->un_uscsi_timeout;
	} else {
		pktp->pkt_time = uscmd->uscsi_timeout;
	}

	/* need it later to identify USCSI request in sdintr */
	xp->xb_pkt_flags |= SD_XB_USCSICMD;

	xp->xb_sense_resid = uscmd->uscsi_rqresid;

	pktp->pkt_private = bp;
	pktp->pkt_comp = sdintr;
	*pktpp = pktp;

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);

	return (SD_PKT_ALLOC_SUCCESS);
}


/*
 * Function: sd_destroypkt_for_uscsi
 *
 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
 *		IOs.  Also saves relevant info into the associated uscsi_cmd
 *		struct.
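 *
 *		A hedged sketch of the originating side (userland, not this
 *		file): the uscsi_cmd that eventually reaches this code is
 *		typically built and issued via the USCSIOCMD ioctl, e.g.:
 *
 *			struct uscsi_cmd ucmd;
 *			union scsi_cdb cdb;
 *			char rqbuf[SENSE_LENGTH];
 *
 *			bzero(&ucmd, sizeof (ucmd));
 *			bzero(&cdb, sizeof (cdb));
 *			cdb.scc_cmd = SCMD_TEST_UNIT_READY;
 *			ucmd.uscsi_cdb = (caddr_t)&cdb;
 *			ucmd.uscsi_cdblen = CDB_GROUP0;
 *			ucmd.uscsi_flags = USCSI_SILENT | USCSI_RQENABLE;
 *			ucmd.uscsi_rqbuf = rqbuf;
 *			ucmd.uscsi_rqlen = sizeof (rqbuf);
 *			(void) ioctl(fd, USCSIOCMD, &ucmd);
 *
 *		On return, uscsi_status, uscsi_resid, and the sense data in
 *		uscsi_rqbuf hold exactly what this function saves below.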
13390 * 13391 * Context: May be called under interrupt context 13392 */ 13393 13394 static void 13395 sd_destroypkt_for_uscsi(struct buf *bp) 13396 { 13397 struct uscsi_cmd *uscmd; 13398 struct sd_xbuf *xp; 13399 struct scsi_pkt *pktp; 13400 struct sd_lun *un; 13401 13402 ASSERT(bp != NULL); 13403 xp = SD_GET_XBUF(bp); 13404 ASSERT(xp != NULL); 13405 un = SD_GET_UN(bp); 13406 ASSERT(un != NULL); 13407 ASSERT(!mutex_owned(SD_MUTEX(un))); 13408 pktp = SD_GET_PKTP(bp); 13409 ASSERT(pktp != NULL); 13410 13411 SD_TRACE(SD_LOG_IO_CORE, un, 13412 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp); 13413 13414 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */ 13415 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo; 13416 ASSERT(uscmd != NULL); 13417 13418 /* Save the status and the residual into the uscsi_cmd struct */ 13419 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK); 13420 uscmd->uscsi_resid = bp->b_resid; 13421 13422 /* 13423 * If enabled, copy any saved sense data into the area specified 13424 * by the uscsi command. 13425 */ 13426 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) && 13427 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) { 13428 /* 13429 * Note: uscmd->uscsi_rqbuf should always point to a buffer 13430 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd()) 13431 */ 13432 uscmd->uscsi_rqstatus = xp->xb_sense_status; 13433 uscmd->uscsi_rqresid = xp->xb_sense_resid; 13434 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf, SENSE_LENGTH); 13435 } 13436 13437 /* We are done with the scsi_pkt; free it now */ 13438 ASSERT(SD_GET_PKTP(bp) != NULL); 13439 scsi_destroy_pkt(SD_GET_PKTP(bp)); 13440 13441 SD_TRACE(SD_LOG_IO_CORE, un, 13442 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp); 13443 } 13444 13445 13446 /* 13447 * Function: sd_bioclone_alloc 13448 * 13449 * Description: Allocate a buf(9S) and init it as per the given buf 13450 * and the various arguments. The associated sd_xbuf 13451 * struct is (nearly) duplicated. The struct buf *bp 13452 * argument is saved in new_xp->xb_private. 13453 * 13454 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 13455 * datalen - size of data area for the shadow bp 13456 * blkno - starting LBA 13457 * func - function pointer for b_iodone in the shadow buf. (May 13458 * be NULL if none.) 13459 * 13460 * Return Code: Pointer to allocates buf(9S) struct 13461 * 13462 * Context: Can sleep. 13463 */ 13464 13465 static struct buf * 13466 sd_bioclone_alloc(struct buf *bp, size_t datalen, 13467 daddr_t blkno, int (*func)(struct buf *)) 13468 { 13469 struct sd_lun *un; 13470 struct sd_xbuf *xp; 13471 struct sd_xbuf *new_xp; 13472 struct buf *new_bp; 13473 13474 ASSERT(bp != NULL); 13475 xp = SD_GET_XBUF(bp); 13476 ASSERT(xp != NULL); 13477 un = SD_GET_UN(bp); 13478 ASSERT(un != NULL); 13479 ASSERT(!mutex_owned(SD_MUTEX(un))); 13480 13481 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func, 13482 NULL, KM_SLEEP); 13483 13484 new_bp->b_lblkno = blkno; 13485 13486 /* 13487 * Allocate an xbuf for the shadow bp and copy the contents of the 13488 * original xbuf into it. 13489 */ 13490 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 13491 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 13492 13493 /* 13494 * The given bp is automatically saved in the xb_private member 13495 * of the new xbuf. Callers are allowed to depend on this. 
13496 */ 13497 new_xp->xb_private = bp; 13498 13499 new_bp->b_private = new_xp; 13500 13501 return (new_bp); 13502 } 13503 13504 /* 13505 * Function: sd_shadow_buf_alloc 13506 * 13507 * Description: Allocate a buf(9S) and init it as per the given buf 13508 * and the various arguments. The associated sd_xbuf 13509 * struct is (nearly) duplicated. The struct buf *bp 13510 * argument is saved in new_xp->xb_private. 13511 * 13512 * Arguments: bp - ptr the the buf(9S) to be "shadowed" 13513 * datalen - size of data area for the shadow bp 13514 * bflags - B_READ or B_WRITE (pseudo flag) 13515 * blkno - starting LBA 13516 * func - function pointer for b_iodone in the shadow buf. (May 13517 * be NULL if none.) 13518 * 13519 * Return Code: Pointer to allocates buf(9S) struct 13520 * 13521 * Context: Can sleep. 13522 */ 13523 13524 static struct buf * 13525 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags, 13526 daddr_t blkno, int (*func)(struct buf *)) 13527 { 13528 struct sd_lun *un; 13529 struct sd_xbuf *xp; 13530 struct sd_xbuf *new_xp; 13531 struct buf *new_bp; 13532 13533 ASSERT(bp != NULL); 13534 xp = SD_GET_XBUF(bp); 13535 ASSERT(xp != NULL); 13536 un = SD_GET_UN(bp); 13537 ASSERT(un != NULL); 13538 ASSERT(!mutex_owned(SD_MUTEX(un))); 13539 13540 if (bp->b_flags & (B_PAGEIO | B_PHYS)) { 13541 bp_mapin(bp); 13542 } 13543 13544 bflags &= (B_READ | B_WRITE); 13545 #if defined(__i386) || defined(__amd64) 13546 new_bp = getrbuf(KM_SLEEP); 13547 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP); 13548 new_bp->b_bcount = datalen; 13549 new_bp->b_flags = bp->b_flags | bflags; 13550 #else 13551 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL, 13552 datalen, bflags, SLEEP_FUNC, NULL); 13553 #endif 13554 new_bp->av_forw = NULL; 13555 new_bp->av_back = NULL; 13556 new_bp->b_dev = bp->b_dev; 13557 new_bp->b_blkno = blkno; 13558 new_bp->b_iodone = func; 13559 new_bp->b_edev = bp->b_edev; 13560 new_bp->b_resid = 0; 13561 13562 /* We need to preserve the B_FAILFAST flag */ 13563 if (bp->b_flags & B_FAILFAST) { 13564 new_bp->b_flags |= B_FAILFAST; 13565 } 13566 13567 /* 13568 * Allocate an xbuf for the shadow bp and copy the contents of the 13569 * original xbuf into it. 13570 */ 13571 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 13572 bcopy(xp, new_xp, sizeof (struct sd_xbuf)); 13573 13574 /* Need later to copy data between the shadow buf & original buf! */ 13575 new_xp->xb_pkt_flags |= PKT_CONSISTENT; 13576 13577 /* 13578 * The given bp is automatically saved in the xb_private member 13579 * of the new xbuf. Callers are allowed to depend on this. 13580 */ 13581 new_xp->xb_private = bp; 13582 13583 new_bp->b_private = new_xp; 13584 13585 return (new_bp); 13586 } 13587 13588 /* 13589 * Function: sd_bioclone_free 13590 * 13591 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations 13592 * in the larger than partition operation. 13593 * 13594 * Context: May be called under interrupt context 13595 */ 13596 13597 static void 13598 sd_bioclone_free(struct buf *bp) 13599 { 13600 struct sd_xbuf *xp; 13601 13602 ASSERT(bp != NULL); 13603 xp = SD_GET_XBUF(bp); 13604 ASSERT(xp != NULL); 13605 13606 /* 13607 * Call bp_mapout() before freeing the buf, in case a lower 13608 * layer or HBA had done a bp_mapin(). we must do this here 13609 * as we are the "originator" of the shadow buf. 13610 */ 13611 bp_mapout(bp); 13612 13613 /* 13614 * Null out b_iodone before freeing the bp, to ensure that the driver 13615 * never gets confused by a stale value in this field. 
(Just a little 13616 * extra defensiveness here.) 13617 */ 13618 bp->b_iodone = NULL; 13619 13620 freerbuf(bp); 13621 13622 kmem_free(xp, sizeof (struct sd_xbuf)); 13623 } 13624 13625 /* 13626 * Function: sd_shadow_buf_free 13627 * 13628 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations. 13629 * 13630 * Context: May be called under interrupt context 13631 */ 13632 13633 static void 13634 sd_shadow_buf_free(struct buf *bp) 13635 { 13636 struct sd_xbuf *xp; 13637 13638 ASSERT(bp != NULL); 13639 xp = SD_GET_XBUF(bp); 13640 ASSERT(xp != NULL); 13641 13642 #if defined(__sparc) 13643 /* 13644 * Call bp_mapout() before freeing the buf, in case a lower 13645 * layer or HBA had done a bp_mapin(). we must do this here 13646 * as we are the "originator" of the shadow buf. 13647 */ 13648 bp_mapout(bp); 13649 #endif 13650 13651 /* 13652 * Null out b_iodone before freeing the bp, to ensure that the driver 13653 * never gets confused by a stale value in this field. (Just a little 13654 * extra defensiveness here.) 13655 */ 13656 bp->b_iodone = NULL; 13657 13658 #if defined(__i386) || defined(__amd64) 13659 kmem_free(bp->b_un.b_addr, bp->b_bcount); 13660 freerbuf(bp); 13661 #else 13662 scsi_free_consistent_buf(bp); 13663 #endif 13664 13665 kmem_free(xp, sizeof (struct sd_xbuf)); 13666 } 13667 13668 13669 /* 13670 * Function: sd_print_transport_rejected_message 13671 * 13672 * Description: This implements the ludicrously complex rules for printing 13673 * a "transport rejected" message. This is to address the 13674 * specific problem of having a flood of this error message 13675 * produced when a failover occurs. 13676 * 13677 * Context: Any. 13678 */ 13679 13680 static void 13681 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp, 13682 int code) 13683 { 13684 ASSERT(un != NULL); 13685 ASSERT(mutex_owned(SD_MUTEX(un))); 13686 ASSERT(xp != NULL); 13687 13688 /* 13689 * Print the "transport rejected" message under the following 13690 * conditions: 13691 * 13692 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set 13693 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR. 13694 * - If the error code IS a TRAN_FATAL_ERROR, then the message is 13695 * printed the FIRST time a TRAN_FATAL_ERROR is returned from 13696 * scsi_transport(9F) (which indicates that the target might have 13697 * gone off-line). This uses the un->un_tran_fatal_count 13698 * count, which is incremented whenever a TRAN_FATAL_ERROR is 13699 * received, and reset to zero whenver a TRAN_ACCEPT is returned 13700 * from scsi_transport(). 13701 * 13702 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of 13703 * the preceeding cases in order for the message to be printed. 13704 */ 13705 if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) { 13706 if ((sd_level_mask & SD_LOGMASK_DIAG) || 13707 (code != TRAN_FATAL_ERROR) || 13708 (un->un_tran_fatal_count == 1)) { 13709 switch (code) { 13710 case TRAN_BADPKT: 13711 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13712 "transport rejected bad packet\n"); 13713 break; 13714 case TRAN_FATAL_ERROR: 13715 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13716 "transport rejected fatal error\n"); 13717 break; 13718 default: 13719 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 13720 "transport rejected (%d)\n", code); 13721 break; 13722 } 13723 } 13724 } 13725 } 13726 13727 13728 /* 13729 * Function: sd_add_buf_to_waitq 13730 * 13731 * Description: Add the given buf(9S) struct to the wait queue for the 13732 * instance. 
  If sorting is enabled, then the buf is added
 *		to the queue via an elevator sort algorithm (a la
 *		disksort(9F)).  The SD_GET_BLKNO(bp) is used as the sort key.
 *		If sorting is not enabled, then the buf is just added
 *		to the end of the wait queue.
 *
 * Return Code: void
 *
 * Context: Does not sleep/block, therefore technically can be called
 *		from any context.  However if sorting is enabled then the
 *		execution time is indeterminate, and may take a long time if
 *		the wait queue grows large.
 */

static void
sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
{
	struct buf *ap;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* If the queue is empty, add the buf as the only entry & return. */
	if (un->un_waitq_headp == NULL) {
		ASSERT(un->un_waitq_tailp == NULL);
		un->un_waitq_headp = un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	ASSERT(un->un_waitq_tailp != NULL);

	/*
	 * If sorting is disabled, just add the buf to the tail end of
	 * the wait queue and return.
	 */
	if (un->un_f_disksort_disabled) {
		un->un_waitq_tailp->av_forw = bp;
		un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	/*
	 * Sort through the list of requests currently on the wait queue
	 * and add the new buf request at the appropriate position.
	 *
	 * The un->un_waitq_headp is an activity chain pointer on which
	 * we keep two queues, sorted in ascending SD_GET_BLKNO() order.  The
	 * first queue holds those requests which are positioned after
	 * the current SD_GET_BLKNO() (in the first request); the second holds
	 * requests which came in after their SD_GET_BLKNO() number was passed.
	 * Thus we implement a one-way scan, retracting after reaching
	 * the end of the drive to the first request on the second
	 * queue, at which time it becomes the first queue.
	 * A one-way scan is natural because of the way UNIX read-ahead
	 * blocks are allocated.
	 *
	 * If our request lies before the first request (i.e., its block
	 * number has already been passed by the sweep), we must locate the
	 * second request list and add ourselves to it.  For example, with a
	 * queue of 120,200|10,50, a new request for block 30 is sorted into
	 * the second list, between 10 and 50.
	 */
	ap = un->un_waitq_headp;
	if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
		while (ap->av_forw != NULL) {
			/*
			 * Look for an "inversion" in the (normally
			 * ascending) block numbers.  This indicates
			 * the start of the second request list.
			 */
			if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
				/*
				 * Search the second request list for the
				 * first request at a larger block number.
				 * We go before that; however if there is
				 * no such request, we go at the end.
				 */
				do {
					if (SD_GET_BLKNO(bp) <
					    SD_GET_BLKNO(ap->av_forw)) {
						goto insert;
					}
					ap = ap->av_forw;
				} while (ap->av_forw != NULL);
				goto insert;		/* after last */
			}
			ap = ap->av_forw;
		}

		/*
		 * No inversions... we will go after the last, and
		 * be the first request in the second request list.
		 */
		goto insert;
	}

	/*
	 * Request is at/after the current request...
	 * sort in the first request list.
	 */
	while (ap->av_forw != NULL) {
		/*
		 * We want to go after the current request (1) if
		 * there is an inversion after it (i.e.
 it is the end
		 * of the first request list), or (2) if the next
		 * request is a larger block no. than our request.
		 */
		if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
		    (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
			goto insert;
		}
		ap = ap->av_forw;
	}

	/*
	 * Neither a second list nor a larger request, therefore
	 * we go at the end of the first list (which is the same
	 * as the end of the whole shebang).
	 */
insert:
	bp->av_forw = ap->av_forw;
	ap->av_forw = bp;

	/*
	 * If we inserted onto the tail end of the waitq, make sure the
	 * tail pointer is updated.
	 */
	if (ap == un->un_waitq_tailp) {
		un->un_waitq_tailp = bp;
	}
}


/*
 * Function: sd_start_cmds
 *
 * Description: Remove and transport cmds from the driver queues.
 *
 * Arguments: un - pointer to the unit (soft state) struct for the target.
 *
 *		immed_bp - ptr to a buf to be transported immediately.  Only
 *		the immed_bp is transported; bufs on the waitq are not
 *		processed and the un_retry_bp is not checked.  If immed_bp is
 *		NULL, then normal queue processing is performed.
 *
 * Context: May be called from kernel thread context, interrupt context,
 *		or runout callback context.  This function may not block or
 *		call routines that block.
 */

static void
sd_start_cmds(struct sd_lun *un, struct buf *immed_bp)
{
	struct sd_xbuf	*xp;
	struct buf	*bp;
	void		(*statp)(kstat_io_t *);
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
	void		(*saved_statp)(kstat_io_t *);
#endif
	int	rval;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_ncmds_in_transport >= 0);
	ASSERT(un->un_throttle >= 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n");

	do {
#if defined(__i386) || defined(__amd64)	/* DMAFREE for x86 only */
		saved_statp = NULL;
#endif

		/*
		 * If we are syncing or dumping, fail the command to
		 * avoid recursively calling back into scsi_transport().
		 * See panic.c for more information about the states
		 * the system can be in during panic.
		 */
		if ((un->un_state == SD_STATE_DUMPING) ||
		    (un->un_in_callback > 1)) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_start_cmds: panicking\n");
			goto exit;
		}

		if ((bp = immed_bp) != NULL) {
			/*
			 * We have a bp that must be transported immediately.
			 * It's OK to transport the immed_bp here without doing
			 * the throttle limit check because the immed_bp is
			 * always used in a retry/recovery case.  This means
			 * that we know we are not at the throttle limit by
			 * virtue of the fact that to get here we must have
			 * already gotten a command back via sdintr().  This also
			 * relies on (1) the command on un_retry_bp preventing
			 * further commands from the waitq from being issued;
			 * and (2) the code in sd_retry_command checking the
			 * throttle limit before issuing a delayed or immediate
			 * retry.  This holds even if the throttle limit is
			 * currently ratcheted down from its maximum value.
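			 * (Concretely: for a retry to exist at all, sdintr()
			 * must already have taken the original command out of
			 * un_ncmds_in_transport, so transporting the immed_bp
			 * here cannot push that count past un_throttle.)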
13933 */ 13934 statp = kstat_runq_enter; 13935 if (bp == un->un_retry_bp) { 13936 ASSERT((un->un_retry_statp == NULL) || 13937 (un->un_retry_statp == kstat_waitq_enter) || 13938 (un->un_retry_statp == 13939 kstat_runq_back_to_waitq)); 13940 /* 13941 * If the waitq kstat was incremented when 13942 * sd_set_retry_bp() queued this bp for a retry, 13943 * then we must set up statp so that the waitq 13944 * count will get decremented correctly below. 13945 * Also we must clear un->un_retry_statp to 13946 * ensure that we do not act on a stale value 13947 * in this field. 13948 */ 13949 if ((un->un_retry_statp == kstat_waitq_enter) || 13950 (un->un_retry_statp == 13951 kstat_runq_back_to_waitq)) { 13952 statp = kstat_waitq_to_runq; 13953 } 13954 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 13955 saved_statp = un->un_retry_statp; 13956 #endif 13957 un->un_retry_statp = NULL; 13958 13959 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 13960 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p " 13961 "un_throttle:%d un_ncmds_in_transport:%d\n", 13962 un, un->un_retry_bp, un->un_throttle, 13963 un->un_ncmds_in_transport); 13964 } else { 13965 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: " 13966 "processing priority bp:0x%p\n", bp); 13967 } 13968 13969 } else if ((bp = un->un_waitq_headp) != NULL) { 13970 /* 13971 * A command on the waitq is ready to go, but do not 13972 * send it if: 13973 * 13974 * (1) the throttle limit has been reached, or 13975 * (2) a retry is pending, or 13976 * (3) a START_STOP_UNIT callback pending, or 13977 * (4) a callback for a SD_PATH_DIRECT_PRIORITY 13978 * command is pending. 13979 * 13980 * For all of these conditions, IO processing will 13981 * restart after the condition is cleared. 13982 */ 13983 if (un->un_ncmds_in_transport >= un->un_throttle) { 13984 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13985 "sd_start_cmds: exiting, " 13986 "throttle limit reached!\n"); 13987 goto exit; 13988 } 13989 if (un->un_retry_bp != NULL) { 13990 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13991 "sd_start_cmds: exiting, retry pending!\n"); 13992 goto exit; 13993 } 13994 if (un->un_startstop_timeid != NULL) { 13995 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 13996 "sd_start_cmds: exiting, " 13997 "START_STOP pending!\n"); 13998 goto exit; 13999 } 14000 if (un->un_direct_priority_timeid != NULL) { 14001 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14002 "sd_start_cmds: exiting, " 14003 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n"); 14004 goto exit; 14005 } 14006 14007 /* Dequeue the command */ 14008 un->un_waitq_headp = bp->av_forw; 14009 if (un->un_waitq_headp == NULL) { 14010 un->un_waitq_tailp = NULL; 14011 } 14012 bp->av_forw = NULL; 14013 statp = kstat_waitq_to_runq; 14014 SD_TRACE(SD_LOG_IO_CORE, un, 14015 "sd_start_cmds: processing waitq bp:0x%p\n", bp); 14016 14017 } else { 14018 /* No work to do so bail out now */ 14019 SD_TRACE(SD_LOG_IO_CORE, un, 14020 "sd_start_cmds: no more work, exiting!\n"); 14021 goto exit; 14022 } 14023 14024 /* 14025 * Reset the state to normal. This is the mechanism by which 14026 * the state transitions from either SD_STATE_RWAIT or 14027 * SD_STATE_OFFLINE to SD_STATE_NORMAL. 14028 * If state is SD_STATE_PM_CHANGING then this command is 14029 * part of the device power control and the state must 14030 * not be put back to normal. Doing so would would 14031 * allow new commands to proceed when they shouldn't, 14032 * the device may be going off. 
14033 */ 14034 if ((un->un_state != SD_STATE_SUSPENDED) && 14035 (un->un_state != SD_STATE_PM_CHANGING)) { 14036 New_state(un, SD_STATE_NORMAL); 14037 } 14038 14039 xp = SD_GET_XBUF(bp); 14040 ASSERT(xp != NULL); 14041 14042 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14043 /* 14044 * Allocate the scsi_pkt if we need one, or attach DMA 14045 * resources if we have a scsi_pkt that needs them. The 14046 * latter should only occur for commands that are being 14047 * retried. 14048 */ 14049 if ((xp->xb_pktp == NULL) || 14050 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) { 14051 #else 14052 if (xp->xb_pktp == NULL) { 14053 #endif 14054 /* 14055 * There is no scsi_pkt allocated for this buf. Call 14056 * the initpkt function to allocate & init one. 14057 * 14058 * The scsi_init_pkt runout callback functionality is 14059 * implemented as follows: 14060 * 14061 * 1) The initpkt function always calls 14062 * scsi_init_pkt(9F) with sdrunout specified as the 14063 * callback routine. 14064 * 2) A successful packet allocation is initialized and 14065 * the I/O is transported. 14066 * 3) The I/O associated with an allocation resource 14067 * failure is left on its queue to be retried via 14068 * runout or the next I/O. 14069 * 4) The I/O associated with a DMA error is removed 14070 * from the queue and failed with EIO. Processing of 14071 * the transport queues is also halted to be 14072 * restarted via runout or the next I/O. 14073 * 5) The I/O associated with a CDB size or packet 14074 * size error is removed from the queue and failed 14075 * with EIO. Processing of the transport queues is 14076 * continued. 14077 * 14078 * Note: there is no interface for canceling a runout 14079 * callback. To prevent the driver from detaching or 14080 * suspending while a runout is pending the driver 14081 * state is set to SD_STATE_RWAIT 14082 * 14083 * Note: using the scsi_init_pkt callback facility can 14084 * result in an I/O request persisting at the head of 14085 * the list which cannot be satisfied even after 14086 * multiple retries. In the future the driver may 14087 * implement some kind of maximum runout count before 14088 * failing an I/O. 14089 * 14090 * Note: the use of funcp below may seem superfluous, 14091 * but it helps warlock figure out the correct 14092 * initpkt function calls (see [s]sd.wlcmd). 14093 */ 14094 struct scsi_pkt *pktp; 14095 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp); 14096 14097 ASSERT(bp != un->un_rqs_bp); 14098 14099 funcp = sd_initpkt_map[xp->xb_chain_iostart]; 14100 switch ((*funcp)(bp, &pktp)) { 14101 case SD_PKT_ALLOC_SUCCESS: 14102 xp->xb_pktp = pktp; 14103 SD_TRACE(SD_LOG_IO_CORE, un, 14104 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n", 14105 pktp); 14106 goto got_pkt; 14107 14108 case SD_PKT_ALLOC_FAILURE: 14109 /* 14110 * Temporary (hopefully) resource depletion. 14111 * Since retries and RQS commands always have a 14112 * scsi_pkt allocated, these cases should never 14113 * get here. So the only cases this needs to 14114 * handle is a bp from the waitq (which we put 14115 * back onto the waitq for sdrunout), or a bp 14116 * sent as an immed_bp (which we just fail). 14117 */ 14118 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14119 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n"); 14120 14121 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14122 14123 if (bp == immed_bp) { 14124 /* 14125 * If SD_XB_DMA_FREED is clear, then 14126 * this is a failure to allocate a 14127 * scsi_pkt, and we must fail the 14128 * command. 
14129 */ 14130 if ((xp->xb_pkt_flags & 14131 SD_XB_DMA_FREED) == 0) { 14132 break; 14133 } 14134 14135 /* 14136 * If this immediate command is NOT our 14137 * un_retry_bp, then we must fail it. 14138 */ 14139 if (bp != un->un_retry_bp) { 14140 break; 14141 } 14142 14143 /* 14144 * We get here if this cmd is our 14145 * un_retry_bp that was DMAFREED, but 14146 * scsi_init_pkt() failed to reallocate 14147 * DMA resources when we attempted to 14148 * retry it. This can happen when an 14149 * mpxio failover is in progress, but 14150 * we don't want to just fail the 14151 * command in this case. 14152 * 14153 * Use timeout(9F) to restart it after 14154 * a 100ms delay. We don't want to 14155 * let sdrunout() restart it, because 14156 * sdrunout() is just supposed to start 14157 * commands that are sitting on the 14158 * wait queue. The un_retry_bp stays 14159 * set until the command completes, but 14160 * sdrunout can be called many times 14161 * before that happens. Since sdrunout 14162 * cannot tell if the un_retry_bp is 14163 * already in the transport, it could 14164 * end up calling scsi_transport() for 14165 * the un_retry_bp multiple times. 14166 * 14167 * Also: don't schedule the callback 14168 * if some other callback is already 14169 * pending. 14170 */ 14171 if (un->un_retry_statp == NULL) { 14172 /* 14173 * restore the kstat pointer to 14174 * keep kstat counts coherent 14175 * when we do retry the command. 14176 */ 14177 un->un_retry_statp = 14178 saved_statp; 14179 } 14180 14181 if ((un->un_startstop_timeid == NULL) && 14182 (un->un_retry_timeid == NULL) && 14183 (un->un_direct_priority_timeid == 14184 NULL)) { 14185 14186 un->un_retry_timeid = 14187 timeout( 14188 sd_start_retry_command, 14189 un, SD_RESTART_TIMEOUT); 14190 } 14191 goto exit; 14192 } 14193 14194 #else 14195 if (bp == immed_bp) { 14196 break; /* Just fail the command */ 14197 } 14198 #endif 14199 14200 /* Add the buf back to the head of the waitq */ 14201 bp->av_forw = un->un_waitq_headp; 14202 un->un_waitq_headp = bp; 14203 if (un->un_waitq_tailp == NULL) { 14204 un->un_waitq_tailp = bp; 14205 } 14206 goto exit; 14207 14208 case SD_PKT_ALLOC_FAILURE_NO_DMA: 14209 /* 14210 * HBA DMA resource failure. Fail the command 14211 * and continue processing of the queues. 14212 */ 14213 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14214 "sd_start_cmds: " 14215 "SD_PKT_ALLOC_FAILURE_NO_DMA\n"); 14216 break; 14217 14218 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL: 14219 /* 14220 * Note:x86: Partial DMA mapping not supported 14221 * for USCSI commands, and all the needed DMA 14222 * resources were not allocated. 14223 */ 14224 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14225 "sd_start_cmds: " 14226 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n"); 14227 break; 14228 14229 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL: 14230 /* 14231 * Note:x86: Request cannot fit into CDB based 14232 * on lba and len. 14233 */ 14234 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14235 "sd_start_cmds: " 14236 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n"); 14237 break; 14238 14239 default: 14240 /* Should NEVER get here! */ 14241 panic("scsi_initpkt error"); 14242 /*NOTREACHED*/ 14243 } 14244 14245 /* 14246 * Fatal error in allocating a scsi_pkt for this buf. 14247 * Update kstats & return the buf with an error code. 14248 * We must use sd_return_failed_command_no_restart() to 14249 * avoid a recursive call back into sd_start_cmds(). 14250 * However this also means that we must keep processing 14251 * the waitq here in order to avoid stalling. 
14252 */ 14253 if (statp == kstat_waitq_to_runq) { 14254 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp); 14255 } 14256 sd_return_failed_command_no_restart(un, bp, EIO); 14257 if (bp == immed_bp) { 14258 /* immed_bp is gone by now, so clear this */ 14259 immed_bp = NULL; 14260 } 14261 continue; 14262 } 14263 got_pkt: 14264 if (bp == immed_bp) { 14265 /* goto the head of the class.... */ 14266 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14267 } 14268 14269 un->un_ncmds_in_transport++; 14270 SD_UPDATE_KSTATS(un, statp, bp); 14271 14272 /* 14273 * Call scsi_transport() to send the command to the target. 14274 * According to SCSA architecture, we must drop the mutex here 14275 * before calling scsi_transport() in order to avoid deadlock. 14276 * Note that the scsi_pkt's completion routine can be executed 14277 * (from interrupt context) even before the call to 14278 * scsi_transport() returns. 14279 */ 14280 SD_TRACE(SD_LOG_IO_CORE, un, 14281 "sd_start_cmds: calling scsi_transport()\n"); 14282 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp); 14283 14284 mutex_exit(SD_MUTEX(un)); 14285 rval = scsi_transport(xp->xb_pktp); 14286 mutex_enter(SD_MUTEX(un)); 14287 14288 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14289 "sd_start_cmds: scsi_transport() returned %d\n", rval); 14290 14291 switch (rval) { 14292 case TRAN_ACCEPT: 14293 /* Clear this with every pkt accepted by the HBA */ 14294 un->un_tran_fatal_count = 0; 14295 break; /* Success; try the next cmd (if any) */ 14296 14297 case TRAN_BUSY: 14298 un->un_ncmds_in_transport--; 14299 ASSERT(un->un_ncmds_in_transport >= 0); 14300 14301 /* 14302 * Don't retry request sense, the sense data 14303 * is lost when another request is sent. 14304 * Free up the rqs buf and retry 14305 * the original failed cmd. Update kstat. 14306 */ 14307 if (bp == un->un_rqs_bp) { 14308 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14309 bp = sd_mark_rqs_idle(un, xp); 14310 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 14311 NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, 14312 kstat_waitq_enter); 14313 goto exit; 14314 } 14315 14316 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */ 14317 /* 14318 * Free the DMA resources for the scsi_pkt. This will 14319 * allow mpxio to select another path the next time 14320 * we call scsi_transport() with this scsi_pkt. 14321 * See sdintr() for the rationalization behind this. 14322 */ 14323 if ((un->un_f_is_fibre == TRUE) && 14324 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 14325 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) { 14326 scsi_dmafree(xp->xb_pktp); 14327 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 14328 } 14329 #endif 14330 14331 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) { 14332 /* 14333 * Commands that are SD_PATH_DIRECT_PRIORITY 14334 * are for error recovery situations. These do 14335 * not use the normal command waitq, so if they 14336 * get a TRAN_BUSY we cannot put them back onto 14337 * the waitq for later retry. One possible 14338 * problem is that there could already be some 14339 * other command on un_retry_bp that is waiting 14340 * for this one to complete, so we would be 14341 * deadlocked if we put this command back onto 14342 * the waitq for later retry (since un_retry_bp 14343 * must complete before the driver gets back to 14344 * commands on the waitq). 14345 * 14346 * To avoid deadlock we must schedule a callback 14347 * that will restart this command after a set 14348 * interval. 
This should keep retrying for as 14349 * long as the underlying transport keeps 14350 * returning TRAN_BUSY (just like for other 14351 * commands). Use the same timeout interval as 14352 * for the ordinary TRAN_BUSY retry. 14353 */ 14354 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14355 "sd_start_cmds: scsi_transport() returned " 14356 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n"); 14357 14358 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14359 un->un_direct_priority_timeid = 14360 timeout(sd_start_direct_priority_command, 14361 bp, SD_BSY_TIMEOUT / 500); 14362 14363 goto exit; 14364 } 14365 14366 /* 14367 * For TRAN_BUSY, we want to reduce the throttle value, 14368 * unless we are retrying a command. 14369 */ 14370 if (bp != un->un_retry_bp) { 14371 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY); 14372 } 14373 14374 /* 14375 * Set up the bp to be tried again 10 ms later. 14376 * Note:x86: Is there a timeout value in the sd_lun 14377 * for this condition? 14378 */ 14379 sd_set_retry_bp(un, bp, SD_BSY_TIMEOUT / 500, 14380 kstat_runq_back_to_waitq); 14381 goto exit; 14382 14383 case TRAN_FATAL_ERROR: 14384 un->un_tran_fatal_count++; 14385 /* FALLTHRU */ 14386 14387 case TRAN_BADPKT: 14388 default: 14389 un->un_ncmds_in_transport--; 14390 ASSERT(un->un_ncmds_in_transport >= 0); 14391 14392 /* 14393 * If this is our REQUEST SENSE command with a 14394 * transport error, we must get back the pointers 14395 * to the original buf, and mark the REQUEST 14396 * SENSE command as "available". 14397 */ 14398 if (bp == un->un_rqs_bp) { 14399 bp = sd_mark_rqs_idle(un, xp); 14400 xp = SD_GET_XBUF(bp); 14401 } else { 14402 /* 14403 * Legacy behavior: do not update transport 14404 * error count for request sense commands. 14405 */ 14406 SD_UPDATE_ERRSTATS(un, sd_transerrs); 14407 } 14408 14409 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 14410 sd_print_transport_rejected_message(un, xp, rval); 14411 14412 /* 14413 * We must use sd_return_failed_command_no_restart() to 14414 * avoid a recursive call back into sd_start_cmds(). 14415 * However this also means that we must keep processing 14416 * the waitq here in order to avoid stalling. 14417 */ 14418 sd_return_failed_command_no_restart(un, bp, EIO); 14419 14420 /* 14421 * Notify any threads waiting in sd_ddi_suspend() that 14422 * a command completion has occurred. 14423 */ 14424 if (un->un_state == SD_STATE_SUSPENDED) { 14425 cv_broadcast(&un->un_disk_busy_cv); 14426 } 14427 14428 if (bp == immed_bp) { 14429 /* immed_bp is gone by now, so clear this */ 14430 immed_bp = NULL; 14431 } 14432 break; 14433 } 14434 14435 } while (immed_bp == NULL); 14436 14437 exit: 14438 ASSERT(mutex_owned(SD_MUTEX(un))); 14439 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n"); 14440 } 14441 14442 14443 /* 14444 * Function: sd_return_command 14445 * 14446 * Description: Returns a command to its originator (with or without an 14447 * error). Also starts commands waiting to be transported 14448 * to the target. 
14449 * 14450 * Context: May be called from interrupt, kernel, or timeout context 14451 */ 14452 14453 static void 14454 sd_return_command(struct sd_lun *un, struct buf *bp) 14455 { 14456 struct sd_xbuf *xp; 14457 #if defined(__i386) || defined(__amd64) 14458 struct scsi_pkt *pktp; 14459 #endif 14460 14461 ASSERT(bp != NULL); 14462 ASSERT(un != NULL); 14463 ASSERT(mutex_owned(SD_MUTEX(un))); 14464 ASSERT(bp != un->un_rqs_bp); 14465 xp = SD_GET_XBUF(bp); 14466 ASSERT(xp != NULL); 14467 14468 #if defined(__i386) || defined(__amd64) 14469 pktp = SD_GET_PKTP(bp); 14470 #endif 14471 14472 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n"); 14473 14474 #if defined(__i386) || defined(__amd64) 14475 /* 14476 * Note:x86: check for the "sdrestart failed" case. 14477 */ 14478 if (((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) && 14479 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) && 14480 (xp->xb_pktp->pkt_resid == 0)) { 14481 14482 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) { 14483 /* 14484 * Successfully set up next portion of cmd 14485 * transfer, try sending it 14486 */ 14487 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 14488 NULL, NULL, 0, (clock_t)0, NULL); 14489 sd_start_cmds(un, NULL); 14490 return; /* Note:x86: need a return here? */ 14491 } 14492 } 14493 #endif 14494 14495 /* 14496 * If this is the failfast bp, clear it from un_failfast_bp. This 14497 * can happen if upon being re-tried the failfast bp either 14498 * succeeded or encountered another error (possibly even a different 14499 * error than the one that precipitated the failfast state, but in 14500 * that case it would have had to exhaust retries as well). Regardless, 14501 * this should not occur whenever the instance is in the active 14502 * failfast state. 14503 */ 14504 if (bp == un->un_failfast_bp) { 14505 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 14506 un->un_failfast_bp = NULL; 14507 } 14508 14509 /* 14510 * Clear the failfast state upon successful completion of ANY cmd. 14511 */ 14512 if (bp->b_error == 0) { 14513 un->un_failfast_state = SD_FAILFAST_INACTIVE; 14514 } 14515 14516 /* 14517 * This is used if the command was retried one or more times. Show that 14518 * we are done with it, and allow processing of the waitq to resume. 14519 */ 14520 if (bp == un->un_retry_bp) { 14521 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14522 "sd_return_command: un:0x%p: " 14523 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 14524 un->un_retry_bp = NULL; 14525 un->un_retry_statp = NULL; 14526 } 14527 14528 SD_UPDATE_RDWR_STATS(un, bp); 14529 SD_UPDATE_PARTITION_STATS(un, bp); 14530 14531 switch (un->un_state) { 14532 case SD_STATE_SUSPENDED: 14533 /* 14534 * Notify any threads waiting in sd_ddi_suspend() that 14535 * a command completion has occurred. 14536 */ 14537 cv_broadcast(&un->un_disk_busy_cv); 14538 break; 14539 default: 14540 sd_start_cmds(un, NULL); 14541 break; 14542 } 14543 14544 /* Return this command up the iodone chain to its originator. */ 14545 mutex_exit(SD_MUTEX(un)); 14546 14547 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 14548 xp->xb_pktp = NULL; 14549 14550 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 14551 14552 ASSERT(!mutex_owned(SD_MUTEX(un))); 14553 mutex_enter(SD_MUTEX(un)); 14554 14555 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n"); 14556 } 14557 14558 14559 /* 14560 * Function: sd_return_failed_command 14561 * 14562 * Description: Command completion when an error occurred. 
14563 * 14564 * Context: May be called from interrupt context 14565 */ 14566 14567 static void 14568 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode) 14569 { 14570 ASSERT(bp != NULL); 14571 ASSERT(un != NULL); 14572 ASSERT(mutex_owned(SD_MUTEX(un))); 14573 14574 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14575 "sd_return_failed_command: entry\n"); 14576 14577 /* 14578 * b_resid could already be nonzero due to a partial data 14579 * transfer, so do not change it here. 14580 */ 14581 SD_BIOERROR(bp, errcode); 14582 14583 sd_return_command(un, bp); 14584 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14585 "sd_return_failed_command: exit\n"); 14586 } 14587 14588 14589 /* 14590 * Function: sd_return_failed_command_no_restart 14591 * 14592 * Description: Same as sd_return_failed_command, but ensures that no 14593 * call back into sd_start_cmds will be issued. 14594 * 14595 * Context: May be called from interrupt context 14596 */ 14597 14598 static void 14599 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp, 14600 int errcode) 14601 { 14602 struct sd_xbuf *xp; 14603 14604 ASSERT(bp != NULL); 14605 ASSERT(un != NULL); 14606 ASSERT(mutex_owned(SD_MUTEX(un))); 14607 xp = SD_GET_XBUF(bp); 14608 ASSERT(xp != NULL); 14609 ASSERT(errcode != 0); 14610 14611 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14612 "sd_return_failed_command_no_restart: entry\n"); 14613 14614 /* 14615 * b_resid could already be nonzero due to a partial data 14616 * transfer, so do not change it here. 14617 */ 14618 SD_BIOERROR(bp, errcode); 14619 14620 /* 14621 * If this is the failfast bp, clear it. This can happen if the 14622 * failfast bp encountered a fatal error when we attempted to 14623 * re-try it (such as a scsi_transport(9F) failure). However 14624 * we should NOT be in an active failfast state if the failfast 14625 * bp is not NULL. 14626 */ 14627 if (bp == un->un_failfast_bp) { 14628 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE); 14629 un->un_failfast_bp = NULL; 14630 } 14631 14632 if (bp == un->un_retry_bp) { 14633 /* 14634 * This command was retried one or more times. Show that we are 14635 * done with it, and allow processing of the waitq to resume. 14636 */ 14637 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14638 "sd_return_failed_command_no_restart: " 14639 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp); 14640 un->un_retry_bp = NULL; 14641 un->un_retry_statp = NULL; 14642 } 14643 14644 SD_UPDATE_RDWR_STATS(un, bp); 14645 SD_UPDATE_PARTITION_STATS(un, bp); 14646 14647 mutex_exit(SD_MUTEX(un)); 14648 14649 if (xp->xb_pktp != NULL) { 14650 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp); 14651 xp->xb_pktp = NULL; 14652 } 14653 14654 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp); 14655 14656 mutex_enter(SD_MUTEX(un)); 14657 14658 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14659 "sd_return_failed_command_no_restart: exit\n"); 14660 } 14661 14662 14663 /* 14664 * Function: sd_retry_command 14665 * 14666 * Description: Queue up a command for retry, or (optionally) fail it 14667 * if retry counts are exhausted. 14668 * 14669 * Arguments: un - Pointer to the sd_lun struct for the target. 14670 * 14671 * bp - Pointer to the buf for the command to be retried. 14672 * 14673 * retry_check_flag - Flag to see which (if any) of the retry 14674 * counts should be decremented/checked. If the indicated 14675 * retry count is exhausted, then the command will not be 14676 * retried; it will be failed instead.
This should use a 14677 * value equal to one of the following: 14678 * 14679 * SD_RETRIES_NOCHECK 14680 * SD_RETRIES_STANDARD 14681 * SD_RETRIES_VICTIM 14682 * 14683 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE 14684 * if the check should be made to see if FLAG_ISOLATE is set 14685 * in the pkt. If FLAG_ISOLATE is set, then the command is 14686 * not retried; it is simply failed. 14687 * 14688 * user_funcp - Ptr to function to call before dispatching the 14689 * command. May be NULL if no action needs to be performed. 14690 * (Primarily intended for printing messages.) 14691 * 14692 * user_arg - Optional argument to be passed along to 14693 * the user_funcp call. 14694 * 14695 * failure_code - errno return code to set in the bp if the 14696 * command is going to be failed. 14697 * 14698 * retry_delay - Retry delay interval in (clock_t) units. May 14699 * be zero which indicates that the retry should be performed 14700 * immediately (ie, without an intervening delay). 14701 * 14702 * statp - Ptr to kstat function to be updated if the command 14703 * is queued for a delayed retry. May be NULL if no kstat 14704 * update is desired. 14705 * 14706 * Context: May be called from interrupt context. 14707 */ 14708 14709 static void 14710 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag, 14711 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int 14712 code), void *user_arg, int failure_code, clock_t retry_delay, 14713 void (*statp)(kstat_io_t *)) 14714 { 14715 struct sd_xbuf *xp; 14716 struct scsi_pkt *pktp; 14717 14718 ASSERT(un != NULL); 14719 ASSERT(mutex_owned(SD_MUTEX(un))); 14720 ASSERT(bp != NULL); 14721 xp = SD_GET_XBUF(bp); 14722 ASSERT(xp != NULL); 14723 pktp = SD_GET_PKTP(bp); 14724 ASSERT(pktp != NULL); 14725 14726 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 14727 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp); 14728 14729 /* 14730 * If we are syncing or dumping, fail the command to avoid 14731 * recursively calling back into scsi_transport(). 14732 */ 14733 if (ddi_in_panic()) { 14734 goto fail_command_no_log; 14735 } 14736 14737 /* 14738 * We should never be retrying a command with FLAG_DIAGNOSE set, so 14739 * log an error and fail the command. 14740 */ 14741 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 14742 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 14743 "ERROR, retrying FLAG_DIAGNOSE command.\n"); 14744 sd_dump_memory(un, SD_LOG_IO, "CDB", 14745 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 14746 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 14747 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 14748 goto fail_command; 14749 } 14750 14751 /* 14752 * If we are suspended, then put the command onto the head of the 14753 * wait queue since we don't want to start more commands. 14754 */ 14755 switch (un->un_state) { 14756 case SD_STATE_SUSPENDED: 14757 case SD_STATE_DUMPING: 14758 bp->av_forw = un->un_waitq_headp; 14759 un->un_waitq_headp = bp; 14760 if (un->un_waitq_tailp == NULL) { 14761 un->un_waitq_tailp = bp; 14762 } 14763 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp); 14764 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: " 14765 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp); 14766 return; 14767 default: 14768 break; 14769 } 14770 14771 /* 14772 * If the caller wants us to check FLAG_ISOLATE, then see if that 14773 * is set; if it is then we do not want to retry the command. 14774 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
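 *
 * For reference, a representative invocation of this routine (taken
 * from the TRAN_BUSY handling of the RQS packet in sd_start_cmds()
 * above):
 *
 *	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
 *	    NULL, NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter);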
14775 */ 14776 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) { 14777 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) { 14778 goto fail_command; 14779 } 14780 } 14781 14782 14783 /* 14784 * If SD_RETRIES_FAILFAST is set, it indicates that either a 14785 * command timeout or a selection timeout has occurred. This means 14786 * that we were unable to establish any kind of communication with 14787 * the target, and subsequent retries and/or commands are likely 14788 * to encounter similar results and take a long time to complete. 14789 * 14790 * If this is a failfast error condition, we need to update the 14791 * failfast state, even if this bp does not have B_FAILFAST set. 14792 */ 14793 if (retry_check_flag & SD_RETRIES_FAILFAST) { 14794 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) { 14795 ASSERT(un->un_failfast_bp == NULL); 14796 /* 14797 * If we are already in the active failfast state, and 14798 * another failfast error condition has been detected, 14799 * then fail this command if it has B_FAILFAST set. 14800 * If B_FAILFAST is clear, then maintain the legacy 14801 * behavior of retrying heroically, even though this will 14802 * take a lot more time to fail the command. 14803 */ 14804 if (bp->b_flags & B_FAILFAST) { 14805 goto fail_command; 14806 } 14807 } else { 14808 /* 14809 * We're not in the active failfast state, but we 14810 * have a failfast error condition, so we must begin 14811 * transition to the next state. We do this regardless 14812 * of whether or not this bp has B_FAILFAST set. 14813 */ 14814 if (un->un_failfast_bp == NULL) { 14815 /* 14816 * This is the first bp to meet a failfast 14817 * condition so save it on un_failfast_bp & 14818 * do normal retry processing. Do not enter 14819 * active failfast state yet. This marks 14820 * entry into the "failfast pending" state. 14821 */ 14822 un->un_failfast_bp = bp; 14823 14824 } else if (un->un_failfast_bp == bp) { 14825 /* 14826 * This is the second time *this* bp has 14827 * encountered a failfast error condition, 14828 * so enter active failfast state & flush 14829 * queues as appropriate. 14830 */ 14831 un->un_failfast_state = SD_FAILFAST_ACTIVE; 14832 un->un_failfast_bp = NULL; 14833 sd_failfast_flushq(un); 14834 14835 /* 14836 * Fail this bp now if B_FAILFAST set; 14837 * otherwise continue with retries. (It would 14838 * be pretty ironic if this bp succeeded on a 14839 * subsequent retry after we just flushed all 14840 * the queues). 14841 */ 14842 if (bp->b_flags & B_FAILFAST) { 14843 goto fail_command; 14844 } 14845 14846 #if !defined(lint) && !defined(__lint) 14847 } else { 14848 /* 14849 * If neither of the preceding conditionals 14850 * was true, it means that there is some 14851 * *other* bp that has met an initial failfast 14852 * condition and is currently either being 14853 * retried or is waiting to be retried. In 14854 * that case we should perform normal retry 14855 * processing on *this* bp, since there is a 14856 * chance that the current failfast condition 14857 * is transient and recoverable. If that does 14858 * not turn out to be the case, then retries 14859 * will be cleared when the wait queue is 14860 * flushed anyway.
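 *
 * In summary, the checks above implement a small state machine
 * (sketched here for reference; the states are those used
 * throughout this file):
 *
 *	SD_FAILFAST_INACTIVE --1st failfast error--> "pending"
 *	    (un_failfast_bp = bp; normal retry processing)
 *	"pending" --same bp fails again--> SD_FAILFAST_ACTIVE
 *	    (queues flushed via sd_failfast_flushq())
 *	SD_FAILFAST_ACTIVE --any command completes w/o error-->
 *	    SD_FAILFAST_INACTIVE (see sd_return_command() above)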
14861 */ 14862 #endif 14863 } 14864 } 14865 } else { 14866 /* 14867 * SD_RETRIES_FAILFAST is clear, which indicates that we 14868 * likely were able to at least establish some level of 14869 * communication with the target and subsequent commands 14870 * and/or retries are likely to get through to the target. 14871 * In this case we want to be aggressive about clearing 14872 * the failfast state. Note that this does not affect 14873 * the "failfast pending" condition. 14874 */ 14875 un->un_failfast_state = SD_FAILFAST_INACTIVE; 14876 } 14877 14878 14879 /* 14880 * Check the specified retry count to see if we can still do 14881 * any retries with this pkt before we should fail it. 14882 */ 14883 switch (retry_check_flag & SD_RETRIES_MASK) { 14884 case SD_RETRIES_VICTIM: 14885 /* 14886 * Check the victim retry count. If exhausted, then fall 14887 * thru & check against the standard retry count. 14888 */ 14889 if (xp->xb_victim_retry_count < un->un_victim_retry_count) { 14890 /* Increment count & proceed with the retry */ 14891 xp->xb_victim_retry_count++; 14892 break; 14893 } 14894 /* Victim retries exhausted, fall back to std. retries... */ 14895 /* FALLTHRU */ 14896 14897 case SD_RETRIES_STANDARD: 14898 if (xp->xb_retry_count >= un->un_retry_count) { 14899 /* Retries exhausted, fail the command */ 14900 SD_TRACE(SD_LOG_IO_CORE, un, 14901 "sd_retry_command: retries exhausted!\n"); 14902 /* 14903 * Update b_resid for failed SCMD_READ & SCMD_WRITE 14904 * commands with nonzero pkt_resid. 14905 */ 14906 if ((pktp->pkt_reason == CMD_CMPLT) && 14907 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) && 14908 (pktp->pkt_resid != 0)) { 14909 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F; 14910 if ((op == SCMD_READ) || (op == SCMD_WRITE)) { 14911 SD_UPDATE_B_RESID(bp, pktp); 14912 } 14913 } 14914 goto fail_command; 14915 } 14916 xp->xb_retry_count++; 14917 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14918 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 14919 break; 14920 14921 case SD_RETRIES_UA: 14922 if (xp->xb_ua_retry_count >= sd_ua_retry_count) { 14923 /* Retries exhausted, fail the command */ 14924 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 14925 "Unit Attention retries exhausted. " 14926 "Check the target.\n"); 14927 goto fail_command; 14928 } 14929 xp->xb_ua_retry_count++; 14930 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14931 "sd_retry_command: retry count:%d\n", 14932 xp->xb_ua_retry_count); 14933 break; 14934 14935 case SD_RETRIES_BUSY: 14936 if (xp->xb_retry_count >= un->un_busy_retry_count) { 14937 /* Retries exhausted, fail the command */ 14938 SD_TRACE(SD_LOG_IO_CORE, un, 14939 "sd_retry_command: retries exhausted!\n"); 14940 goto fail_command; 14941 } 14942 xp->xb_retry_count++; 14943 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14944 "sd_retry_command: retry count:%d\n", xp->xb_retry_count); 14945 break; 14946 14947 case SD_RETRIES_NOCHECK: 14948 default: 14949 /* No retry count to check. Just proceed with the retry */ 14950 break; 14951 } 14952 14953 xp->xb_pktp->pkt_flags |= FLAG_HEAD; 14954 14955 /* 14956 * If we were given a zero timeout, we must attempt to retry the 14957 * command immediately (ie, without a delay). 14958 */ 14959 if (retry_delay == 0) { 14960 /* 14961 * Check some limiting conditions to see if we can actually 14962 * do the immediate retry. If we cannot, then we must 14963 * fall back to queueing up a delayed retry.
14964 */ 14965 if (un->un_ncmds_in_transport >= un->un_throttle) { 14966 /* 14967 * We are at the throttle limit for the target, 14968 * fall back to delayed retry. 14969 */ 14970 retry_delay = SD_BSY_TIMEOUT; 14971 statp = kstat_waitq_enter; 14972 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14973 "sd_retry_command: immed. retry hit throttle!\n"); 14974 } else { 14975 /* 14976 * We're clear to proceed with the immediate retry. 14977 * First call the user-provided function (if any) 14978 */ 14979 if (user_funcp != NULL) { 14980 (*user_funcp)(un, bp, user_arg, 14981 SD_IMMEDIATE_RETRY_ISSUED); 14982 } 14983 14984 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14985 "sd_retry_command: issuing immediate retry\n"); 14986 14987 /* 14988 * Call sd_start_cmds() to transport the command to 14989 * the target. 14990 */ 14991 sd_start_cmds(un, bp); 14992 14993 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 14994 "sd_retry_command exit\n"); 14995 return; 14996 } 14997 } 14998 14999 /* 15000 * Set up to retry the command after a delay. 15001 * First call the user-provided function (if any) 15002 */ 15003 if (user_funcp != NULL) { 15004 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED); 15005 } 15006 15007 sd_set_retry_bp(un, bp, retry_delay, statp); 15008 15009 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15010 return; 15011 15012 fail_command: 15013 15014 if (user_funcp != NULL) { 15015 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED); 15016 } 15017 15018 fail_command_no_log: 15019 15020 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15021 "sd_retry_command: returning failed command\n"); 15022 15023 sd_return_failed_command(un, bp, failure_code); 15024 15025 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n"); 15026 } 15027 15028 15029 /* 15030 * Function: sd_set_retry_bp 15031 * 15032 * Description: Set up the given bp for retry. 15033 * 15034 * Arguments: un - ptr to associated softstate 15035 * bp - ptr to buf(9S) for the command 15036 * retry_delay - time interval before issuing retry (may be 0) 15037 * statp - optional pointer to kstat function 15038 * 15039 * Context: May be called under interrupt context 15040 */ 15041 15042 static void 15043 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay, 15044 void (*statp)(kstat_io_t *)) 15045 { 15046 ASSERT(un != NULL); 15047 ASSERT(mutex_owned(SD_MUTEX(un))); 15048 ASSERT(bp != NULL); 15049 15050 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15051 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp); 15052 15053 /* 15054 * Indicate that the command is being retried. This will not allow any 15055 * other commands on the wait queue to be transported to the target 15056 * until this command has been completed (success or failure). The 15057 * "retry command" is not transported to the target until the given 15058 * time delay expires, unless the user specified a 0 retry_delay. 15059 * 15060 * Note: the timeout(9F) callback routine is what actually calls 15061 * sd_start_cmds() to transport the command, with the exception of a 15062 * zero retry_delay. The only current implementor of a zero retry delay 15063 * is the case where a START_STOP_UNIT is sent to spin-up a device. 15064 */ 15065 if (un->un_retry_bp == NULL) { 15066 ASSERT(un->un_retry_statp == NULL); 15067 un->un_retry_bp = bp; 15068 15069 /* 15070 * If the user has not specified a delay the command should 15071 * be queued and no timeout should be scheduled. 
15072 */ 15073 if (retry_delay == 0) { 15074 /* 15075 * Save the kstat pointer that will be used in the 15076 * call to SD_UPDATE_KSTATS() below, so that 15077 * sd_start_cmds() can correctly decrement the waitq 15078 * count when it is time to transport this command. 15079 */ 15080 un->un_retry_statp = statp; 15081 goto done; 15082 } 15083 } 15084 15085 if (un->un_retry_bp == bp) { 15086 /* 15087 * Save the kstat pointer that will be used in the call to 15088 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can 15089 * correctly decrement the waitq count when it is time to 15090 * transport this command. 15091 */ 15092 un->un_retry_statp = statp; 15093 15094 /* 15095 * Schedule a timeout if: 15096 * 1) The user has specified a delay. 15097 * 2) There is not a START_STOP_UNIT callback pending. 15098 * 15099 * If no delay has been specified, then it is up to the caller 15100 * to ensure that IO processing continues without stalling. 15101 * Effectively, this means that the caller will issue the 15102 * required call to sd_start_cmds(). The START_STOP_UNIT 15103 * callback does this after the START STOP UNIT command has 15104 * completed. In either of these cases we should not schedule 15105 * a timeout callback here. Also don't schedule the timeout if 15106 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart. 15107 */ 15108 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) && 15109 (un->un_direct_priority_timeid == NULL)) { 15110 un->un_retry_timeid = 15111 timeout(sd_start_retry_command, un, retry_delay); 15112 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15113 "sd_set_retry_bp: setting timeout: un: 0x%p" 15114 " bp:0x%p un_retry_timeid:0x%p\n", 15115 un, bp, un->un_retry_timeid); 15116 } 15117 } else { 15118 /* 15119 * We only get in here if there is already another command 15120 * waiting to be retried. In this case, we just put the 15121 * given command onto the wait queue, so it can be transported 15122 * after the current retry command has completed. 15123 * 15124 * Also we have to make sure that if the command at the head 15125 * of the wait queue is the un_failfast_bp, that we do not 15126 * put ahead of it any other commands that are to be retried. 15127 */ 15128 if ((un->un_failfast_bp != NULL) && 15129 (un->un_failfast_bp == un->un_waitq_headp)) { 15130 /* 15131 * Enqueue this command AFTER the first command on 15132 * the wait queue (which is also un_failfast_bp). 15133 */ 15134 bp->av_forw = un->un_waitq_headp->av_forw; 15135 un->un_waitq_headp->av_forw = bp; 15136 if (un->un_waitq_headp == un->un_waitq_tailp) { 15137 un->un_waitq_tailp = bp; 15138 } 15139 } else { 15140 /* Enqueue this command at the head of the waitq. */ 15141 bp->av_forw = un->un_waitq_headp; 15142 un->un_waitq_headp = bp; 15143 if (un->un_waitq_tailp == NULL) { 15144 un->un_waitq_tailp = bp; 15145 } 15146 } 15147 15148 if (statp == NULL) { 15149 statp = kstat_waitq_enter; 15150 } 15151 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15152 "sd_set_retry_bp: un:0x%p already delayed retry\n", un); 15153 } 15154 15155 done: 15156 if (statp != NULL) { 15157 SD_UPDATE_KSTATS(un, statp, bp); 15158 } 15159 15160 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15161 "sd_set_retry_bp: exit un:0x%p\n", un); 15162 } 15163 15164 15165 /* 15166 * Function: sd_start_retry_command 15167 * 15168 * Description: Start the command that has been waiting on the target's 15169 * retry queue. Called from timeout(9F) context after the 15170 * retry delay interval has expired. 
15171 * 15172 * Arguments: arg - pointer to associated softstate for the device. 15173 * 15174 * Context: timeout(9F) thread context. May not sleep. 15175 */ 15176 15177 static void 15178 sd_start_retry_command(void *arg) 15179 { 15180 struct sd_lun *un = arg; 15181 15182 ASSERT(un != NULL); 15183 ASSERT(!mutex_owned(SD_MUTEX(un))); 15184 15185 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15186 "sd_start_retry_command: entry\n"); 15187 15188 mutex_enter(SD_MUTEX(un)); 15189 15190 un->un_retry_timeid = NULL; 15191 15192 if (un->un_retry_bp != NULL) { 15193 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15194 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n", 15195 un, un->un_retry_bp); 15196 sd_start_cmds(un, un->un_retry_bp); 15197 } 15198 15199 mutex_exit(SD_MUTEX(un)); 15200 15201 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15202 "sd_start_retry_command: exit\n"); 15203 } 15204 15205 15206 /* 15207 * Function: sd_start_direct_priority_command 15208 * 15209 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had 15210 * received TRAN_BUSY when we called scsi_transport() to send it 15211 * to the underlying HBA. This function is called from timeout(9F) 15212 * context after the delay interval has expired. 15213 * 15214 * Arguments: arg - pointer to associated buf(9S) to be restarted. 15215 * 15216 * Context: timeout(9F) thread context. May not sleep. 15217 */ 15218 15219 static void 15220 sd_start_direct_priority_command(void *arg) 15221 { 15222 struct buf *priority_bp = arg; 15223 struct sd_lun *un; 15224 15225 ASSERT(priority_bp != NULL); 15226 un = SD_GET_UN(priority_bp); 15227 ASSERT(un != NULL); 15228 ASSERT(!mutex_owned(SD_MUTEX(un))); 15229 15230 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15231 "sd_start_direct_priority_command: entry\n"); 15232 15233 mutex_enter(SD_MUTEX(un)); 15234 un->un_direct_priority_timeid = NULL; 15235 sd_start_cmds(un, priority_bp); 15236 mutex_exit(SD_MUTEX(un)); 15237 15238 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15239 "sd_start_direct_priority_command: exit\n"); 15240 } 15241 15242 15243 /* 15244 * Function: sd_send_request_sense_command 15245 * 15246 * Description: Sends a REQUEST SENSE command to the target 15247 * 15248 * Context: May be called from interrupt context. 15249 */ 15250 15251 static void 15252 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp, 15253 struct scsi_pkt *pktp) 15254 { 15255 ASSERT(bp != NULL); 15256 ASSERT(un != NULL); 15257 ASSERT(mutex_owned(SD_MUTEX(un))); 15258 15259 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: " 15260 "entry: buf:0x%p\n", bp); 15261 15262 /* 15263 * If we are syncing or dumping, then fail the command to avoid a 15264 * recursive callback into scsi_transport(). Also fail the command 15265 * if we are suspended (legacy behavior). 15266 */ 15267 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) || 15268 (un->un_state == SD_STATE_DUMPING)) { 15269 sd_return_failed_command(un, bp, EIO); 15270 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15271 "sd_send_request_sense_command: syncing/dumping, exit\n"); 15272 return; 15273 } 15274 15275 /* 15276 * Retry the failed command and don't issue the request sense if: 15277 * 1) the sense buf is busy 15278 * 2) we have 1 or more outstanding commands on the target 15279 * (the sense data will be cleared or invalidated anyway) 15280 * 15281 * Note: There could be an issue with not checking a retry limit here; 15282 * the problem is determining which retry limit to check.
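 *
 * When the request sense is issued, the sequence below hands the
 * pre-allocated RQS packet to the transport; sd_mark_rqs_busy()
 * records the original bp in xb_sense_bp so that
 * sd_handle_request_sense() can recover it at interrupt time:
 *
 *	sd_mark_rqs_busy(un, bp);
 *	sd_start_cmds(un, un->un_rqs_bp);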
15283 */ 15284 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) { 15285 /* Don't retry if the command is flagged as non-retryable */ 15286 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 15287 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, 15288 NULL, NULL, 0, SD_BSY_TIMEOUT, kstat_waitq_enter); 15289 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15290 "sd_send_request_sense_command: " 15291 "at full throttle, retrying exit\n"); 15292 } else { 15293 sd_return_failed_command(un, bp, EIO); 15294 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15295 "sd_send_request_sense_command: " 15296 "at full throttle, non-retryable exit\n"); 15297 } 15298 return; 15299 } 15300 15301 sd_mark_rqs_busy(un, bp); 15302 sd_start_cmds(un, un->un_rqs_bp); 15303 15304 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15305 "sd_send_request_sense_command: exit\n"); 15306 } 15307 15308 15309 /* 15310 * Function: sd_mark_rqs_busy 15311 * 15312 * Description: Indicate that the request sense bp for this instance is 15313 * in use. 15314 * 15315 * Context: May be called under interrupt context 15316 */ 15317 15318 static void 15319 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp) 15320 { 15321 struct sd_xbuf *sense_xp; 15322 15323 ASSERT(un != NULL); 15324 ASSERT(bp != NULL); 15325 ASSERT(mutex_owned(SD_MUTEX(un))); 15326 ASSERT(un->un_sense_isbusy == 0); 15327 15328 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: " 15329 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un); 15330 15331 sense_xp = SD_GET_XBUF(un->un_rqs_bp); 15332 ASSERT(sense_xp != NULL); 15333 15334 SD_INFO(SD_LOG_IO, un, 15335 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp); 15336 15337 ASSERT(sense_xp->xb_pktp != NULL); 15338 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) 15339 == (FLAG_SENSING | FLAG_HEAD)); 15340 15341 un->un_sense_isbusy = 1; 15342 un->un_rqs_bp->b_resid = 0; 15343 sense_xp->xb_pktp->pkt_resid = 0; 15344 sense_xp->xb_pktp->pkt_reason = 0; 15345 15346 /* So we can get back the bp at interrupt time! */ 15347 sense_xp->xb_sense_bp = bp; 15348 15349 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH); 15350 15351 /* 15352 * Mark this buf as awaiting sense data. (This is already set in 15353 * the pkt_flags for the RQS packet.) 15354 */ 15355 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING; 15356 15357 sense_xp->xb_retry_count = 0; 15358 sense_xp->xb_victim_retry_count = 0; 15359 sense_xp->xb_ua_retry_count = 0; 15360 sense_xp->xb_dma_resid = 0; 15361 15362 /* Clean up the fields for auto-request sense */ 15363 sense_xp->xb_sense_status = 0; 15364 sense_xp->xb_sense_state = 0; 15365 sense_xp->xb_sense_resid = 0; 15366 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data)); 15367 15368 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n"); 15369 } 15370 15371 15372 /* 15373 * Function: sd_mark_rqs_idle 15374 * 15375 * Description: SD_MUTEX must be held continuously through this routine 15376 * to prevent reuse of the rqs struct before the caller can 15377 * complete its processing.
15378 * 15379 * Return Code: Pointer to the RQS buf 15380 * 15381 * Context: May be called under interrupt context 15382 */ 15383 15384 static struct buf * 15385 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp) 15386 { 15387 struct buf *bp; 15388 ASSERT(un != NULL); 15389 ASSERT(sense_xp != NULL); 15390 ASSERT(mutex_owned(SD_MUTEX(un))); 15391 ASSERT(un->un_sense_isbusy != 0); 15392 15393 un->un_sense_isbusy = 0; 15394 bp = sense_xp->xb_sense_bp; 15395 sense_xp->xb_sense_bp = NULL; 15396 15397 /* This pkt is no longer interested in getting sense data */ 15398 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING; 15399 15400 return (bp); 15401 } 15402 15403 15404 15405 /* 15406 * Function: sd_alloc_rqs 15407 * 15408 * Description: Set up the unit to receive auto request sense data 15409 * 15410 * Return Code: DDI_SUCCESS or DDI_FAILURE 15411 * 15412 * Context: Called under attach(9E) context 15413 */ 15414 15415 static int 15416 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un) 15417 { 15418 struct sd_xbuf *xp; 15419 15420 ASSERT(un != NULL); 15421 ASSERT(!mutex_owned(SD_MUTEX(un))); 15422 ASSERT(un->un_rqs_bp == NULL); 15423 ASSERT(un->un_rqs_pktp == NULL); 15424 15425 /* 15426 * First allocate the required buf and scsi_pkt structs, then set up 15427 * the CDB in the scsi_pkt for a REQUEST SENSE command. 15428 */ 15429 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL, 15430 SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL); 15431 if (un->un_rqs_bp == NULL) { 15432 return (DDI_FAILURE); 15433 } 15434 15435 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp, 15436 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL); 15437 15438 if (un->un_rqs_pktp == NULL) { 15439 sd_free_rqs(un); 15440 return (DDI_FAILURE); 15441 } 15442 15443 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */ 15444 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp, 15445 SCMD_REQUEST_SENSE, 0, SENSE_LENGTH, 0); 15446 15447 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp); 15448 15449 /* Set up the other needed members in the ARQ scsi_pkt. */ 15450 un->un_rqs_pktp->pkt_comp = sdintr; 15451 un->un_rqs_pktp->pkt_time = sd_io_time; 15452 un->un_rqs_pktp->pkt_flags |= 15453 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */ 15454 15455 /* 15456 * Allocate & init the sd_xbuf struct for the RQS command. Do not 15457 * provide any initpkt or destroypkt routines, as we take care of 15458 * scsi_pkt allocation/freeing here and in sd_free_rqs(). 15459 */ 15460 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP); 15461 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL); 15462 xp->xb_pktp = un->un_rqs_pktp; 15463 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15464 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n", 15465 un, xp, un->un_rqs_pktp, un->un_rqs_bp); 15466 15467 /* 15468 * Save the pointer to the request sense private bp so it can 15469 * be retrieved in sdintr. 15470 */ 15471 un->un_rqs_pktp->pkt_private = un->un_rqs_bp; 15472 ASSERT(un->un_rqs_bp->b_private == xp); 15473 15474 /* 15475 * See if the HBA supports auto-request sense for the specified 15476 * target/lun. If it does, then try to enable it (if not already 15477 * enabled). 15478 * 15479 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return 15480 * failure, while for other HBAs (pln) scsi_ifsetcap will always 15481 * return success. However, in both of these cases ARQ is always 15482 * enabled and scsi_ifgetcap will always return true.
The best approach 15483 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap(). 15484 * 15485 * The 3rd case is an HBA (adp) that always returns enabled on 15486 * scsi_ifgetcap even when it is not enabled; the best approach 15487 * there is to issue a scsi_ifsetcap followed by a scsi_ifgetcap. 15488 * Note: this case is to circumvent the Adaptec bug. (x86 only) 15489 */ 15490 15491 if (un->un_f_is_fibre == TRUE) { 15492 un->un_f_arq_enabled = TRUE; 15493 } else { 15494 #if defined(__i386) || defined(__amd64) 15495 /* 15496 * Circumvent the Adaptec bug; remove this code when 15497 * the bug is fixed. 15498 */ 15499 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1); 15500 #endif 15501 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) { 15502 case 0: 15503 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15504 "sd_alloc_rqs: HBA supports ARQ\n"); 15505 /* 15506 * ARQ is supported by this HBA but currently is not 15507 * enabled. Attempt to enable it and if successful then 15508 * mark this instance as ARQ enabled. 15509 */ 15510 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1) 15511 == 1) { 15512 /* Successfully enabled ARQ in the HBA */ 15513 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15514 "sd_alloc_rqs: ARQ enabled\n"); 15515 un->un_f_arq_enabled = TRUE; 15516 } else { 15517 /* Could not enable ARQ in the HBA */ 15518 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15519 "sd_alloc_rqs: failed ARQ enable\n"); 15520 un->un_f_arq_enabled = FALSE; 15521 } 15522 break; 15523 case 1: 15524 /* 15525 * ARQ is supported by this HBA and is already enabled. 15526 * Just mark ARQ as enabled for this instance. 15527 */ 15528 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15529 "sd_alloc_rqs: ARQ already enabled\n"); 15530 un->un_f_arq_enabled = TRUE; 15531 break; 15532 default: 15533 /* 15534 * ARQ is not supported by this HBA; disable it for this 15535 * instance. 15536 */ 15537 SD_INFO(SD_LOG_ATTACH_DETACH, un, 15538 "sd_alloc_rqs: HBA does not support ARQ\n"); 15539 un->un_f_arq_enabled = FALSE; 15540 break; 15541 } 15542 } 15543 15544 return (DDI_SUCCESS); 15545 } 15546 15547 15548 /* 15549 * Function: sd_free_rqs 15550 * 15551 * Description: Cleanup for the per-instance RQS command. 15552 * 15553 * Context: Kernel thread context 15554 */ 15555 15556 static void 15557 sd_free_rqs(struct sd_lun *un) 15558 { 15559 ASSERT(un != NULL); 15560 15561 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n"); 15562 15563 /* 15564 * If consistent memory is bound to a scsi_pkt, the pkt 15565 * has to be destroyed *before* freeing the consistent memory. 15566 * Don't change the sequence of these operations. 15567 * scsi_destroy_pkt() might access memory, which isn't allowed, 15568 * after it was freed in scsi_free_consistent_buf(). 15569 */ 15570 if (un->un_rqs_pktp != NULL) { 15571 scsi_destroy_pkt(un->un_rqs_pktp); 15572 un->un_rqs_pktp = NULL; 15573 } 15574 15575 if (un->un_rqs_bp != NULL) { 15576 kmem_free(SD_GET_XBUF(un->un_rqs_bp), sizeof (struct sd_xbuf)); 15577 scsi_free_consistent_buf(un->un_rqs_bp); 15578 un->un_rqs_bp = NULL; 15579 } 15580 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n"); 15581 } 15582 15583 15584 15585 /* 15586 * Function: sd_reduce_throttle 15587 * 15588 * Description: Reduces the maximum # of outstanding commands on a 15589 * target to the current number of outstanding commands. 15590 * Queues a timeout(9F) callback to restore the limit 15591 * after a specified interval has elapsed. 15592 * Typically used when we get a TRAN_BUSY return code 15593 * back from scsi_transport().
15594 * 15595 * Arguments: un - ptr to the sd_lun softstate struct 15596 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL 15597 * 15598 * Context: May be called from interrupt context 15599 */ 15600 15601 static void 15602 sd_reduce_throttle(struct sd_lun *un, int throttle_type) 15603 { 15604 ASSERT(un != NULL); 15605 ASSERT(mutex_owned(SD_MUTEX(un))); 15606 ASSERT(un->un_ncmds_in_transport >= 0); 15607 15608 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 15609 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n", 15610 un, un->un_throttle, un->un_ncmds_in_transport); 15611 15612 if (un->un_throttle > 1) { 15613 if (un->un_f_use_adaptive_throttle == TRUE) { 15614 switch (throttle_type) { 15615 case SD_THROTTLE_TRAN_BUSY: 15616 if (un->un_busy_throttle == 0) { 15617 un->un_busy_throttle = un->un_throttle; 15618 } 15619 break; 15620 case SD_THROTTLE_QFULL: 15621 un->un_busy_throttle = 0; 15622 break; 15623 default: 15624 ASSERT(FALSE); 15625 } 15626 15627 if (un->un_ncmds_in_transport > 0) { 15628 un->un_throttle = un->un_ncmds_in_transport; 15629 } 15630 } else { 15631 if (un->un_ncmds_in_transport == 0) { 15632 un->un_throttle = 1; 15633 } else { 15634 un->un_throttle = un->un_ncmds_in_transport; 15635 } 15636 } 15637 } 15638 15639 /* Reschedule the timeout if none is currently active */ 15640 if (un->un_reset_throttle_timeid == NULL) { 15641 un->un_reset_throttle_timeid = timeout(sd_restore_throttle, 15642 un, sd_reset_throttle_timeout); 15643 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15644 "sd_reduce_throttle: timeout scheduled!\n"); 15645 } 15646 15647 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: " 15648 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle); 15649 } 15650 15651 15652 15653 /* 15654 * Function: sd_restore_throttle 15655 * 15656 * Description: Callback function for timeout(9F). Resets the current 15657 * value of un->un_throttle to its default. 15658 * 15659 * Arguments: arg - pointer to associated softstate for the device. 15660 * 15661 * Context: May be called from interrupt context 15662 */ 15663 15664 static void 15665 sd_restore_throttle(void *arg) 15666 { 15667 struct sd_lun *un = arg; 15668 15669 ASSERT(un != NULL); 15670 ASSERT(!mutex_owned(SD_MUTEX(un))); 15671 15672 mutex_enter(SD_MUTEX(un)); 15673 15674 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 15675 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle); 15676 15677 un->un_reset_throttle_timeid = NULL; 15678 15679 if (un->un_f_use_adaptive_throttle == TRUE) { 15680 /* 15681 * If un_busy_throttle is nonzero, then it contains the 15682 * value that un_throttle was when we got a TRAN_BUSY back 15683 * from scsi_transport(). We want to revert back to this 15684 * value. 15685 */ 15686 if (un->un_busy_throttle > 0) { 15687 un->un_throttle = un->un_busy_throttle; 15688 un->un_busy_throttle = 0; 15689 } 15690 15691 /* 15692 * If un_throttle has fallen below the low-water mark, we 15693 * restore the maximum value here (and allow it to ratchet 15694 * down again if necessary). 
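 *
 * Taken together with sd_reduce_throttle() above, the adaptive
 * cycle is roughly:
 *
 *	TRAN_BUSY  -> sd_reduce_throttle(): save un_throttle in
 *	              un_busy_throttle, drop to un_ncmds_in_transport
 *	timeout    -> sd_restore_throttle(): revert to un_busy_throttle
 *	              if set, or to un_saved_throttle once the value
 *	              falls below un_min_throttle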
15695 */ 15696 if (un->un_throttle < un->un_min_throttle) { 15697 un->un_throttle = un->un_saved_throttle; 15698 } 15699 } else { 15700 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: " 15701 "restoring limit from 0x%x to 0x%x\n", 15702 un->un_throttle, un->un_saved_throttle); 15703 un->un_throttle = un->un_saved_throttle; 15704 } 15705 15706 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15707 "sd_restore_throttle: calling sd_start_cmds!\n"); 15708 15709 sd_start_cmds(un, NULL); 15710 15711 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, 15712 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n", 15713 un, un->un_throttle); 15714 15715 mutex_exit(SD_MUTEX(un)); 15716 15717 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n"); 15718 } 15719 15720 /* 15721 * Function: sdrunout 15722 * 15723 * Description: Callback routine for scsi_init_pkt when a resource allocation 15724 * fails. 15725 * 15726 * Arguments: arg - a pointer to the sd_lun unit struct for the particular 15727 * soft state instance. 15728 * 15729 * Return Code: The scsi_init_pkt routine allows for the callback function to 15730 * return a 0 indicating the callback should be rescheduled or a 1 15731 * indicating not to reschedule. This routine always returns 1 15732 * because the driver always provides a callback function to 15733 * scsi_init_pkt. This results in a callback always being scheduled 15734 * (via the scsi_init_pkt callback implementation) if a resource 15735 * failure occurs. 15736 * 15737 * Context: This callback function may not block or call routines that block. 15738 * 15739 * Note: Using the scsi_init_pkt callback facility can result in an I/O 15740 * request persisting at the head of the list which cannot be 15741 * satisfied even after multiple retries. In the future the driver 15742 * may implement some type of maximum runout count before failing 15743 * an I/O. 15744 */ 15745 15746 static int 15747 sdrunout(caddr_t arg) 15748 { 15749 struct sd_lun *un = (struct sd_lun *)arg; 15750 15751 ASSERT(un != NULL); 15752 ASSERT(!mutex_owned(SD_MUTEX(un))); 15753 15754 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n"); 15755 15756 mutex_enter(SD_MUTEX(un)); 15757 sd_start_cmds(un, NULL); 15758 mutex_exit(SD_MUTEX(un)); 15759 /* 15760 * This callback routine always returns 1 (i.e. do not reschedule) 15761 * because we always specify sdrunout as the callback handler for 15762 * scsi_init_pkt inside the call to sd_start_cmds. 15763 */ 15764 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n"); 15765 return (1); 15766 } 15767 15768 15769 /* 15770 * Function: sdintr 15771 * 15772 * Description: Completion callback routine for scsi_pkt(9S) structs 15773 * sent to the HBA driver via scsi_transport(9F).
15774 * 15775 * Context: Interrupt context 15776 */ 15777 15778 static void 15779 sdintr(struct scsi_pkt *pktp) 15780 { 15781 struct buf *bp; 15782 struct sd_xbuf *xp; 15783 struct sd_lun *un; 15784 15785 ASSERT(pktp != NULL); 15786 bp = (struct buf *)pktp->pkt_private; 15787 ASSERT(bp != NULL); 15788 xp = SD_GET_XBUF(bp); 15789 ASSERT(xp != NULL); 15790 ASSERT(xp->xb_pktp != NULL); 15791 un = SD_GET_UN(bp); 15792 ASSERT(un != NULL); 15793 ASSERT(!mutex_owned(SD_MUTEX(un))); 15794 15795 #ifdef SD_FAULT_INJECTION 15796 15797 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n"); 15798 /* SD FaultInjection */ 15799 sd_faultinjection(pktp); 15800 15801 #endif /* SD_FAULT_INJECTION */ 15802 15803 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p," 15804 " xp:0x%p, un:0x%p\n", bp, xp, un); 15805 15806 mutex_enter(SD_MUTEX(un)); 15807 15808 /* Reduce the count of the #commands currently in transport */ 15809 un->un_ncmds_in_transport--; 15810 ASSERT(un->un_ncmds_in_transport >= 0); 15811 15812 /* Increment counter to indicate that the callback routine is active */ 15813 un->un_in_callback++; 15814 15815 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp); 15816 15817 #ifdef SDDEBUG 15818 if (bp == un->un_retry_bp) { 15819 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: " 15820 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n", 15821 un, un->un_retry_bp, un->un_ncmds_in_transport); 15822 } 15823 #endif 15824 15825 /* 15826 * If pkt_reason is CMD_DEV_GONE, just fail the command 15827 */ 15828 if (pktp->pkt_reason == CMD_DEV_GONE) { 15829 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 15830 "Device is gone\n"); 15831 sd_return_failed_command(un, bp, EIO); 15832 goto exit; 15833 } 15834 15835 /* 15836 * First see if the pkt has auto-request sense data with it.... 15837 * Look at the packet state first so we don't take a performance 15838 * hit looking at the arq enabled flag unless absolutely necessary. 15839 */ 15840 if ((pktp->pkt_state & STATE_ARQ_DONE) && 15841 (un->un_f_arq_enabled == TRUE)) { 15842 /* 15843 * The HBA did an auto request sense for this command so check 15844 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15845 * driver command that should not be retried. 15846 */ 15847 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15848 /* 15849 * Save the relevant sense info into the xp for the 15850 * original cmd. 15851 */ 15852 struct scsi_arq_status *asp; 15853 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 15854 xp->xb_sense_status = 15855 *((uchar_t *)(&(asp->sts_rqpkt_status))); 15856 xp->xb_sense_state = asp->sts_rqpkt_state; 15857 xp->xb_sense_resid = asp->sts_rqpkt_resid; 15858 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 15859 min(sizeof (struct scsi_extended_sense), 15860 SENSE_LENGTH)); 15861 15862 /* fail the command */ 15863 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15864 "sdintr: arq done and FLAG_DIAGNOSE set\n"); 15865 sd_return_failed_command(un, bp, EIO); 15866 goto exit; 15867 } 15868 15869 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15870 /* 15871 * We want to either retry or fail this command, so free 15872 * the DMA resources here. If we retry the command then 15873 * the DMA resources will be reallocated in sd_start_cmds(). 15874 * Note that when PKT_DMA_PARTIAL is used, this reallocation 15875 * causes the *entire* transfer to start over again from the 15876 * beginning of the request, even for PARTIAL chunks that 15877 * have already transferred successfully. 
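 *
 * (The same fibre/non-USCSI/non-SENSING guard and SD_XB_DMA_FREED
 * marking appear in the not_successful path below and in the
 * TRAN_BUSY handling in sd_start_cmds(); sd_start_cmds() uses the
 * SD_XB_DMA_FREED flag to know that it must reallocate DMA
 * resources via scsi_init_pkt() before re-issuing the command.)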
15878 */ 15879 if ((un->un_f_is_fibre == TRUE) && 15880 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15881 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15882 scsi_dmafree(pktp); 15883 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15884 } 15885 #endif 15886 15887 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15888 "sdintr: arq done, sd_handle_auto_request_sense\n"); 15889 15890 sd_handle_auto_request_sense(un, bp, xp, pktp); 15891 goto exit; 15892 } 15893 15894 /* Next see if this is the REQUEST SENSE pkt for the instance */ 15895 if (pktp->pkt_flags & FLAG_SENSING) { 15896 /* This pktp is from the unit's REQUEST_SENSE command */ 15897 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15898 "sdintr: sd_handle_request_sense\n"); 15899 sd_handle_request_sense(un, bp, xp, pktp); 15900 goto exit; 15901 } 15902 15903 /* 15904 * Check to see if the command successfully completed as requested; 15905 * this is the most common case (and also the hot performance path). 15906 * 15907 * Requirements for successful completion are: 15908 * pkt_reason is CMD_CMPLT and packet status is status good. 15909 * In addition: 15910 * - A residual of zero indicates successful completion no matter what 15911 * the command is. 15912 * - If the residual is not zero and the command is not a read or 15913 * write, then it's still defined as successful completion. In other 15914 * words, if the command is a read or write the residual must be 15915 * zero for successful completion. 15916 * - If the residual is not zero and the command is a read or 15917 * write, and it's a USCSICMD, then it's still defined as 15918 * successful completion. 15919 */ 15920 if ((pktp->pkt_reason == CMD_CMPLT) && 15921 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) { 15922 15923 /* 15924 * Since this command is returned with a good status, we 15925 * can reset the count for Sonoma failover. 15926 */ 15927 un->un_sonoma_failure_count = 0; 15928 15929 /* 15930 * Return all USCSI commands on good status 15931 */ 15932 if (pktp->pkt_resid == 0) { 15933 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15934 "sdintr: returning command for resid == 0\n"); 15935 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) && 15936 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) { 15937 SD_UPDATE_B_RESID(bp, pktp); 15938 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15939 "sdintr: returning command for resid != 0\n"); 15940 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) { 15941 SD_UPDATE_B_RESID(bp, pktp); 15942 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 15943 "sdintr: returning uscsi command\n"); 15944 } else { 15945 goto not_successful; 15946 } 15947 sd_return_command(un, bp); 15948 15949 /* 15950 * Decrement counter to indicate that the callback routine 15951 * is done. 15952 */ 15953 un->un_in_callback--; 15954 ASSERT(un->un_in_callback >= 0); 15955 mutex_exit(SD_MUTEX(un)); 15956 15957 return; 15958 } 15959 15960 not_successful: 15961 15962 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */ 15963 /* 15964 * The following is based upon knowledge of the underlying transport 15965 * and its use of DMA resources. This code should be removed when 15966 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor 15967 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf() 15968 * and sd_start_cmds(). 15969 * 15970 * Free any DMA resources associated with this command if there 15971 * is a chance it could be retried or enqueued for later retry. 
15972 * If we keep the DMA binding then mpxio cannot reissue the 15973 * command on another path whenever a path failure occurs. 15974 * 15975 * Note that when PKT_DMA_PARTIAL is used, free/reallocation 15976 * causes the *entire* transfer to start over again from the 15977 * beginning of the request, even for PARTIAL chunks that 15978 * have already transferred successfully. 15979 * 15980 * This is only done for non-uscsi commands (and also skipped for the 15981 * driver's internal RQS command). Also just do this for Fibre Channel 15982 * devices as these are the only ones that support mpxio. 15983 */ 15984 if ((un->un_f_is_fibre == TRUE) && 15985 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) && 15986 ((pktp->pkt_flags & FLAG_SENSING) == 0)) { 15987 scsi_dmafree(pktp); 15988 xp->xb_pkt_flags |= SD_XB_DMA_FREED; 15989 } 15990 #endif 15991 15992 /* 15993 * The command did not successfully complete as requested so check 15994 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal 15995 * driver command that should not be retried so just return. If 15996 * FLAG_DIAGNOSE is not set the error will be processed below. 15997 */ 15998 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) { 15999 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16000 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n"); 16001 /* 16002 * Issue a request sense if a check condition caused the error 16003 * (we handle the auto request sense case above), otherwise 16004 * just fail the command. 16005 */ 16006 if ((pktp->pkt_reason == CMD_CMPLT) && 16007 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) { 16008 sd_send_request_sense_command(un, bp, pktp); 16009 } else { 16010 sd_return_failed_command(un, bp, EIO); 16011 } 16012 goto exit; 16013 } 16014 16015 /* 16016 * The command did not successfully complete as requested so process 16017 * the error, retry, and/or attempt recovery. 
16018 */ 16019 switch (pktp->pkt_reason) { 16020 case CMD_CMPLT: 16021 switch (SD_GET_PKT_STATUS(pktp)) { 16022 case STATUS_GOOD: 16023 /* 16024 * The command completed successfully with a non-zero 16025 * residual 16026 */ 16027 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16028 "sdintr: STATUS_GOOD \n"); 16029 sd_pkt_status_good(un, bp, xp, pktp); 16030 break; 16031 16032 case STATUS_CHECK: 16033 case STATUS_TERMINATED: 16034 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16035 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n"); 16036 sd_pkt_status_check_condition(un, bp, xp, pktp); 16037 break; 16038 16039 case STATUS_BUSY: 16040 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16041 "sdintr: STATUS_BUSY\n"); 16042 sd_pkt_status_busy(un, bp, xp, pktp); 16043 break; 16044 16045 case STATUS_RESERVATION_CONFLICT: 16046 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16047 "sdintr: STATUS_RESERVATION_CONFLICT\n"); 16048 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16049 break; 16050 16051 case STATUS_QFULL: 16052 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16053 "sdintr: STATUS_QFULL\n"); 16054 sd_pkt_status_qfull(un, bp, xp, pktp); 16055 break; 16056 16057 case STATUS_MET: 16058 case STATUS_INTERMEDIATE: 16059 case STATUS_SCSI2: 16060 case STATUS_INTERMEDIATE_MET: 16061 case STATUS_ACA_ACTIVE: 16062 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 16063 "Unexpected SCSI status received: 0x%x\n", 16064 SD_GET_PKT_STATUS(pktp)); 16065 sd_return_failed_command(un, bp, EIO); 16066 break; 16067 16068 default: 16069 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 16070 "Invalid SCSI status received: 0x%x\n", 16071 SD_GET_PKT_STATUS(pktp)); 16072 sd_return_failed_command(un, bp, EIO); 16073 break; 16074 16075 } 16076 break; 16077 16078 case CMD_INCOMPLETE: 16079 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16080 "sdintr: CMD_INCOMPLETE\n"); 16081 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp); 16082 break; 16083 case CMD_TRAN_ERR: 16084 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16085 "sdintr: CMD_TRAN_ERR\n"); 16086 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp); 16087 break; 16088 case CMD_RESET: 16089 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16090 "sdintr: CMD_RESET \n"); 16091 sd_pkt_reason_cmd_reset(un, bp, xp, pktp); 16092 break; 16093 case CMD_ABORTED: 16094 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16095 "sdintr: CMD_ABORTED \n"); 16096 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp); 16097 break; 16098 case CMD_TIMEOUT: 16099 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16100 "sdintr: CMD_TIMEOUT\n"); 16101 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp); 16102 break; 16103 case CMD_UNX_BUS_FREE: 16104 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16105 "sdintr: CMD_UNX_BUS_FREE \n"); 16106 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp); 16107 break; 16108 case CMD_TAG_REJECT: 16109 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16110 "sdintr: CMD_TAG_REJECT\n"); 16111 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp); 16112 break; 16113 default: 16114 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 16115 "sdintr: default\n"); 16116 sd_pkt_reason_default(un, bp, xp, pktp); 16117 break; 16118 } 16119 16120 exit: 16121 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n"); 16122 16123 /* Decrement counter to indicate that the callback routine is done. */ 16124 un->un_in_callback--; 16125 ASSERT(un->un_in_callback >= 0); 16126 16127 /* 16128 * At this point, the pkt has been dispatched, ie, it is either 16129 * being re-tried or has been returned to its caller and should 16130 * not be referenced. 
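 *
 * (The pkt is destroyed on the completion path: sd_return_command()
 * invokes (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp) and clears
 * xp->xb_pktp, so only the retry machinery may still hold a valid
 * reference at this point.)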
16131 */ 16132 16133 mutex_exit(SD_MUTEX(un)); 16134 } 16135 16136 16137 /* 16138 * Function: sd_print_incomplete_msg 16139 * 16140 * Description: Prints the error message for a CMD_INCOMPLETE error. 16141 * 16142 * Arguments: un - ptr to associated softstate for the device. 16143 * bp - ptr to the buf(9S) for the command. 16144 * arg - message string ptr 16145 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED, 16146 * or SD_NO_RETRY_ISSUED. 16147 * 16148 * Context: May be called under interrupt context 16149 */ 16150 16151 static void 16152 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 16153 { 16154 struct scsi_pkt *pktp; 16155 char *msgp; 16156 char *cmdp = arg; 16157 16158 ASSERT(un != NULL); 16159 ASSERT(mutex_owned(SD_MUTEX(un))); 16160 ASSERT(bp != NULL); 16161 ASSERT(arg != NULL); 16162 pktp = SD_GET_PKTP(bp); 16163 ASSERT(pktp != NULL); 16164 16165 switch (code) { 16166 case SD_DELAYED_RETRY_ISSUED: 16167 case SD_IMMEDIATE_RETRY_ISSUED: 16168 msgp = "retrying"; 16169 break; 16170 case SD_NO_RETRY_ISSUED: 16171 default: 16172 msgp = "giving up"; 16173 break; 16174 } 16175 16176 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16177 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16178 "incomplete %s - %s\n", cmdp, msgp); 16179 } 16180 } 16181 16182 16183 16184 /* 16185 * Function: sd_pkt_status_good 16186 * 16187 * Description: Processing for a STATUS_GOOD code in pkt_status. 16188 * 16189 * Context: May be called under interrupt context 16190 */ 16191 16192 static void 16193 sd_pkt_status_good(struct sd_lun *un, struct buf *bp, 16194 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16195 { 16196 char *cmdp; 16197 16198 ASSERT(un != NULL); 16199 ASSERT(mutex_owned(SD_MUTEX(un))); 16200 ASSERT(bp != NULL); 16201 ASSERT(xp != NULL); 16202 ASSERT(pktp != NULL); 16203 ASSERT(pktp->pkt_reason == CMD_CMPLT); 16204 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD); 16205 ASSERT(pktp->pkt_resid != 0); 16206 16207 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n"); 16208 16209 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16210 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) { 16211 case SCMD_READ: 16212 cmdp = "read"; 16213 break; 16214 case SCMD_WRITE: 16215 cmdp = "write"; 16216 break; 16217 default: 16218 SD_UPDATE_B_RESID(bp, pktp); 16219 sd_return_command(un, bp); 16220 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 16221 return; 16222 } 16223 16224 /* 16225 * See if we can retry the read/write, preferably immediately. 16226 * If retries are exhausted, then sd_retry_command() will update 16227 * the b_resid count. 16228 */ 16229 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg, 16230 cmdp, EIO, (clock_t)0, NULL); 16231 16232 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n"); 16233 } 16234 16235 16236 16237 16238 16239 /* 16240 * Function: sd_handle_request_sense 16241 * 16242 * Description: Processing for non-auto Request Sense command.
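 *		(Illustrative note: sense_bp/sense_xp/sense_pktp here describe
 *		the driver's internal REQUEST SENSE command; the original
 *		failed command is recovered through sense_xp->xb_sense_bp, as
 *		the code below shows.)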
16243 * 16244 * Arguments: un - ptr to associated softstate 16245 * sense_bp - ptr to buf(9S) for the RQS command 16246 * sense_xp - ptr to the sd_xbuf for the RQS command 16247 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command 16248 * 16249 * Context: May be called under interrupt context 16250 */ 16251 16252 static void 16253 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp, 16254 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp) 16255 { 16256 struct buf *cmd_bp; /* buf for the original command */ 16257 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */ 16258 struct scsi_pkt *cmd_pktp; /* pkt for the original command */ 16259 16260 ASSERT(un != NULL); 16261 ASSERT(mutex_owned(SD_MUTEX(un))); 16262 ASSERT(sense_bp != NULL); 16263 ASSERT(sense_xp != NULL); 16264 ASSERT(sense_pktp != NULL); 16265 16266 /* 16267 * Note the sense_bp, sense_xp, and sense_pktp here are for the 16268 * RQS command and not the original command. 16269 */ 16270 ASSERT(sense_pktp == un->un_rqs_pktp); 16271 ASSERT(sense_bp == un->un_rqs_bp); 16272 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) == 16273 (FLAG_SENSING | FLAG_HEAD)); 16274 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) & 16275 FLAG_SENSING) == FLAG_SENSING); 16276 16277 /* These are the bp, xp, and pktp for the original command */ 16278 cmd_bp = sense_xp->xb_sense_bp; 16279 cmd_xp = SD_GET_XBUF(cmd_bp); 16280 cmd_pktp = SD_GET_PKTP(cmd_bp); 16281 16282 if (sense_pktp->pkt_reason != CMD_CMPLT) { 16283 /* 16284 * The REQUEST SENSE command failed. Release the REQUEST 16285 * SENSE command for re-use, get back the bp for the original 16286 * command, and attempt to re-try the original command if 16287 * FLAG_DIAGNOSE is not set in the original packet. 16288 */ 16289 SD_UPDATE_ERRSTATS(un, sd_harderrs); 16290 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16291 cmd_bp = sd_mark_rqs_idle(un, sense_xp); 16292 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD, 16293 NULL, NULL, EIO, (clock_t)0, NULL); 16294 return; 16295 } 16296 } 16297 16298 /* 16299 * Save the relevant sense info into the xp for the original cmd. 16300 * 16301 * Note: if the request sense failed the state info will be zero 16302 * as set in sd_mark_rqs_busy() 16303 */ 16304 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp); 16305 cmd_xp->xb_sense_state = sense_pktp->pkt_state; 16306 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid; 16307 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data, SENSE_LENGTH); 16308 16309 /* 16310 * Free up the RQS command.... 16311 * NOTE: 16312 * Must do this BEFORE calling sd_validate_sense_data! 16313 * sd_validate_sense_data may return the original command in 16314 * which case the pkt will be freed and the flags can no 16315 * longer be touched. 16316 * SD_MUTEX is held through this process until the command 16317 * is dispatched based upon the sense data, so there are 16318 * no race conditions. 16319 */ 16320 (void) sd_mark_rqs_idle(un, sense_xp); 16321 16322 /* 16323 * For a retryable command see if we have valid sense data, if so then 16324 * turn it over to sd_decode_sense() to figure out the right course of 16325 * action. Just fail a non-retryable command. 
16326 */ 16327 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) { 16328 if (sd_validate_sense_data(un, cmd_bp, cmd_xp) == 16329 SD_SENSE_DATA_IS_VALID) { 16330 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp); 16331 } 16332 } else { 16333 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB", 16334 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 16335 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data", 16336 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX); 16337 sd_return_failed_command(un, cmd_bp, EIO); 16338 } 16339 } 16340 16341 16342 16343 16344 /* 16345 * Function: sd_handle_auto_request_sense 16346 * 16347 * Description: Processing for auto-request sense information. 16348 * 16349 * Arguments: un - ptr to associated softstate 16350 * bp - ptr to buf(9S) for the command 16351 * xp - ptr to the sd_xbuf for the command 16352 * pktp - ptr to the scsi_pkt(9S) for the command 16353 * 16354 * Context: May be called under interrupt context 16355 */ 16356 16357 static void 16358 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp, 16359 struct sd_xbuf *xp, struct scsi_pkt *pktp) 16360 { 16361 struct scsi_arq_status *asp; 16362 16363 ASSERT(un != NULL); 16364 ASSERT(mutex_owned(SD_MUTEX(un))); 16365 ASSERT(bp != NULL); 16366 ASSERT(xp != NULL); 16367 ASSERT(pktp != NULL); 16368 ASSERT(pktp != un->un_rqs_pktp); 16369 ASSERT(bp != un->un_rqs_bp); 16370 16371 /* 16372 * For auto-request sense, we get a scsi_arq_status back from 16373 * the HBA, with the sense data in the sts_sensedata member. 16374 * The pkt_scbp of the packet points to this scsi_arq_status. 16375 */ 16376 asp = (struct scsi_arq_status *)(pktp->pkt_scbp); 16377 16378 if (asp->sts_rqpkt_reason != CMD_CMPLT) { 16379 /* 16380 * The auto REQUEST SENSE failed; see if we can re-try 16381 * the original command. 16382 */ 16383 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16384 "auto request sense failed (reason=%s)\n", 16385 scsi_rname(asp->sts_rqpkt_reason)); 16386 16387 sd_reset_target(un, pktp); 16388 16389 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16390 NULL, NULL, EIO, (clock_t)0, NULL); 16391 return; 16392 } 16393 16394 /* Save the relevant sense info into the xp for the original cmd. */ 16395 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status))); 16396 xp->xb_sense_state = asp->sts_rqpkt_state; 16397 xp->xb_sense_resid = asp->sts_rqpkt_resid; 16398 bcopy(&asp->sts_sensedata, xp->xb_sense_data, 16399 min(sizeof (struct scsi_extended_sense), SENSE_LENGTH)); 16400 16401 /* 16402 * See if we have valid sense data, if so then turn it over to 16403 * sd_decode_sense() to figure out the right course of action. 16404 */ 16405 if (sd_validate_sense_data(un, bp, xp) == SD_SENSE_DATA_IS_VALID) { 16406 sd_decode_sense(un, bp, xp, pktp); 16407 } 16408 } 16409 16410 16411 /* 16412 * Function: sd_print_sense_failed_msg 16413 * 16414 * Description: Print log message when RQS has failed. 
16415 * 16416 * Arguments: un - ptr to associated softstate 16417 * bp - ptr to buf(9S) for the command 16418 * arg - generic message string ptr 16419 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16420 * or SD_NO_RETRY_ISSUED 16421 * 16422 * Context: May be called from interrupt context 16423 */ 16424 16425 static void 16426 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg, 16427 int code) 16428 { 16429 char *msgp = arg; 16430 16431 ASSERT(un != NULL); 16432 ASSERT(mutex_owned(SD_MUTEX(un))); 16433 ASSERT(bp != NULL); 16434 16435 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) { 16436 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp); 16437 } 16438 } 16439 16440 16441 /* 16442 * Function: sd_validate_sense_data 16443 * 16444 * Description: Check the given sense data for validity. 16445 * If the sense data is not valid, the command will 16446 * be either failed or retried! 16447 * 16448 * Return Code: SD_SENSE_DATA_IS_INVALID 16449 * SD_SENSE_DATA_IS_VALID 16450 * 16451 * Context: May be called from interrupt context 16452 */ 16453 16454 static int 16455 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp) 16456 { 16457 struct scsi_extended_sense *esp; 16458 struct scsi_pkt *pktp; 16459 size_t actual_len; 16460 char *msgp = NULL; 16461 16462 ASSERT(un != NULL); 16463 ASSERT(mutex_owned(SD_MUTEX(un))); 16464 ASSERT(bp != NULL); 16465 ASSERT(bp != un->un_rqs_bp); 16466 ASSERT(xp != NULL); 16467 16468 pktp = SD_GET_PKTP(bp); 16469 ASSERT(pktp != NULL); 16470 16471 /* 16472 * Check the status of the RQS command (auto or manual). 16473 */ 16474 switch (xp->xb_sense_status & STATUS_MASK) { 16475 case STATUS_GOOD: 16476 break; 16477 16478 case STATUS_RESERVATION_CONFLICT: 16479 sd_pkt_status_reservation_conflict(un, bp, xp, pktp); 16480 return (SD_SENSE_DATA_IS_INVALID); 16481 16482 case STATUS_BUSY: 16483 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16484 "Busy Status on REQUEST SENSE\n"); 16485 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL, 16486 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 16487 return (SD_SENSE_DATA_IS_INVALID); 16488 16489 case STATUS_QFULL: 16490 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 16491 "QFULL Status on REQUEST SENSE\n"); 16492 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, 16493 NULL, EIO, SD_BSY_TIMEOUT / 500, kstat_waitq_enter); 16494 return (SD_SENSE_DATA_IS_INVALID); 16495 16496 case STATUS_CHECK: 16497 case STATUS_TERMINATED: 16498 msgp = "Check Condition on REQUEST SENSE\n"; 16499 goto sense_failed; 16500 16501 default: 16502 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n"; 16503 goto sense_failed; 16504 } 16505 16506 /* 16507 * See if we got the minimum required amount of sense data. 16508 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes 16509 * or less. 
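 *
 * Worked example (illustrative, assuming SENSE_LENGTH is
 * sizeof (struct scsi_extended_sense), i.e. 20 bytes): if the target
 * transferred 14 bytes of sense data, xb_sense_resid is 6 and
 * actual_len = 20 - 6 = 14, which passes the SUN_MIN_SENSE_LENGTH
 * check below; a resid of 20 would leave actual_len == 0 and take
 * the "couldn't get sense data" failure path.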
16510 */ 16511 actual_len = (int)(SENSE_LENGTH - xp->xb_sense_resid); 16512 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) || 16513 (actual_len == 0)) { 16514 msgp = "Request Sense couldn't get sense data\n"; 16515 goto sense_failed; 16516 } 16517 16518 if (actual_len < SUN_MIN_SENSE_LENGTH) { 16519 msgp = "Not enough sense information\n"; 16520 goto sense_failed; 16521 } 16522 16523 /* 16524 * We require the extended sense data 16525 */ 16526 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 16527 if (esp->es_class != CLASS_EXTENDED_SENSE) { 16528 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 16529 static char tmp[8]; 16530 static char buf[148]; 16531 char *p = (char *)(xp->xb_sense_data); 16532 int i; 16533 16534 mutex_enter(&sd_sense_mutex); 16535 (void) strcpy(buf, "undecodable sense information:"); 16536 for (i = 0; i < actual_len; i++) { 16537 (void) sprintf(tmp, " 0x%x", *(p++)&0xff); 16538 (void) strcpy(&buf[strlen(buf)], tmp); 16539 } 16540 i = strlen(buf); 16541 (void) strcpy(&buf[i], "-(assumed fatal)\n"); 16542 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, buf); 16543 mutex_exit(&sd_sense_mutex); 16544 } 16545 /* Note: Legacy behavior, fail the command with no retry */ 16546 sd_return_failed_command(un, bp, EIO); 16547 return (SD_SENSE_DATA_IS_INVALID); 16548 } 16549 16550 /* 16551 * Check that es_code is valid (es_class concatenated with es_code 16552 * makes up the "response code" field). es_class will always be 7, so 16553 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code indicates the 16554 * format. 16555 */ 16556 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) && 16557 (esp->es_code != CODE_FMT_FIXED_DEFERRED) && 16558 (esp->es_code != CODE_FMT_DESCR_CURRENT) && 16559 (esp->es_code != CODE_FMT_DESCR_DEFERRED) && 16560 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) { 16561 goto sense_failed; 16562 } 16563 16564 return (SD_SENSE_DATA_IS_VALID); 16565 16566 sense_failed: 16567 /* 16568 * If the request sense failed (for whatever reason), attempt 16569 * to retry the original command. 16570 */ 16571 #if defined(__i386) || defined(__amd64) 16572 /* 16573 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in 16574 * sddef.h for the SPARC platform, while x86 uses one binary 16575 * for both SCSI and FC. 16576 * The SD_RETRY_DELAY value needs to be adjusted here 16577 * whenever SD_RETRY_DELAY changes in sddef.h 16578 */ 16579 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16580 sd_print_sense_failed_msg, msgp, EIO, 16581 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL); 16582 #else 16583 sd_retry_command(un, bp, SD_RETRIES_STANDARD, 16584 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL); 16585 #endif 16586 16587 return (SD_SENSE_DATA_IS_INVALID); 16588 } 16589 16590 16591 16592 /* 16593 * Function: sd_decode_sense 16594 * 16595 * Description: Take recovery action(s) when SCSI Sense Data is received. 16596 * 16597 * Context: Interrupt context.
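 *
 * Illustrative flow: the sense key, ASC and ASCQ are extracted from
 * either the fixed or the descriptor sense format and then dispatched,
 * for example:
 *
 *	KEY_NOT_READY		-> sd_sense_key_not_ready()
 *	KEY_MEDIUM_ERROR	-> sd_sense_key_medium_or_hardware_error()
 *	KEY_UNIT_ATTENTION	-> sd_sense_key_unit_attention()
 *	unrecognized keys	-> sd_sense_key_default()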
16598 */ 16599 16600 static void 16601 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 16602 struct scsi_pkt *pktp) 16603 { 16604 struct scsi_extended_sense *esp; 16605 struct scsi_descr_sense_hdr *sdsp; 16606 uint8_t asc, ascq, sense_key; 16607 16608 ASSERT(un != NULL); 16609 ASSERT(mutex_owned(SD_MUTEX(un))); 16610 ASSERT(bp != NULL); 16611 ASSERT(bp != un->un_rqs_bp); 16612 ASSERT(xp != NULL); 16613 ASSERT(pktp != NULL); 16614 16615 esp = (struct scsi_extended_sense *)xp->xb_sense_data; 16616 16617 switch (esp->es_code) { 16618 case CODE_FMT_DESCR_CURRENT: 16619 case CODE_FMT_DESCR_DEFERRED: 16620 sdsp = (struct scsi_descr_sense_hdr *)xp->xb_sense_data; 16621 sense_key = sdsp->ds_key; 16622 asc = sdsp->ds_add_code; 16623 ascq = sdsp->ds_qual_code; 16624 break; 16625 case CODE_FMT_VENDOR_SPECIFIC: 16626 case CODE_FMT_FIXED_CURRENT: 16627 case CODE_FMT_FIXED_DEFERRED: 16628 default: 16629 sense_key = esp->es_key; 16630 asc = esp->es_add_code; 16631 ascq = esp->es_qual_code; 16632 break; 16633 } 16634 16635 switch (sense_key) { 16636 case KEY_NO_SENSE: 16637 sd_sense_key_no_sense(un, bp, xp, pktp); 16638 break; 16639 case KEY_RECOVERABLE_ERROR: 16640 sd_sense_key_recoverable_error(un, asc, bp, xp, pktp); 16641 break; 16642 case KEY_NOT_READY: 16643 sd_sense_key_not_ready(un, asc, ascq, bp, xp, pktp); 16644 break; 16645 case KEY_MEDIUM_ERROR: 16646 case KEY_HARDWARE_ERROR: 16647 sd_sense_key_medium_or_hardware_error(un, 16648 sense_key, asc, bp, xp, pktp); 16649 break; 16650 case KEY_ILLEGAL_REQUEST: 16651 sd_sense_key_illegal_request(un, bp, xp, pktp); 16652 break; 16653 case KEY_UNIT_ATTENTION: 16654 sd_sense_key_unit_attention(un, asc, bp, xp, pktp); 16655 break; 16656 case KEY_WRITE_PROTECT: 16657 case KEY_VOLUME_OVERFLOW: 16658 case KEY_MISCOMPARE: 16659 sd_sense_key_fail_command(un, bp, xp, pktp); 16660 break; 16661 case KEY_BLANK_CHECK: 16662 sd_sense_key_blank_check(un, bp, xp, pktp); 16663 break; 16664 case KEY_ABORTED_COMMAND: 16665 sd_sense_key_aborted_command(un, bp, xp, pktp); 16666 break; 16667 case KEY_VENDOR_UNIQUE: 16668 case KEY_COPY_ABORTED: 16669 case KEY_EQUAL: 16670 case KEY_RESERVED: 16671 default: 16672 sd_sense_key_default(un, sense_key, bp, xp, pktp); 16673 break; 16674 } 16675 } 16676 16677 16678 /* 16679 * Function: sd_dump_memory 16680 * 16681 * Description: Debug logging routine to print the contents of a user-provided 16682 * buffer. The output of the buffer is broken up into 256-byte 16683 * segments due to a size constraint of the scsi_log() 16684 * implementation.
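 *		For example (illustrative): with fmt == SD_LOG_HEX each
 *		entry formats as " 0xNN" (5 characters), so with a
 *		10-character title roughly (256 - 10 - 3) / 5 = 48 data
 *		bytes fit in each line handed to scsi_log().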
16685 * 16686 * Arguments: un - ptr to softstate 16687 * comp - component mask 16688 * title - "title" string to precede data when printed 16689 * data - ptr to data block to be printed 16690 * len - size of data block to be printed 16691 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c) 16692 * 16693 * Context: May be called from interrupt context 16694 */ 16695 16696 #define SD_DUMP_MEMORY_BUF_SIZE 256 16697 16698 static char *sd_dump_format_string[] = { 16699 " 0x%02x", 16700 " %c" 16701 }; 16702 16703 static void 16704 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data, 16705 int len, int fmt) 16706 { 16707 int i, j; 16708 int avail_count; 16709 int start_offset; 16710 int end_offset; 16711 size_t entry_len; 16712 char *bufp; 16713 char *local_buf; 16714 char *format_string; 16715 16716 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR)); 16717 16718 /* 16719 * In the debug version of the driver, this function is called from a 16720 * number of places which are NOPs in the release driver. 16721 * The debug driver therefore has additional methods of filtering 16722 * debug output. 16723 */ 16724 #ifdef SDDEBUG 16725 /* 16726 * In the debug version of the driver we can reduce the amount of debug 16727 * messages by setting sd_error_level to something other than 16728 * SCSI_ERR_ALL and clearing bits in sd_level_mask and 16729 * sd_component_mask. 16730 */ 16731 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) || 16732 (sd_error_level != SCSI_ERR_ALL)) { 16733 return; 16734 } 16735 if (((sd_component_mask & comp) == 0) || 16736 (sd_error_level != SCSI_ERR_ALL)) { 16737 return; 16738 } 16739 #else 16740 if (sd_error_level != SCSI_ERR_ALL) { 16741 return; 16742 } 16743 #endif 16744 16745 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP); 16746 bufp = local_buf; 16747 /* 16748 * Available length is the length of local_buf[], minus the 16749 * length of the title string, minus one for the ":", minus 16750 * one for the newline, minus one for the NULL terminator. 16751 * This gives the #bytes available for holding the printed 16752 * values from the given data buffer. 16753 */ 16754 if (fmt == SD_LOG_HEX) { 16755 format_string = sd_dump_format_string[0]; 16756 } else /* SD_LOG_CHAR */ { 16757 format_string = sd_dump_format_string[1]; 16758 } 16759 /* 16760 * Available count is the number of elements from the given 16761 * data buffer that we can fit into the available length. 16762 * This is based upon the size of the format string used. 16763 * Make one entry and find its size. 16764 */ 16765 (void) sprintf(bufp, format_string, data[0]); 16766 entry_len = strlen(bufp); 16767 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len; 16768 16769 j = 0; 16770 while (j < len) { 16771 bufp = local_buf; 16772 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE); 16773 start_offset = j; 16774 16775 end_offset = start_offset + avail_count; 16776 16777 (void) sprintf(bufp, "%s:", title); 16778 bufp += strlen(bufp); 16779 for (i = start_offset; ((i < end_offset) && (j < len)); 16780 i++, j++) { 16781 (void) sprintf(bufp, format_string, data[i]); 16782 bufp += entry_len; 16783 } 16784 (void) sprintf(bufp, "\n"); 16785 16786 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf); 16787 } 16788 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE); 16789 } 16790 16791 /* 16792 * Function: sd_print_sense_msg 16793 * 16794 * Description: Log a message based upon the given sense data.
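 *		(Illustrative note: when a retry has been issued, the
 *		severity supplied through the sd_sense_info arg is
 *		overridden to SCSI_ERR_RETRYABLE, as the code below shows.)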
16795 * 16796 * Arguments: un - ptr to associated softstate 16797 * bp - ptr to buf(9S) for the command 16798 * arg - ptr to associated sd_sense_info struct 16799 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 16800 * or SD_NO_RETRY_ISSUED 16801 * 16802 * Context: May be called from interrupt context 16803 */ 16804 16805 static void 16806 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code) 16807 { 16808 struct sd_xbuf *xp; 16809 struct scsi_pkt *pktp; 16810 struct scsi_extended_sense *sensep; 16811 daddr_t request_blkno; 16812 diskaddr_t err_blkno; 16813 int severity; 16814 int pfa_flag; 16815 int fixed_format = TRUE; 16816 extern struct scsi_key_strings scsi_cmds[]; 16817 16818 ASSERT(un != NULL); 16819 ASSERT(mutex_owned(SD_MUTEX(un))); 16820 ASSERT(bp != NULL); 16821 xp = SD_GET_XBUF(bp); 16822 ASSERT(xp != NULL); 16823 pktp = SD_GET_PKTP(bp); 16824 ASSERT(pktp != NULL); 16825 ASSERT(arg != NULL); 16826 16827 severity = ((struct sd_sense_info *)(arg))->ssi_severity; 16828 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag; 16829 16830 if ((code == SD_DELAYED_RETRY_ISSUED) || 16831 (code == SD_IMMEDIATE_RETRY_ISSUED)) { 16832 severity = SCSI_ERR_RETRYABLE; 16833 } 16834 16835 /* Use absolute block number for the request block number */ 16836 request_blkno = xp->xb_blkno; 16837 16838 /* 16839 * Now try to get the error block number from the sense data 16840 */ 16841 sensep = (struct scsi_extended_sense *)xp->xb_sense_data; 16842 switch (sensep->es_code) { 16843 case CODE_FMT_DESCR_CURRENT: 16844 case CODE_FMT_DESCR_DEFERRED: 16845 err_blkno = 16846 sd_extract_sense_info_descr( 16847 (struct scsi_descr_sense_hdr *)sensep); 16848 fixed_format = FALSE; 16849 break; 16850 case CODE_FMT_FIXED_CURRENT: 16851 case CODE_FMT_FIXED_DEFERRED: 16852 case CODE_FMT_VENDOR_SPECIFIC: 16853 default: 16854 /* 16855 * With the es_valid bit set, we assume that the error 16856 * blkno is in the sense data. Also, if xp->xb_blkno is 16857 * greater than 0xffffffff then the target *should* have used 16858 * a descriptor sense format (or it shouldn't have set 16859 * the es_valid bit), and we may as well ignore the 16860 * 32-bit value. 16861 */ 16862 if ((sensep->es_valid != 0) && (xp->xb_blkno <= 0xffffffff)) { 16863 err_blkno = (diskaddr_t) 16864 ((sensep->es_info_1 << 24) | 16865 (sensep->es_info_2 << 16) | 16866 (sensep->es_info_3 << 8) | 16867 (sensep->es_info_4)); 16868 } else { 16869 err_blkno = (diskaddr_t)-1; 16870 } 16871 break; 16872 } 16873 16874 if (err_blkno == (diskaddr_t)-1) { 16875 /* 16876 * Without the es_valid bit set (for fixed format) or an 16877 * information descriptor (for descriptor format) we cannot 16878 * be certain of the error blkno, so just use the 16879 * request_blkno. 16880 */ 16881 err_blkno = (diskaddr_t)request_blkno; 16882 } else { 16883 /* 16884 * We retrieved the error block number from the information 16885 * portion of the sense data. 16886 * 16887 * For USCSI commands we are better off using the error 16888 * block no. as the requested block no. (This is the best 16889 * we can estimate.) 16890 */ 16891 if ((SD_IS_BUFIO(xp) == FALSE) && 16892 ((pktp->pkt_flags & FLAG_SILENT) == 0)) { 16893 request_blkno = err_blkno; 16894 } 16895 } 16896 16897 /* 16898 * The following will log the buffer contents for the release driver 16899 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error 16900 * level is set to verbose.
16901 */ 16902 sd_dump_memory(un, SD_LOG_IO, "Failed CDB", 16903 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX); 16904 sd_dump_memory(un, SD_LOG_IO, "Sense Data", 16905 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX); 16906 16907 if (pfa_flag == FALSE) { 16908 /* This is normally only set for USCSI */ 16909 if ((pktp->pkt_flags & FLAG_SILENT) != 0) { 16910 return; 16911 } 16912 16913 if ((SD_IS_BUFIO(xp) == TRUE) && 16914 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) && 16915 (severity < sd_error_level))) { 16916 return; 16917 } 16918 } 16919 16920 /* 16921 * If the data is fixed format then check for Sonoma Failover, 16922 * and keep a count of how many failed I/O's. We should not have 16923 * to worry about Sonoma returning descriptor format sense data, 16924 * and asc/ascq are in a different location in descriptor format. 16925 */ 16926 if (fixed_format && 16927 (SD_IS_LSI(un)) && (sensep->es_key == KEY_ILLEGAL_REQUEST) && 16928 (sensep->es_add_code == 0x94) && (sensep->es_qual_code == 0x01)) { 16929 un->un_sonoma_failure_count++; 16930 if (un->un_sonoma_failure_count > 1) { 16931 return; 16932 } 16933 } 16934 16935 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity, 16936 request_blkno, err_blkno, scsi_cmds, sensep, 16937 un->un_additional_codes, NULL); 16938 } 16939 16940 /* 16941 * Function: sd_extract_sense_info_descr 16942 * 16943 * Description: Retrieve "information" field from descriptor format 16944 * sense data. Iterates through each sense descriptor 16945 * looking for the information descriptor and returns 16946 * the information field from that descriptor. 16947 * 16948 * Context: May be called from interrupt context 16949 */ 16950 16951 static diskaddr_t 16952 sd_extract_sense_info_descr(struct scsi_descr_sense_hdr *sdsp) 16953 { 16954 diskaddr_t result; 16955 uint8_t *descr_offset; 16956 int valid_sense_length; 16957 struct scsi_information_sense_descr *isd; 16958 16959 /* 16960 * Initialize result to -1 indicating there is no information 16961 * descriptor 16962 */ 16963 result = (diskaddr_t)-1; 16964 16965 /* 16966 * The first descriptor will immediately follow the header 16967 */ 16968 descr_offset = (uint8_t *)(sdsp+1); /* Pointer arithmetic */ 16969 16970 /* 16971 * Calculate the amount of valid sense data 16972 */ 16973 valid_sense_length = 16974 min((sizeof (struct scsi_descr_sense_hdr) + 16975 sdsp->ds_addl_sense_length), 16976 SENSE_LENGTH); 16977 16978 /* 16979 * Iterate through the list of descriptors, stopping when we 16980 * run out of sense data 16981 */ 16982 while ((descr_offset + sizeof (struct scsi_information_sense_descr)) <= 16983 (uint8_t *)sdsp + valid_sense_length) { 16984 /* 16985 * Check if this is an information descriptor. We can 16986 * use the scsi_information_sense_descr structure as a 16987 * template since the first two fields are always the 16988 * same 16989 */ 16990 isd = (struct scsi_information_sense_descr *)descr_offset; 16991 if (isd->isd_descr_type == DESCR_INFORMATION) { 16992 /* 16993 * Found an information descriptor. Copy the 16994 * information field. There will only be one 16995 * information descriptor so we can stop looking.
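 *
 * Worked example (illustrative): an isd_information[] of
 * { 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0xD6, 0x87 } assembles
 * big-endian into the 64-bit value 0x12D687 (decimal 1234567).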
16996 */ 16997 result = 16998 (((diskaddr_t)isd->isd_information[0] << 56) | 16999 ((diskaddr_t)isd->isd_information[1] << 48) | 17000 ((diskaddr_t)isd->isd_information[2] << 40) | 17001 ((diskaddr_t)isd->isd_information[3] << 32) | 17002 ((diskaddr_t)isd->isd_information[4] << 24) | 17003 ((diskaddr_t)isd->isd_information[5] << 16) | 17004 ((diskaddr_t)isd->isd_information[6] << 8) | 17005 ((diskaddr_t)isd->isd_information[7])); 17006 break; 17007 } 17008 17009 /* 17010 * Get pointer to the next descriptor. The "additional 17011 * length" field holds the length of the descriptor except 17012 * for the "type" and "additional length" fields, so 17013 * we need to add 2 to get the total length. 17014 */ 17015 descr_offset += (isd->isd_addl_length + 2); 17016 } 17017 17018 return (result); 17019 } 17020 17021 /* 17022 * Function: sd_sense_key_no_sense 17023 * 17024 * Description: Recovery action when sense data was not received. 17025 * 17026 * Context: May be called from interrupt context 17027 */ 17028 17029 static void 17030 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, 17031 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17032 { 17033 struct sd_sense_info si; 17034 17035 ASSERT(un != NULL); 17036 ASSERT(mutex_owned(SD_MUTEX(un))); 17037 ASSERT(bp != NULL); 17038 ASSERT(xp != NULL); 17039 ASSERT(pktp != NULL); 17040 17041 si.ssi_severity = SCSI_ERR_FATAL; 17042 si.ssi_pfa_flag = FALSE; 17043 17044 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17045 17046 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17047 &si, EIO, (clock_t)0, NULL); 17048 } 17049 17050 17051 /* 17052 * Function: sd_sense_key_recoverable_error 17053 * 17054 * Description: Recovery actions for a SCSI "Recovered Error" sense key. 17055 * 17056 * Context: May be called from interrupt context 17057 */ 17058 17059 static void 17060 sd_sense_key_recoverable_error(struct sd_lun *un, 17061 uint8_t asc, 17062 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17063 { 17064 struct sd_sense_info si; 17065 17066 ASSERT(un != NULL); 17067 ASSERT(mutex_owned(SD_MUTEX(un))); 17068 ASSERT(bp != NULL); 17069 ASSERT(xp != NULL); 17070 ASSERT(pktp != NULL); 17071 17072 /* 17073 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED 17074 */ 17075 if ((asc == 0x5D) && (sd_report_pfa != 0)) { 17076 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17077 si.ssi_severity = SCSI_ERR_INFO; 17078 si.ssi_pfa_flag = TRUE; 17079 } else { 17080 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17081 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err); 17082 si.ssi_severity = SCSI_ERR_RECOVERED; 17083 si.ssi_pfa_flag = FALSE; 17084 } 17085 17086 if (pktp->pkt_resid == 0) { 17087 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17088 sd_return_command(un, bp); 17089 return; 17090 } 17091 17092 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17093 &si, EIO, (clock_t)0, NULL); 17094 } 17095 17096 17097 17098 17099 /* 17100 * Function: sd_sense_key_not_ready 17101 * 17102 * Description: Recovery actions for a SCSI "Not Ready" sense key. 
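 *		Illustrative summary of the ASC/ASCQ-driven strategy below:
 *
 *		0x04/0x00  possibly reset the target, then schedule recovery
 *		0x04/0x01  becoming ready: simply retry
 *		0x04/0x03  manual intervention required: fail immediately
 *		0x3A       medium not present: fail immediately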
17103 * 17104 * Context: May be called from interrupt context 17105 */ 17106 17107 static void 17108 sd_sense_key_not_ready(struct sd_lun *un, 17109 uint8_t asc, uint8_t ascq, 17110 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17111 { 17112 struct sd_sense_info si; 17113 17114 ASSERT(un != NULL); 17115 ASSERT(mutex_owned(SD_MUTEX(un))); 17116 ASSERT(bp != NULL); 17117 ASSERT(xp != NULL); 17118 ASSERT(pktp != NULL); 17119 17120 si.ssi_severity = SCSI_ERR_FATAL; 17121 si.ssi_pfa_flag = FALSE; 17122 17123 /* 17124 * Update error stats after first NOT READY error. Disks may have 17125 * been powered down and may need to be restarted. For CDROMs, 17126 * report NOT READY errors only if media is present. 17127 */ 17128 if ((ISCD(un) && (un->un_f_geometry_is_valid == TRUE)) || 17129 (xp->xb_retry_count > 0)) { 17130 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17131 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err); 17132 } 17133 17134 /* 17135 * Just fail if the "not ready" retry limit has been reached. 17136 */ 17137 if (xp->xb_retry_count >= un->un_notready_retry_count) { 17138 /* Special check for error message printing for removables. */ 17139 if ((ISREMOVABLE(un)) && (asc == 0x04) && 17140 (ascq >= 0x04)) { 17141 si.ssi_severity = SCSI_ERR_ALL; 17142 } 17143 goto fail_command; 17144 } 17145 17146 /* 17147 * Check the ASC and ASCQ in the sense data as needed, to determine 17148 * what to do. 17149 */ 17150 switch (asc) { 17151 case 0x04: /* LOGICAL UNIT NOT READY */ 17152 /* 17153 * disk drives that don't spin up result in a very long delay 17154 * in format without warning messages. We will log a message 17155 * if the error level is set to verbose. 17156 */ 17157 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17158 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17159 "logical unit not ready, resetting disk\n"); 17160 } 17161 17162 /* 17163 * There are different requirements for CDROMs and disks for 17164 * the number of retries. If a CD-ROM is giving this, it is 17165 * probably reading TOC and is in the process of getting 17166 * ready, so we should keep on trying for a long time to make 17167 * sure that all types of media are taken into account (for 17168 * some media the drive takes a long time to read TOC). For 17169 * disks we do not want to retry this too many times as this 17170 * can cause a long hang in format when the drive refuses to 17171 * spin up (a very common failure). 17172 */ 17173 switch (ascq) { 17174 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */ 17175 /* 17176 * Disk drives frequently refuse to spin up which 17177 * results in a very long hang in format without 17178 * warning messages. 17179 * 17180 * Note: This code preserves the legacy behavior of 17181 * comparing xb_retry_count against zero for fibre 17182 * channel targets instead of comparing against the 17183 * un_reset_retry_count value. The reason for this 17184 * discrepancy has been so utterly lost beneath the 17185 * Sands of Time that even Indiana Jones could not 17186 * find it.
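 *
 * Illustrative contrast of the two checks below:
 *
 *	fibre:		reset if xb_retry_count > 0
 *	non-fibre:	reset if xb_retry_count > un->un_reset_retry_count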
17187 */ 17188 if (un->un_f_is_fibre == TRUE) { 17189 if (((sd_level_mask & SD_LOGMASK_DIAG) || 17190 (xp->xb_retry_count > 0)) && 17191 (un->un_startstop_timeid == NULL)) { 17192 scsi_log(SD_DEVINFO(un), sd_label, 17193 CE_WARN, "logical unit not ready, " 17194 "resetting disk\n"); 17195 sd_reset_target(un, pktp); 17196 } 17197 } else { 17198 if (((sd_level_mask & SD_LOGMASK_DIAG) || 17199 (xp->xb_retry_count > 17200 un->un_reset_retry_count)) && 17201 (un->un_startstop_timeid == NULL)) { 17202 scsi_log(SD_DEVINFO(un), sd_label, 17203 CE_WARN, "logical unit not ready, " 17204 "resetting disk\n"); 17205 sd_reset_target(un, pktp); 17206 } 17207 } 17208 break; 17209 17210 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */ 17211 /* 17212 * If the target is in the process of becoming 17213 * ready, just proceed with the retry. This can 17214 * happen with CD-ROMs that take a long time to 17215 * read TOC after a power cycle or reset. 17216 */ 17217 goto do_retry; 17218 17219 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */ 17220 break; 17221 17222 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */ 17223 /* 17224 * Retries cannot help here so just fail right away. 17225 */ 17226 goto fail_command; 17227 17228 case 0x88: 17229 /* 17230 * Vendor-unique code for T3/T4: it indicates a 17231 * path problem in a multipathed config, but as far as 17232 * the target driver is concerned it equates to a fatal 17233 * error, so we should just fail the command right away 17234 * (without printing anything to the console). If this 17235 * is not a T3/T4, fall thru to the default recovery 17236 * action. 17237 * T3/T4 is FC only, don't need to check is_fibre 17238 */ 17239 if (SD_IS_T3(un) || SD_IS_T4(un)) { 17240 sd_return_failed_command(un, bp, EIO); 17241 return; 17242 } 17243 /* FALLTHRU */ 17244 17245 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */ 17246 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */ 17247 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */ 17248 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */ 17249 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */ 17250 default: /* Possible future codes in SCSI spec? */ 17251 /* 17252 * For removable-media devices, do not retry if 17253 * ASCQ > 2 as these result mostly from USCSI commands 17254 * on MMC devices issued to check status of an 17255 * operation initiated in immediate mode. Also for 17256 * ASCQ >= 4 do not print console messages as these 17257 * mainly represent a user-initiated operation 17258 * instead of a system failure. 17259 */ 17260 if (ISREMOVABLE(un)) { 17261 si.ssi_severity = SCSI_ERR_ALL; 17262 goto fail_command; 17263 } 17264 break; 17265 } 17266 17267 /* 17268 * As part of our recovery attempt for the NOT READY 17269 * condition, we issue a START STOP UNIT command. However 17270 * we want to wait for a short delay before attempting this 17271 * as there may still be more commands coming back from the 17272 * target with the check condition. To do this we use 17273 * timeout(9F) to call sd_start_stop_unit_callback() after 17274 * the delay interval expires. (sd_start_stop_unit_callback() 17275 * dispatches sd_start_stop_unit_task(), which will issue 17276 * the actual START STOP UNIT command.) The delay interval 17277 * is one-half of the delay that we will use to retry the 17278 * command that generated the NOT READY condition.
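 *
 * Illustrative timeline of the recovery sequence coded below:
 *
 *	t = 0			NOT READY decoded here
 *	t = SD_BSY_TIMEOUT/2	timeout(9F) fires; the taskq issues the
 *				START STOP UNIT
 *	START STOP UNIT done	the queued retry of the original command
 *				is started from sd_start_stop_unit_task()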
17279 * 17280 * Note that we could just dispatch sd_start_stop_unit_task() 17281 * from here and allow it to sleep for the delay interval, 17282 * but then we would be tying up the taskq thread 17283 * unnecessarily for the duration of the delay. 17284 * 17285 * Do not issue the START STOP UNIT if the current command 17286 * is already a START STOP UNIT. 17287 */ 17288 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) { 17289 break; 17290 } 17291 17292 /* 17293 * Do not schedule the timeout if one is already pending. 17294 */ 17295 if (un->un_startstop_timeid != NULL) { 17296 SD_INFO(SD_LOG_ERROR, un, 17297 "sd_sense_key_not_ready: restart already issued to" 17298 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)), 17299 ddi_get_instance(SD_DEVINFO(un))); 17300 break; 17301 } 17302 17303 /* 17304 * Schedule the START STOP UNIT command, then queue the command 17305 * for a retry. 17306 * 17307 * Note: A timeout is not scheduled for this retry because we 17308 * want the retry to be serial with the START_STOP_UNIT. The 17309 * retry will be started when the START_STOP_UNIT is completed 17310 * in sd_start_stop_unit_task. 17311 */ 17312 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback, 17313 un, SD_BSY_TIMEOUT / 2); 17314 xp->xb_retry_count++; 17315 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter); 17316 return; 17317 17318 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */ 17319 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17320 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17321 "unit does not respond to selection\n"); 17322 } 17323 break; 17324 17325 case 0x3A: /* MEDIUM NOT PRESENT */ 17326 if (sd_error_level >= SCSI_ERR_FATAL) { 17327 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17328 "Caddy not inserted in drive\n"); 17329 } 17330 17331 sr_ejected(un); 17332 un->un_mediastate = DKIO_EJECTED; 17333 /* The state has changed, inform the media watch routines */ 17334 cv_broadcast(&un->un_state_cv); 17335 /* Just fail if no media is present in the drive. */ 17336 goto fail_command; 17337 17338 default: 17339 if (sd_error_level < SCSI_ERR_RETRYABLE) { 17340 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, 17341 "Unit not Ready. Additional sense code 0x%x\n", 17342 asc); 17343 } 17344 break; 17345 } 17346 17347 do_retry: 17348 17349 /* 17350 * Retry the command, as some targets may report NOT READY for 17351 * several seconds after being reset. 17352 */ 17353 xp->xb_retry_count++; 17354 si.ssi_severity = SCSI_ERR_RETRYABLE; 17355 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg, 17356 &si, EIO, SD_BSY_TIMEOUT, NULL); 17357 17358 return; 17359 17360 fail_command: 17361 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17362 sd_return_failed_command(un, bp, EIO); 17363 } 17364 17365 17366 17367 /* 17368 * Function: sd_sense_key_medium_or_hardware_error 17369 * 17370 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error" 17371 * sense key.
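 *		Illustrative escalation when the reset retry threshold is
 *		reached (per the code below): scsi_reset(SD_ADDRESS(un),
 *		RESET_LUN) is tried first when LUN reset is enabled, falling
 *		back to RESET_TARGET if that fails; LSI arrays reporting
 *		ASC 0x84 (LUN Dead) are deliberately not reset.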
17372 * 17373 * Context: May be called from interrupt context 17374 */ 17375 17376 static void 17377 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, 17378 int sense_key, uint8_t asc, 17379 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17380 { 17381 struct sd_sense_info si; 17382 17383 ASSERT(un != NULL); 17384 ASSERT(mutex_owned(SD_MUTEX(un))); 17385 ASSERT(bp != NULL); 17386 ASSERT(xp != NULL); 17387 ASSERT(pktp != NULL); 17388 17389 si.ssi_severity = SCSI_ERR_FATAL; 17390 si.ssi_pfa_flag = FALSE; 17391 17392 if (sense_key == KEY_MEDIUM_ERROR) { 17393 SD_UPDATE_ERRSTATS(un, sd_rq_media_err); 17394 } 17395 17396 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17397 17398 if ((un->un_reset_retry_count != 0) && 17399 (xp->xb_retry_count == un->un_reset_retry_count)) { 17400 mutex_exit(SD_MUTEX(un)); 17401 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */ 17402 if (un->un_f_allow_bus_device_reset == TRUE) { 17403 17404 boolean_t try_resetting_target = B_TRUE; 17405 17406 /* 17407 * We need to be able to handle a specific ASC when we 17408 * are handling a KEY_HARDWARE_ERROR. In particular, 17409 * taking the default action of resetting the target may 17410 * not be the appropriate way to attempt recovery. 17411 * Resetting a target because of a single LUN failure 17412 * victimizes all LUNs on that target. 17413 * 17414 * This is true for the LSI arrays: if an LSI 17415 * array controller returns an ASC of 0x84 (LUN Dead), 17416 * we should trust it. 17417 */ 17418 17419 if (sense_key == KEY_HARDWARE_ERROR) { 17420 switch (asc) { 17421 case 0x84: 17422 if (SD_IS_LSI(un)) { 17423 try_resetting_target = B_FALSE; 17424 } 17425 break; 17426 default: 17427 break; 17428 } 17429 } 17430 17431 if (try_resetting_target == B_TRUE) { 17432 int reset_retval = 0; 17433 if (un->un_f_lun_reset_enabled == TRUE) { 17434 SD_TRACE(SD_LOG_IO_CORE, un, 17435 "sd_sense_key_medium_or_hardware_" 17436 "error: issuing RESET_LUN\n"); 17437 reset_retval = 17438 scsi_reset(SD_ADDRESS(un), 17439 RESET_LUN); 17440 } 17441 if (reset_retval == 0) { 17442 SD_TRACE(SD_LOG_IO_CORE, un, 17443 "sd_sense_key_medium_or_hardware_" 17444 "error: issuing RESET_TARGET\n"); 17445 (void) scsi_reset(SD_ADDRESS(un), 17446 RESET_TARGET); 17447 } 17448 } 17449 } 17450 mutex_enter(SD_MUTEX(un)); 17451 } 17452 17453 /* 17454 * This really ought to be a fatal error, but we will retry anyway 17455 * as some drives report this as a spurious error. 17456 */ 17457 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17458 &si, EIO, (clock_t)0, NULL); 17459 } 17460 17461 17462 17463 /* 17464 * Function: sd_sense_key_illegal_request 17465 * 17466 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
17467 * 17468 * Context: May be called from interrupt context 17469 */ 17470 17471 static void 17472 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp, 17473 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17474 { 17475 struct sd_sense_info si; 17476 17477 ASSERT(un != NULL); 17478 ASSERT(mutex_owned(SD_MUTEX(un))); 17479 ASSERT(bp != NULL); 17480 ASSERT(xp != NULL); 17481 ASSERT(pktp != NULL); 17482 17483 SD_UPDATE_ERRSTATS(un, sd_softerrs); 17484 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err); 17485 17486 si.ssi_severity = SCSI_ERR_INFO; 17487 si.ssi_pfa_flag = FALSE; 17488 17489 /* Pointless to retry if the target thinks it's an illegal request */ 17490 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17491 sd_return_failed_command(un, bp, EIO); 17492 } 17493 17494 17495 17496 17497 /* 17498 * Function: sd_sense_key_unit_attention 17499 * 17500 * Description: Recovery actions for a SCSI "Unit Attention" sense key. 17501 * 17502 * Context: May be called from interrupt context 17503 */ 17504 17505 static void 17506 sd_sense_key_unit_attention(struct sd_lun *un, 17507 uint8_t asc, 17508 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17509 { 17510 /* 17511 * For UNIT ATTENTION we allow retries for one minute. Devices 17512 * like Sonoma can return UNIT ATTENTION close to a minute 17513 * under certain conditions. 17514 */ 17515 int retry_check_flag = SD_RETRIES_UA; 17516 struct sd_sense_info si; 17517 17518 ASSERT(un != NULL); 17519 ASSERT(mutex_owned(SD_MUTEX(un))); 17520 ASSERT(bp != NULL); 17521 ASSERT(xp != NULL); 17522 ASSERT(pktp != NULL); 17523 17524 si.ssi_severity = SCSI_ERR_INFO; 17525 si.ssi_pfa_flag = FALSE; 17526 17527 17528 switch (asc) { 17529 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */ 17530 if (sd_report_pfa != 0) { 17531 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err); 17532 si.ssi_pfa_flag = TRUE; 17533 retry_check_flag = SD_RETRIES_STANDARD; 17534 goto do_retry; 17535 } 17536 break; 17537 17538 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */ 17539 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 17540 un->un_resvd_status |= 17541 (SD_LOST_RESERVE | SD_WANT_RESERVE); 17542 } 17543 /* FALLTHRU */ 17544 17545 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */ 17546 if (!ISREMOVABLE(un)) { 17547 break; 17548 } 17549 17550 /* 17551 * When we get a unit attention from a removable-media device, 17552 * it may be in a state that will take a long time to recover 17553 * (e.g., from a reset). Since we are executing in interrupt 17554 * context here, we cannot wait around for the device to come 17555 * back. So hand this command off to sd_media_change_task() 17556 * for deferred processing under taskq thread context. (Note 17557 * that the command still may be failed if a problem is 17558 * encountered at a later time.) 17559 */ 17560 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp, 17561 KM_NOSLEEP) == 0) { 17562 /* 17563 * Cannot dispatch the request so fail the command. 17564 */ 17565 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17566 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17567 si.ssi_severity = SCSI_ERR_FATAL; 17568 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17569 sd_return_failed_command(un, bp, EIO); 17570 } 17571 /* 17572 * Either the command has been successfully dispatched to a 17573 * task Q for retrying, or the dispatch failed. In either case 17574 * do NOT retry again by calling sd_retry_command. 
This sets up 17575 * two retries of the same command and when one completes and 17576 * frees the resources the other will access freed memory, 17577 * a bad thing. 17578 */ 17579 return; 17580 17581 default: 17582 break; 17583 } 17584 17585 if (!ISREMOVABLE(un)) { 17586 /* 17587 * Do not update these here for removables. For removables 17588 * these stats are updated (1) above if we failed to dispatch 17589 * sd_media_change_task(), or (2) sd_media_change_task() may 17590 * update these later if it encounters an error. 17591 */ 17592 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17593 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err); 17594 } 17595 17596 do_retry: 17597 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si, 17598 EIO, SD_UA_RETRY_DELAY, NULL); 17599 } 17600 17601 17602 17603 /* 17604 * Function: sd_sense_key_fail_command 17605 * 17606 * Description: Used to fail a command when we don't like the sense key that 17607 * was returned. 17608 * 17609 * Context: May be called from interrupt context 17610 */ 17611 17612 static void 17613 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, 17614 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17615 { 17616 struct sd_sense_info si; 17617 17618 ASSERT(un != NULL); 17619 ASSERT(mutex_owned(SD_MUTEX(un))); 17620 ASSERT(bp != NULL); 17621 ASSERT(xp != NULL); 17622 ASSERT(pktp != NULL); 17623 17624 si.ssi_severity = SCSI_ERR_FATAL; 17625 si.ssi_pfa_flag = FALSE; 17626 17627 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17628 sd_return_failed_command(un, bp, EIO); 17629 } 17630 17631 17632 17633 /* 17634 * Function: sd_sense_key_blank_check 17635 * 17636 * Description: Recovery actions for a SCSI "Blank Check" sense key. 17637 * Has no monetary connotation. 17638 * 17639 * Context: May be called from interrupt context 17640 */ 17641 17642 static void 17643 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, 17644 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17645 { 17646 struct sd_sense_info si; 17647 17648 ASSERT(un != NULL); 17649 ASSERT(mutex_owned(SD_MUTEX(un))); 17650 ASSERT(bp != NULL); 17651 ASSERT(xp != NULL); 17652 ASSERT(pktp != NULL); 17653 17654 /* 17655 * Blank check is not fatal for removable devices, therefore 17656 * it does not require a console message. 17657 */ 17658 si.ssi_severity = (ISREMOVABLE(un)) ? SCSI_ERR_ALL : SCSI_ERR_FATAL; 17659 si.ssi_pfa_flag = FALSE; 17660 17661 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED); 17662 sd_return_failed_command(un, bp, EIO); 17663 } 17664 17665 17666 17667 17668 /* 17669 * Function: sd_sense_key_aborted_command 17670 * 17671 * Description: Recovery actions for a SCSI "Aborted Command" sense key. 17672 * 17673 * Context: May be called from interrupt context 17674 */ 17675 17676 static void 17677 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp, 17678 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17679 { 17680 struct sd_sense_info si; 17681 17682 ASSERT(un != NULL); 17683 ASSERT(mutex_owned(SD_MUTEX(un))); 17684 ASSERT(bp != NULL); 17685 ASSERT(xp != NULL); 17686 ASSERT(pktp != NULL); 17687 17688 si.ssi_severity = SCSI_ERR_FATAL; 17689 si.ssi_pfa_flag = FALSE; 17690 17691 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17692 17693 /* 17694 * This really ought to be a fatal error, but we will retry anyway 17695 * as some drives report this as a spurious error.
17696 */ 17697 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17698 &si, EIO, (clock_t)0, NULL); 17699 } 17700 17701 17702 17703 /* 17704 * Function: sd_sense_key_default 17705 * 17706 * Description: Default recovery action for several SCSI sense keys (basically 17707 * attempts a retry). 17708 * 17709 * Context: May be called from interrupt context 17710 */ 17711 17712 static void 17713 sd_sense_key_default(struct sd_lun *un, 17714 int sense_key, 17715 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp) 17716 { 17717 struct sd_sense_info si; 17718 17719 ASSERT(un != NULL); 17720 ASSERT(mutex_owned(SD_MUTEX(un))); 17721 ASSERT(bp != NULL); 17722 ASSERT(xp != NULL); 17723 ASSERT(pktp != NULL); 17724 17725 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17726 17727 /* 17728 * Undecoded sense key. Attempt retries and hope that will fix 17729 * the problem. Otherwise, we're dead. 17730 */ 17731 if ((pktp->pkt_flags & FLAG_SILENT) == 0) { 17732 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17733 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]); 17734 } 17735 17736 si.ssi_severity = SCSI_ERR_FATAL; 17737 si.ssi_pfa_flag = FALSE; 17738 17739 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg, 17740 &si, EIO, (clock_t)0, NULL); 17741 } 17742 17743 17744 17745 /* 17746 * Function: sd_print_retry_msg 17747 * 17748 * Description: Print a message indicating the retry action being taken. 17749 * 17750 * Arguments: un - ptr to associated softstate 17751 * bp - ptr to buf(9S) for the command 17752 * arg - not used. 17753 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17754 * or SD_NO_RETRY_ISSUED 17755 * 17756 * Context: May be called from interrupt context 17757 */ 17758 /* ARGSUSED */ 17759 static void 17760 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag) 17761 { 17762 struct sd_xbuf *xp; 17763 struct scsi_pkt *pktp; 17764 char *reasonp; 17765 char *msgp; 17766 17767 ASSERT(un != NULL); 17768 ASSERT(mutex_owned(SD_MUTEX(un))); 17769 ASSERT(bp != NULL); 17770 pktp = SD_GET_PKTP(bp); 17771 ASSERT(pktp != NULL); 17772 xp = SD_GET_XBUF(bp); 17773 ASSERT(xp != NULL); 17774 17775 ASSERT(!mutex_owned(&un->un_pm_mutex)); 17776 mutex_enter(&un->un_pm_mutex); 17777 if ((un->un_state == SD_STATE_SUSPENDED) || 17778 (SD_DEVICE_IS_IN_LOW_POWER(un)) || 17779 (pktp->pkt_flags & FLAG_SILENT)) { 17780 mutex_exit(&un->un_pm_mutex); 17781 goto update_pkt_reason; 17782 } 17783 mutex_exit(&un->un_pm_mutex); 17784 17785 /* 17786 * Suppress messages if they are all the same pkt_reason; with 17787 * TQ, many (up to 256) are returned with the same pkt_reason. 17788 * If we are in panic, then suppress the retry messages. 17789 */ 17790 switch (flag) { 17791 case SD_NO_RETRY_ISSUED: 17792 msgp = "giving up"; 17793 break; 17794 case SD_IMMEDIATE_RETRY_ISSUED: 17795 case SD_DELAYED_RETRY_ISSUED: 17796 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) || 17797 ((pktp->pkt_reason == un->un_last_pkt_reason) && 17798 (sd_error_level != SCSI_ERR_ALL))) { 17799 return; 17800 } 17801 msgp = "retrying command"; 17802 break; 17803 default: 17804 goto update_pkt_reason; 17805 } 17806 17807 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" : 17808 scsi_rname(pktp->pkt_reason)); 17809 17810 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17811 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp); 17812 17813 update_pkt_reason: 17814 /* 17815 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason. 
17816 * This is to prevent multiple console messages for the same failure 17817 * condition. Note that un->un_last_pkt_reason is NOT restored if & 17818 * when the command is retried successfully because there still may be 17819 * more commands coming back with the same value of pktp->pkt_reason. 17820 */ 17821 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) { 17822 un->un_last_pkt_reason = pktp->pkt_reason; 17823 } 17824 } 17825 17826 17827 /* 17828 * Function: sd_print_cmd_incomplete_msg 17829 * 17830 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason. 17831 * 17832 * Arguments: un - ptr to associated softstate 17833 * bp - ptr to buf(9S) for the command 17834 * arg - passed to sd_print_retry_msg() 17835 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED, 17836 * or SD_NO_RETRY_ISSUED 17837 * 17838 * Context: May be called from interrupt context 17839 */ 17840 17841 static void 17842 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, 17843 int code) 17844 { 17845 dev_info_t *dip; 17846 17847 ASSERT(un != NULL); 17848 ASSERT(mutex_owned(SD_MUTEX(un))); 17849 ASSERT(bp != NULL); 17850 17851 switch (code) { 17852 case SD_NO_RETRY_ISSUED: 17853 /* Command was failed. Someone turned off this target? */ 17854 if (un->un_state != SD_STATE_OFFLINE) { 17855 /* 17856 * Suppress message if we are detaching and 17857 * device has been disconnected 17858 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation 17859 * private interface and not part of the DDI 17860 */ 17861 dip = un->un_sd->sd_dev; 17862 if (!(DEVI_IS_DETACHING(dip) && 17863 DEVI_IS_DEVICE_REMOVED(dip))) { 17864 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 17865 "disk not responding to selection\n"); 17866 } 17867 New_state(un, SD_STATE_OFFLINE); 17868 } 17869 break; 17870 17871 case SD_DELAYED_RETRY_ISSUED: 17872 case SD_IMMEDIATE_RETRY_ISSUED: 17873 default: 17874 /* Command was successfully queued for retry */ 17875 sd_print_retry_msg(un, bp, arg, code); 17876 break; 17877 } 17878 } 17879 17880 17881 /* 17882 * Function: sd_pkt_reason_cmd_incomplete 17883 * 17884 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason. 17885 * 17886 * Context: May be called from interrupt context 17887 */ 17888 17889 static void 17890 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp, 17891 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17892 { 17893 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE; 17894 17895 ASSERT(un != NULL); 17896 ASSERT(mutex_owned(SD_MUTEX(un))); 17897 ASSERT(bp != NULL); 17898 ASSERT(xp != NULL); 17899 ASSERT(pktp != NULL); 17900 17901 /* Do not do a reset if selection did not complete */ 17902 /* Note: Should this not just check the bit? */ 17903 if (pktp->pkt_state != STATE_GOT_BUS) { 17904 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17905 sd_reset_target(un, pktp); 17906 } 17907 17908 /* 17909 * If the target was not successfully selected, then set 17910 * SD_RETRIES_FAILFAST to indicate that we lost communication 17911 * with the target, and further retries and/or commands are 17912 * likely to take a long time. 
17913 */ 17914 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) { 17915 flag |= SD_RETRIES_FAILFAST; 17916 } 17917 17918 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17919 17920 sd_retry_command(un, bp, flag, 17921 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17922 } 17923 17924 17925 17926 /* 17927 * Function: sd_pkt_reason_cmd_tran_err 17928 * 17929 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason. 17930 * 17931 * Context: May be called from interrupt context 17932 */ 17933 17934 static void 17935 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp, 17936 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17937 { 17938 ASSERT(un != NULL); 17939 ASSERT(mutex_owned(SD_MUTEX(un))); 17940 ASSERT(bp != NULL); 17941 ASSERT(xp != NULL); 17942 ASSERT(pktp != NULL); 17943 17944 /* 17945 * Do not reset if we got a parity error, or if 17946 * selection did not complete. 17947 */ 17948 SD_UPDATE_ERRSTATS(un, sd_harderrs); 17949 /* Note: Should this not just check the bit for pkt_state? */ 17950 if (((pktp->pkt_statistics & STAT_PERR) == 0) && 17951 (pktp->pkt_state != STATE_GOT_BUS)) { 17952 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17953 sd_reset_target(un, pktp); 17954 } 17955 17956 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17957 17958 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 17959 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17960 } 17961 17962 17963 17964 /* 17965 * Function: sd_pkt_reason_cmd_reset 17966 * 17967 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason. 17968 * 17969 * Context: May be called from interrupt context 17970 */ 17971 17972 static void 17973 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, 17974 struct sd_xbuf *xp, struct scsi_pkt *pktp) 17975 { 17976 ASSERT(un != NULL); 17977 ASSERT(mutex_owned(SD_MUTEX(un))); 17978 ASSERT(bp != NULL); 17979 ASSERT(xp != NULL); 17980 ASSERT(pktp != NULL); 17981 17982 /* The target may still be running the command, so try to reset. */ 17983 SD_UPDATE_ERRSTATS(un, sd_transerrs); 17984 sd_reset_target(un, pktp); 17985 17986 SD_UPDATE_RESERVATION_STATUS(un, pktp); 17987 17988 /* 17989 * If pkt_reason is CMD_RESET chances are that this pkt got 17990 * reset because another target on this bus caused it. The target 17991 * that caused it should get CMD_TIMEOUT with pkt_statistics 17992 * of STAT_TIMEOUT/STAT_DEV_RESET. 17993 */ 17994 17995 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 17996 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 17997 } 17998 17999 18000 18001 18002 /* 18003 * Function: sd_pkt_reason_cmd_aborted 18004 * 18005 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason. 18006 * 18007 * Context: May be called from interrupt context 18008 */ 18009 18010 static void 18011 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, 18012 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18013 { 18014 ASSERT(un != NULL); 18015 ASSERT(mutex_owned(SD_MUTEX(un))); 18016 ASSERT(bp != NULL); 18017 ASSERT(xp != NULL); 18018 ASSERT(pktp != NULL); 18019 18020 /* The target may still be running the command, so try to reset. */ 18021 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18022 sd_reset_target(un, pktp); 18023 18024 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18025 18026 /* 18027 * If pkt_reason is CMD_ABORTED chances are that this pkt got 18028 * aborted because another target on this bus caused it. The target 18029 * that caused it should get CMD_TIMEOUT with pkt_statistics 18030 * of STAT_TIMEOUT/STAT_DEV_RESET. 
18031 */ 18032 18033 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE), 18034 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18035 } 18036 18037 18038 18039 /* 18040 * Function: sd_pkt_reason_cmd_timeout 18041 * 18042 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason. 18043 * 18044 * Context: May be called from interrupt context 18045 */ 18046 18047 static void 18048 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, 18049 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18050 { 18051 ASSERT(un != NULL); 18052 ASSERT(mutex_owned(SD_MUTEX(un))); 18053 ASSERT(bp != NULL); 18054 ASSERT(xp != NULL); 18055 ASSERT(pktp != NULL); 18056 18057 18058 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18059 sd_reset_target(un, pktp); 18060 18061 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18062 18063 /* 18064 * A command timeout indicates that we could not establish 18065 * communication with the target, so set SD_RETRIES_FAILFAST 18066 * as further retries/commands are likely to take a long time. 18067 */ 18068 sd_retry_command(un, bp, 18069 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST), 18070 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18071 } 18072 18073 18074 18075 /* 18076 * Function: sd_pkt_reason_cmd_unx_bus_free 18077 * 18078 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason. 18079 * 18080 * Context: May be called from interrupt context 18081 */ 18082 18083 static void 18084 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp, 18085 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18086 { 18087 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code); 18088 18089 ASSERT(un != NULL); 18090 ASSERT(mutex_owned(SD_MUTEX(un))); 18091 ASSERT(bp != NULL); 18092 ASSERT(xp != NULL); 18093 ASSERT(pktp != NULL); 18094 18095 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18096 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18097 18098 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ? 18099 sd_print_retry_msg : NULL; 18100 18101 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18102 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18103 } 18104 18105 18106 /* 18107 * Function: sd_pkt_reason_cmd_tag_reject 18108 * 18109 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason. 18110 * 18111 * Context: May be called from interrupt context 18112 */ 18113 18114 static void 18115 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp, 18116 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18117 { 18118 ASSERT(un != NULL); 18119 ASSERT(mutex_owned(SD_MUTEX(un))); 18120 ASSERT(bp != NULL); 18121 ASSERT(xp != NULL); 18122 ASSERT(pktp != NULL); 18123 18124 SD_UPDATE_ERRSTATS(un, sd_harderrs); 18125 pktp->pkt_flags = 0; 18126 un->un_tagflags = 0; 18127 if (un->un_f_opt_queueing == TRUE) { 18128 un->un_throttle = min(un->un_throttle, 3); 18129 } else { 18130 un->un_throttle = 1; 18131 } 18132 mutex_exit(SD_MUTEX(un)); 18133 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 18134 mutex_enter(SD_MUTEX(un)); 18135 18136 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18137 18138 /* Legacy behavior not to check retry counts here. */ 18139 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE), 18140 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18141 } 18142 18143 18144 /* 18145 * Function: sd_pkt_reason_default 18146 * 18147 * Description: Default recovery actions for SCSA pkt_reason values that 18148 * do not have more explicit recovery actions. 
18149 * 18150 * Context: May be called from interrupt context 18151 */ 18152 18153 static void 18154 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, 18155 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18156 { 18157 ASSERT(un != NULL); 18158 ASSERT(mutex_owned(SD_MUTEX(un))); 18159 ASSERT(bp != NULL); 18160 ASSERT(xp != NULL); 18161 ASSERT(pktp != NULL); 18162 18163 SD_UPDATE_ERRSTATS(un, sd_transerrs); 18164 sd_reset_target(un, pktp); 18165 18166 SD_UPDATE_RESERVATION_STATUS(un, pktp); 18167 18168 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE), 18169 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL); 18170 } 18171 18172 18173 18174 /* 18175 * Function: sd_pkt_status_check_condition 18176 * 18177 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status. 18178 * 18179 * Context: May be called from interrupt context 18180 */ 18181 18182 static void 18183 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp, 18184 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18185 { 18186 ASSERT(un != NULL); 18187 ASSERT(mutex_owned(SD_MUTEX(un))); 18188 ASSERT(bp != NULL); 18189 ASSERT(xp != NULL); 18190 ASSERT(pktp != NULL); 18191 18192 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: " 18193 "entry: buf:0x%p xp:0x%p\n", bp, xp); 18194 18195 /* 18196 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the 18197 * command will be retried after the request sense). Otherwise, retry 18198 * the command. Note: we are issuing the request sense even though the 18199 * retry limit may have been reached for the failed command. 18200 */ 18201 if (un->un_f_arq_enabled == FALSE) { 18202 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 18203 "no ARQ, sending request sense command\n"); 18204 sd_send_request_sense_command(un, bp, pktp); 18205 } else { 18206 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: " 18207 "ARQ,retrying request sense command\n"); 18208 #if defined(__i386) || defined(__amd64) 18209 /* 18210 * The SD_RETRY_DELAY value need to be adjusted here 18211 * when SD_RETRY_DELAY change in sddef.h 18212 */ 18213 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 0, 18214 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, 18215 NULL); 18216 #else 18217 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, 18218 0, SD_RETRY_DELAY, NULL); 18219 #endif 18220 } 18221 18222 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n"); 18223 } 18224 18225 18226 /* 18227 * Function: sd_pkt_status_busy 18228 * 18229 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status. 18230 * 18231 * Context: May be called from interrupt context 18232 */ 18233 18234 static void 18235 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp, 18236 struct scsi_pkt *pktp) 18237 { 18238 ASSERT(un != NULL); 18239 ASSERT(mutex_owned(SD_MUTEX(un))); 18240 ASSERT(bp != NULL); 18241 ASSERT(xp != NULL); 18242 ASSERT(pktp != NULL); 18243 18244 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18245 "sd_pkt_status_busy: entry\n"); 18246 18247 /* If retries are exhausted, just fail the command. */ 18248 if (xp->xb_retry_count >= un->un_busy_retry_count) { 18249 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 18250 "device busy too long\n"); 18251 sd_return_failed_command(un, bp, EIO); 18252 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18253 "sd_pkt_status_busy: exit\n"); 18254 return; 18255 } 18256 xp->xb_retry_count++; 18257 18258 /* 18259 * Try to reset the target. 
However, we do not want to perform 18260 * more than one reset if the device continues to fail. The reset 18261 * will be performed when the retry count reaches the reset 18262 * threshold. This threshold should be set such that at least 18263 * one retry is issued before the reset is performed. 18264 */ 18265 if (xp->xb_retry_count == 18266 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) { 18267 int rval = 0; 18268 mutex_exit(SD_MUTEX(un)); 18269 if (un->un_f_allow_bus_device_reset == TRUE) { 18270 /* 18271 * First try to reset the LUN; if we cannot then 18272 * try to reset the target. 18273 */ 18274 if (un->un_f_lun_reset_enabled == TRUE) { 18275 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18276 "sd_pkt_status_busy: RESET_LUN\n"); 18277 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 18278 } 18279 if (rval == 0) { 18280 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18281 "sd_pkt_status_busy: RESET_TARGET\n"); 18282 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 18283 } 18284 } 18285 if (rval == 0) { 18286 /* 18287 * If the RESET_LUN and/or RESET_TARGET failed, 18288 * try RESET_ALL 18289 */ 18290 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18291 "sd_pkt_status_busy: RESET_ALL\n"); 18292 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL); 18293 } 18294 mutex_enter(SD_MUTEX(un)); 18295 if (rval == 0) { 18296 /* 18297 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed. 18298 * At this point we give up & fail the command. 18299 */ 18300 sd_return_failed_command(un, bp, EIO); 18301 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18302 "sd_pkt_status_busy: exit (failed cmd)\n"); 18303 return; 18304 } 18305 } 18306 18307 /* 18308 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as 18309 * we have already checked the retry counts above. 18310 */ 18311 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 18312 EIO, SD_BSY_TIMEOUT, NULL); 18313 18314 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18315 "sd_pkt_status_busy: exit\n"); 18316 } 18317 18318 18319 /* 18320 * Function: sd_pkt_status_reservation_conflict 18321 * 18322 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI 18323 * command status. 18324 * 18325 * Context: May be called from interrupt context 18326 */ 18327 18328 static void 18329 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp, 18330 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18331 { 18332 ASSERT(un != NULL); 18333 ASSERT(mutex_owned(SD_MUTEX(un))); 18334 ASSERT(bp != NULL); 18335 ASSERT(xp != NULL); 18336 ASSERT(pktp != NULL); 18337 18338 /* 18339 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation 18340 * conflict could be due to various reasons like incorrect keys, not 18341 * registered or not reserved etc. So, we return EACCES to the caller. 18342 */ 18343 if (un->un_reservation_type == SD_SCSI3_RESERVATION) { 18344 int cmd = SD_GET_PKT_OPCODE(pktp); 18345 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) || 18346 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) { 18347 sd_return_failed_command(un, bp, EACCES); 18348 return; 18349 } 18350 } 18351 18352 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 18353 18354 if ((un->un_resvd_status & SD_FAILFAST) != 0) { 18355 if (sd_failfast_enable != 0) { 18356 /* By definition, we must panic here.... 
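		 * (The failfast panic exists for HA configurations: losing
		 * the reservation while failfast is armed means another
		 * node is assumed to own the disk, and an immediate panic
		 * is considered safer than risking further writes to a
		 * device this node no longer owns.)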
*/ 18357 panic("Reservation Conflict"); 18358 /*NOTREACHED*/ 18359 } 18360 SD_ERROR(SD_LOG_IO, un, 18361 "sd_handle_resv_conflict: Disk Reserved\n"); 18362 sd_return_failed_command(un, bp, EACCES); 18363 return; 18364 } 18365 18366 /* 18367 * 1147670: retry only if sd_retry_on_reservation_conflict 18368 * property is set (default is 1). Retries will not succeed 18369 * on a disk reserved by another initiator. HA systems 18370 * may reset this via sd.conf to avoid these retries. 18371 * 18372 * Note: The legacy return code for this failure is EIO, however EACCES 18373 * seems more appropriate for a reservation conflict. 18374 */ 18375 if (sd_retry_on_reservation_conflict == 0) { 18376 SD_ERROR(SD_LOG_IO, un, 18377 "sd_handle_resv_conflict: Device Reserved\n"); 18378 sd_return_failed_command(un, bp, EIO); 18379 return; 18380 } 18381 18382 /* 18383 * Retry the command if we can. 18384 * 18385 * Note: The legacy return code for this failure is EIO, however EACCES 18386 * seems more appropriate for a reservation conflict. 18387 */ 18388 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO, 18389 (clock_t)2, NULL); 18390 } 18391 18392 18393 18394 /* 18395 * Function: sd_pkt_status_qfull 18396 * 18397 * Description: Handle a QUEUE FULL condition from the target. This can 18398 * occur if the HBA does not handle the queue full condition. 18399 * (Basically this means third-party HBAs as Sun HBAs will 18400 * handle the queue full condition.) Note that if there are 18401 * some commands already in the transport, then the queue full 18402 * has occurred because the queue for this nexus is actually 18403 * full. If there are no commands in the transport, then the 18404 * queue full is resulting from some other initiator or lun 18405 * consuming all the resources at the target. 18406 * 18407 * Context: May be called from interrupt context 18408 */ 18409 18410 static void 18411 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, 18412 struct sd_xbuf *xp, struct scsi_pkt *pktp) 18413 { 18414 ASSERT(un != NULL); 18415 ASSERT(mutex_owned(SD_MUTEX(un))); 18416 ASSERT(bp != NULL); 18417 ASSERT(xp != NULL); 18418 ASSERT(pktp != NULL); 18419 18420 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18421 "sd_pkt_status_qfull: entry\n"); 18422 18423 /* 18424 * Just lower the QFULL throttle and retry the command. Note that 18425 * we do not limit the number of retries here. 18426 */ 18427 sd_reduce_throttle(un, SD_THROTTLE_QFULL); 18428 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0, 18429 SD_RESTART_TIMEOUT, NULL); 18430 18431 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, 18432 "sd_pkt_status_qfull: exit\n"); 18433 } 18434 18435 18436 /* 18437 * Function: sd_reset_target 18438 * 18439 * Description: Issue a scsi_reset(9F), with either RESET_LUN, 18440 * RESET_TARGET, or RESET_ALL. 18441 * 18442 * Context: May be called under interrupt context. 18443 */ 18444 18445 static void 18446 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp) 18447 { 18448 int rval = 0; 18449 18450 ASSERT(un != NULL); 18451 ASSERT(mutex_owned(SD_MUTEX(un))); 18452 ASSERT(pktp != NULL); 18453 18454 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n"); 18455 18456 /* 18457 * No need to reset if the transport layer has already done so. 
 */
	if ((pktp->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reset_target: no reset\n");
		return;
	}

	mutex_exit(SD_MUTEX(un));

	if (un->un_f_allow_bus_device_reset == TRUE) {
		if (un->un_f_lun_reset_enabled == TRUE) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_reset_target: RESET_LUN\n");
			rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
		}
		if (rval == 0) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_reset_target: RESET_TARGET\n");
			rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
		}
	}

	if (rval == 0) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reset_target: RESET_ALL\n");
		(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
	}

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n");
}


/*
 * Function:	sd_media_change_task
 *
 * Description:	Recovery action for CDROM to become available.
 *
 * Context:	Executes in a taskq() thread context
 */

static void
sd_media_change_task(void *arg)
{
	struct scsi_pkt	*pktp = arg;
	struct sd_lun	*un;
	struct buf	*bp;
	struct sd_xbuf	*xp;
	int		err = 0;
	int		retry_count = 0;
	int		retry_limit = SD_UNIT_ATTENTION_RETRY/10;
	struct sd_sense_info	si;

	ASSERT(pktp != NULL);
	bp = (struct buf *)pktp->pkt_private;
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(ISREMOVABLE(un));

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;

	/*
	 * When a reset is issued on a CDROM, it takes a long time to
	 * recover. The first few attempts to read the capacity and other
	 * things related to handling the unit attention fail (with an ASC
	 * of 0x4 and an ASCQ of 0x1). In that case we want to do enough
	 * retries, while still limiting the retries in other cases of
	 * genuine failure (such as no media in the drive).
	 */
	while (retry_count++ < retry_limit) {
		if ((err = sd_handle_mchange(un)) == 0) {
			break;
		}
		if (err == EAGAIN) {
			retry_limit = SD_UNIT_ATTENTION_RETRY;
		}
		/* Sleep for 0.5 sec. & try again */
		delay(drv_usectohz(500000));
	}

	/*
	 * Dispatch (retry or fail) the original command here,
	 * along with appropriate console messages....
	 *
	 * Must grab the mutex before calling sd_retry_command,
	 * sd_print_sense_msg and sd_return_failed_command.
	 */
	mutex_enter(SD_MUTEX(un));
	if (err != SD_CMD_SUCCESS) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
		si.ssi_severity = SCSI_ERR_FATAL;
		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
		sd_return_failed_command(un, bp, EIO);
	} else {
		sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
		    &si, EIO, (clock_t)0, NULL);
	}
	mutex_exit(SD_MUTEX(un));
}



/*
 * Function:	sd_handle_mchange
 *
 * Description:	Perform geometry validation & other recovery when CDROM
 *		has been removed from drive.
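 *		The recovery sequence performed below is: re-read the
 *		capacity and block size, update the unit's block info and
 *		the capacity kstat, force revalidation of the geometry,
 *		and finally attempt to re-lock the door.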
18573 * 18574 * Return Code: 0 for success 18575 * errno-type return code of either sd_send_scsi_DOORLOCK() or 18576 * sd_send_scsi_READ_CAPACITY() 18577 * 18578 * Context: Executes in a taskq() thread context 18579 */ 18580 18581 static int 18582 sd_handle_mchange(struct sd_lun *un) 18583 { 18584 uint64_t capacity; 18585 uint32_t lbasize; 18586 int rval; 18587 18588 ASSERT(!mutex_owned(SD_MUTEX(un))); 18589 ASSERT(ISREMOVABLE(un)); 18590 18591 if ((rval = sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 18592 SD_PATH_DIRECT_PRIORITY)) != 0) { 18593 return (rval); 18594 } 18595 18596 mutex_enter(SD_MUTEX(un)); 18597 sd_update_block_info(un, lbasize, capacity); 18598 18599 if (un->un_errstats != NULL) { 18600 struct sd_errstats *stp = 18601 (struct sd_errstats *)un->un_errstats->ks_data; 18602 stp->sd_capacity.value.ui64 = (uint64_t) 18603 ((uint64_t)un->un_blockcount * 18604 (uint64_t)un->un_tgt_blocksize); 18605 } 18606 18607 /* 18608 * Note: Maybe let the strategy/partitioning chain worry about getting 18609 * valid geometry. 18610 */ 18611 un->un_f_geometry_is_valid = FALSE; 18612 (void) sd_validate_geometry(un, SD_PATH_DIRECT_PRIORITY); 18613 if (un->un_f_geometry_is_valid == FALSE) { 18614 mutex_exit(SD_MUTEX(un)); 18615 return (EIO); 18616 } 18617 18618 mutex_exit(SD_MUTEX(un)); 18619 18620 /* 18621 * Try to lock the door 18622 */ 18623 return (sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 18624 SD_PATH_DIRECT_PRIORITY)); 18625 } 18626 18627 18628 /* 18629 * Function: sd_send_scsi_DOORLOCK 18630 * 18631 * Description: Issue the scsi DOOR LOCK command 18632 * 18633 * Arguments: un - pointer to driver soft state (unit) structure for 18634 * this target. 18635 * flag - SD_REMOVAL_ALLOW 18636 * SD_REMOVAL_PREVENT 18637 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 18638 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 18639 * to use the USCSI "direct" chain and bypass the normal 18640 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 18641 * command is issued as part of an error recovery action. 18642 * 18643 * Return Code: 0 - Success 18644 * errno return code from sd_send_scsi_cmd() 18645 * 18646 * Context: Can sleep. 
 */

static int
sd_send_scsi_DOORLOCK(struct sd_lun *un, int flag, int path_flag)
{
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	struct scsi_extended_sense	sense_buf;
	int			status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un);

	/* already determined doorlock is not supported, fake success */
	if (un->un_f_doorlock_supported == FALSE) {
		return (0);
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));

	cdb.scc_cmd = SCMD_DOORLOCK;
	cdb.cdb_opaque[4] = (uchar_t)flag;

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP0;
	ucmd_buf.uscsi_bufaddr	= NULL;
	ucmd_buf.uscsi_buflen	= 0;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (sense_buf);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 15;

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_DOORLOCK: returning sd_send_scsi_cmd()\n");

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
	    UIO_SYSSPACE, UIO_SYSSPACE, path_flag);

	if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
	    (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
	    (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) {
		/* fake success and skip subsequent doorlock commands */
		un->un_f_doorlock_supported = FALSE;
		return (0);
	}

	return (status);
}


/*
 * Function:	sd_send_scsi_READ_CAPACITY
 *
 * Description:	This routine uses the scsi READ CAPACITY command to determine
 *		the device capacity in number of blocks and the device native
 *		block size. If this function returns a failure, then the
 *		values in *capp and *lbap are undefined. If the capacity
 *		returned is 0xffffffff then the lun is too large for a
 *		normal READ CAPACITY command and the results of a
 *		READ CAPACITY 16 will be used instead.
 *
 * Arguments:	un   - ptr to soft state struct for the target
 *		capp - ptr to unsigned 64-bit variable to receive the
 *			capacity value from the command.
 *		lbap - ptr to unsigned 32-bit variable to receive the
 *			block size value from the command
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq. SD_PATH_DIRECT_PRIORITY is used when this
 *			command is issued as part of an error recovery action.
 *
 * Return Code:	0   - Success
 *		EIO - IO error
 *		EACCES - Reservation conflict detected
 *		EAGAIN - Device is becoming ready
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context:	Can sleep. Blocks until command completes.
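 *
 *		As a worked example (illustrative values only): a device
 *		returning the eight data bytes 00 00 00 09 00 00 02 00
 *		reports a last LBA of 9 and a block length of 0x200 (512
 *		bytes), so this routine computes (9 + 1) = 10 blocks of
 *		512 bytes each.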
18729 */ 18730 18731 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity) 18732 18733 static int 18734 sd_send_scsi_READ_CAPACITY(struct sd_lun *un, uint64_t *capp, uint32_t *lbap, 18735 int path_flag) 18736 { 18737 struct scsi_extended_sense sense_buf; 18738 struct uscsi_cmd ucmd_buf; 18739 union scsi_cdb cdb; 18740 uint32_t *capacity_buf; 18741 uint64_t capacity; 18742 uint32_t lbasize; 18743 int status; 18744 18745 ASSERT(un != NULL); 18746 ASSERT(!mutex_owned(SD_MUTEX(un))); 18747 ASSERT(capp != NULL); 18748 ASSERT(lbap != NULL); 18749 18750 SD_TRACE(SD_LOG_IO, un, 18751 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un); 18752 18753 /* 18754 * First send a READ_CAPACITY command to the target. 18755 * (This command is mandatory under SCSI-2.) 18756 * 18757 * Set up the CDB for the READ_CAPACITY command. The Partial 18758 * Medium Indicator bit is cleared. The address field must be 18759 * zero if the PMI bit is zero. 18760 */ 18761 bzero(&cdb, sizeof (cdb)); 18762 bzero(&ucmd_buf, sizeof (ucmd_buf)); 18763 18764 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP); 18765 18766 cdb.scc_cmd = SCMD_READ_CAPACITY; 18767 18768 ucmd_buf.uscsi_cdb = (char *)&cdb; 18769 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 18770 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf; 18771 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE; 18772 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 18773 ucmd_buf.uscsi_rqlen = sizeof (sense_buf); 18774 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 18775 ucmd_buf.uscsi_timeout = 60; 18776 18777 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 18778 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 18779 18780 switch (status) { 18781 case 0: 18782 /* Return failure if we did not get valid capacity data. */ 18783 if (ucmd_buf.uscsi_resid != 0) { 18784 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18785 return (EIO); 18786 } 18787 18788 /* 18789 * Read capacity and block size from the READ CAPACITY 10 data. 18790 * This data may be adjusted later due to device specific 18791 * issues. 18792 * 18793 * According to the SCSI spec, the READ CAPACITY 10 18794 * command returns the following: 18795 * 18796 * bytes 0-3: Maximum logical block address available. 18797 * (MSB in byte:0 & LSB in byte:3) 18798 * 18799 * bytes 4-7: Block length in bytes 18800 * (MSB in byte:4 & LSB in byte:7) 18801 * 18802 */ 18803 capacity = BE_32(capacity_buf[0]); 18804 lbasize = BE_32(capacity_buf[1]); 18805 18806 /* 18807 * Done with capacity_buf 18808 */ 18809 kmem_free(capacity_buf, SD_CAPACITY_SIZE); 18810 18811 /* 18812 * if the reported capacity is set to all 0xf's, then 18813 * this disk is too large and requires SBC-2 commands. 18814 * Reissue the request using READ CAPACITY 16. 18815 */ 18816 if (capacity == 0xffffffff) { 18817 status = sd_send_scsi_READ_CAPACITY_16(un, &capacity, 18818 &lbasize, path_flag); 18819 if (status != 0) { 18820 return (status); 18821 } 18822 } 18823 break; /* Success! 
 */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			/*
			 * Check condition; look for ASC/ASCQ of 0x04/0x01
			 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
			 */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (sense_buf.es_add_code == 0x04) &&
			    (sense_buf.es_qual_code == 0x01)) {
				kmem_free(capacity_buf, SD_CAPACITY_SIZE);
				return (EAGAIN);
			}
			break;
		default:
			break;
		}
		/* FALLTHRU */
	default:
		kmem_free(capacity_buf, SD_CAPACITY_SIZE);
		return (status);
	}

	/*
	 * Some ATAPI CD-ROM drives report inaccurate LBA size values
	 * (2352 and 0 are common) so for these devices always force the value
	 * to 2048 as required by the ATAPI specs.
	 */
	if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
		lbasize = 2048;
	}

	/*
	 * Get the maximum LBA value from the READ CAPACITY data.
	 * Here we assume that the Partial Medium Indicator (PMI) bit
	 * was cleared when issuing the command. This means that the LBA
	 * returned from the device is the LBA of the last logical block
	 * on the logical unit. The actual logical block count will be
	 * this value plus one.
	 *
	 * Currently the capacity is saved in terms of un->un_sys_blocksize,
	 * so scale the capacity value to reflect this.
	 */
	capacity = (capacity + 1) * (lbasize / un->un_sys_blocksize);

#if defined(__i386) || defined(__amd64)
	/*
	 * On x86, compensate for off-by-1 error (number of sectors on
	 * media) (1175930)
	 */
	if (!ISREMOVABLE(un) && (lbasize == un->un_sys_blocksize)) {
		capacity -= 1;
	}
#endif

	/*
	 * Copy the values from the READ CAPACITY command into the space
	 * provided by the caller.
	 */
	*capp = capacity;
	*lbap = lbasize;

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
	    "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);

	/*
	 * Both the lbasize and capacity from the device must be nonzero,
	 * otherwise we assume that the values are not valid and return
	 * failure to the caller. (4203735)
	 */
	if ((capacity == 0) || (lbasize == 0)) {
		return (EIO);
	}

	return (0);
}

/*
 * Function:	sd_send_scsi_READ_CAPACITY_16
 *
 * Description:	This routine uses the scsi READ CAPACITY 16 command to
 *		determine the device capacity in number of blocks and the
 *		device native block size. If this function returns a failure,
 *		then the values in *capp and *lbap are undefined.
 *		This routine should always be called by
 *		sd_send_scsi_READ_CAPACITY, which will apply any device
 *		specific adjustments to capacity and lbasize.
 *
 * Arguments:	un   - ptr to soft state struct for the target
 *		capp - ptr to unsigned 64-bit variable to receive the
 *			capacity value from the command.
 *		lbap - ptr to unsigned 32-bit variable to receive the
 *			block size value from the command
 *		path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
 *			the normal command waitq, or SD_PATH_DIRECT_PRIORITY
 *			to use the USCSI "direct" chain and bypass the normal
 *			command waitq. SD_PATH_DIRECT_PRIORITY is used when
 *			this command is issued as part of an error recovery
 *			action.
 *
 * Return Code:	0   - Success
 *		EIO - IO error
 *		EACCES - Reservation conflict detected
 *		EAGAIN - Device is becoming ready
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context:	Can sleep. Blocks until command completes.
 */

#define	SD_CAPACITY_16_SIZE	sizeof (struct scsi_capacity_16)

static int
sd_send_scsi_READ_CAPACITY_16(struct sd_lun *un, uint64_t *capp,
	uint32_t *lbap, int path_flag)
{
	struct	scsi_extended_sense	sense_buf;
	struct	uscsi_cmd	ucmd_buf;
	union	scsi_cdb	cdb;
	uint64_t		*capacity16_buf;
	uint64_t		capacity;
	uint32_t		lbasize;
	int			status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(capp != NULL);
	ASSERT(lbap != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_READ_CAPACITY_16: entry: un:0x%p\n", un);

	/*
	 * First send a READ_CAPACITY_16 command to the target.
	 *
	 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
	 * Medium Indicator bit is cleared. The address field must be
	 * zero if the PMI bit is zero.
	 */
	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));

	capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP4;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)capacity16_buf;
	ucmd_buf.uscsi_buflen	= SD_CAPACITY_16_SIZE;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (sense_buf);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 60;

	/*
	 * Read Capacity (16) is a Service Action In command. One
	 * command byte (0x9E) is overloaded for multiple operations,
	 * with the second CDB byte specifying the desired operation
	 */
	cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
	cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;

	/*
	 * Fill in allocation length field
	 */
	FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
	    UIO_SYSSPACE, UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		/* Return failure if we did not get valid capacity data. */
		if (ucmd_buf.uscsi_resid > 20) {
			kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
			return (EIO);
		}

		/*
		 * Read capacity and block size from the READ CAPACITY 16
		 * data. This data may be adjusted later due to device
		 * specific issues.
		 *
		 * According to the SCSI spec, the READ CAPACITY 16
		 * command returns the following:
		 *
		 *  bytes 0-7: Maximum logical block address available.
		 *		(MSB in byte:0 & LSB in byte:7)
		 *
		 *  bytes 8-11: Block length in bytes
		 *		(MSB in byte:8 & LSB in byte:11)
		 */
		capacity = BE_64(capacity16_buf[0]);
		lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);

		/*
		 * Done with capacity16_buf
		 */
		kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);

		/*
		 * if the reported capacity is set to all 0xf's, then
		 * this disk is too large.
This could only happen with 19029 * a device that supports LBAs larger than 64 bits which 19030 * are not defined by any current T10 standards. 19031 */ 19032 if (capacity == 0xffffffffffffffff) { 19033 return (EIO); 19034 } 19035 break; /* Success! */ 19036 case EIO: 19037 switch (ucmd_buf.uscsi_status) { 19038 case STATUS_RESERVATION_CONFLICT: 19039 status = EACCES; 19040 break; 19041 case STATUS_CHECK: 19042 /* 19043 * Check condition; look for ASC/ASCQ of 0x04/0x01 19044 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY) 19045 */ 19046 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19047 (sense_buf.es_add_code == 0x04) && 19048 (sense_buf.es_qual_code == 0x01)) { 19049 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19050 return (EAGAIN); 19051 } 19052 break; 19053 default: 19054 break; 19055 } 19056 /* FALLTHRU */ 19057 default: 19058 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE); 19059 return (status); 19060 } 19061 19062 *capp = capacity; 19063 *lbap = lbasize; 19064 19065 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: " 19066 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize); 19067 19068 return (0); 19069 } 19070 19071 19072 /* 19073 * Function: sd_send_scsi_START_STOP_UNIT 19074 * 19075 * Description: Issue a scsi START STOP UNIT command to the target. 19076 * 19077 * Arguments: un - pointer to driver soft state (unit) structure for 19078 * this target. 19079 * flag - SD_TARGET_START 19080 * SD_TARGET_STOP 19081 * SD_TARGET_EJECT 19082 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19083 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19084 * to use the USCSI "direct" chain and bypass the normal 19085 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this 19086 * command is issued as part of an error recovery action. 19087 * 19088 * Return Code: 0 - Success 19089 * EIO - IO error 19090 * EACCES - Reservation conflict detected 19091 * ENXIO - Not Ready, medium not present 19092 * errno return code from sd_send_scsi_cmd() 19093 * 19094 * Context: Can sleep. 19095 */ 19096 19097 static int 19098 sd_send_scsi_START_STOP_UNIT(struct sd_lun *un, int flag, int path_flag) 19099 { 19100 struct scsi_extended_sense sense_buf; 19101 union scsi_cdb cdb; 19102 struct uscsi_cmd ucmd_buf; 19103 int status; 19104 19105 ASSERT(un != NULL); 19106 ASSERT(!mutex_owned(SD_MUTEX(un))); 19107 19108 SD_TRACE(SD_LOG_IO, un, 19109 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un); 19110 19111 if (ISREMOVABLE(un) && 19112 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) && 19113 (un->un_f_start_stop_supported != TRUE)) { 19114 return (0); 19115 } 19116 19117 bzero(&cdb, sizeof (cdb)); 19118 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19119 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19120 19121 cdb.scc_cmd = SCMD_START_STOP; 19122 cdb.cdb_opaque[4] = (uchar_t)flag; 19123 19124 ucmd_buf.uscsi_cdb = (char *)&cdb; 19125 ucmd_buf.uscsi_cdblen = CDB_GROUP0; 19126 ucmd_buf.uscsi_bufaddr = NULL; 19127 ucmd_buf.uscsi_buflen = 0; 19128 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19129 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19130 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19131 ucmd_buf.uscsi_timeout = 200; 19132 19133 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19134 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 19135 19136 switch (status) { 19137 case 0: 19138 break; /* Success! 
*/ 19139 case EIO: 19140 switch (ucmd_buf.uscsi_status) { 19141 case STATUS_RESERVATION_CONFLICT: 19142 status = EACCES; 19143 break; 19144 case STATUS_CHECK: 19145 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) { 19146 switch (sense_buf.es_key) { 19147 case KEY_ILLEGAL_REQUEST: 19148 status = ENOTSUP; 19149 break; 19150 case KEY_NOT_READY: 19151 if (sense_buf.es_add_code == 0x3A) { 19152 status = ENXIO; 19153 } 19154 break; 19155 default: 19156 break; 19157 } 19158 } 19159 break; 19160 default: 19161 break; 19162 } 19163 break; 19164 default: 19165 break; 19166 } 19167 19168 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n"); 19169 19170 return (status); 19171 } 19172 19173 19174 /* 19175 * Function: sd_start_stop_unit_callback 19176 * 19177 * Description: timeout(9F) callback to begin recovery process for a 19178 * device that has spun down. 19179 * 19180 * Arguments: arg - pointer to associated softstate struct. 19181 * 19182 * Context: Executes in a timeout(9F) thread context 19183 */ 19184 19185 static void 19186 sd_start_stop_unit_callback(void *arg) 19187 { 19188 struct sd_lun *un = arg; 19189 ASSERT(un != NULL); 19190 ASSERT(!mutex_owned(SD_MUTEX(un))); 19191 19192 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n"); 19193 19194 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP); 19195 } 19196 19197 19198 /* 19199 * Function: sd_start_stop_unit_task 19200 * 19201 * Description: Recovery procedure when a drive is spun down. 19202 * 19203 * Arguments: arg - pointer to associated softstate struct. 19204 * 19205 * Context: Executes in a taskq() thread context 19206 */ 19207 19208 static void 19209 sd_start_stop_unit_task(void *arg) 19210 { 19211 struct sd_lun *un = arg; 19212 19213 ASSERT(un != NULL); 19214 ASSERT(!mutex_owned(SD_MUTEX(un))); 19215 19216 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n"); 19217 19218 /* 19219 * Some unformatted drives report not ready error, no need to 19220 * restart if format has been initiated. 19221 */ 19222 mutex_enter(SD_MUTEX(un)); 19223 if (un->un_f_format_in_progress == TRUE) { 19224 mutex_exit(SD_MUTEX(un)); 19225 return; 19226 } 19227 mutex_exit(SD_MUTEX(un)); 19228 19229 /* 19230 * When a START STOP command is issued from here, it is part of a 19231 * failure recovery operation and must be issued before any other 19232 * commands, including any pending retries. Thus it must be sent 19233 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up 19234 * succeeds or not, we will start I/O after the attempt. 19235 */ 19236 (void) sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 19237 SD_PATH_DIRECT_PRIORITY); 19238 19239 /* 19240 * The above call blocks until the START_STOP_UNIT command completes. 19241 * Now that it has completed, we must re-try the original IO that 19242 * received the NOT READY condition in the first place. There are 19243 * three possible conditions here: 19244 * 19245 * (1) The original IO is on un_retry_bp. 19246 * (2) The original IO is on the regular wait queue, and un_retry_bp 19247 * is NULL. 19248 * (3) The original IO is on the regular wait queue, and un_retry_bp 19249 * points to some other, unrelated bp. 19250 * 19251 * For each case, we must call sd_start_cmds() with un_retry_bp 19252 * as the argument. If un_retry_bp is NULL, this will initiate 19253 * processing of the regular wait queue. If un_retry_bp is not NULL, 19254 * then this will process the bp on un_retry_bp. 
That may or may not
	 * be the original IO, but that does not matter: the important thing
	 * is to keep the IO processing going at this point.
	 *
	 * Note: This is a very specific error recovery sequence associated
	 * with a drive that is not spun up. We attempt a START_STOP_UNIT and
	 * serialize the I/O with completion of the spin-up.
	 */
	mutex_enter(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
	    un, un->un_retry_bp);
	un->un_startstop_timeid = NULL;	/* Timeout is no longer pending */
	sd_start_cmds(un, un->un_retry_bp);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n");
}


/*
 * Function:	sd_send_scsi_INQUIRY
 *
 * Description:	Issue the scsi INQUIRY command.
 *
 * Arguments:	un
 *		bufaddr
 *		buflen
 *		evpd
 *		page_code
 *		residp
 *
 * Return Code:	0   - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context:	Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_INQUIRY(struct sd_lun *un, uchar_t *bufaddr, size_t buflen,
	uchar_t evpd, uchar_t page_code, size_t *residp)
{
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(bufaddr, buflen);

	cdb.scc_cmd = SCMD_INQUIRY;
	cdb.cdb_opaque[1] = evpd;
	cdb.cdb_opaque[2] = page_code;
	FORMG0COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP0;
	ucmd_buf.uscsi_bufaddr	= (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen	= buflen;
	ucmd_buf.uscsi_rqbuf	= NULL;
	ucmd_buf.uscsi_rqlen	= 0;
	ucmd_buf.uscsi_flags	= USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout	= 200;	/* Excessive legacy value */

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
	    UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_DIRECT);

	if ((status == 0) && (residp != NULL)) {
		*residp = ucmd_buf.uscsi_resid;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");

	return (status);
}


/*
 * Function:	sd_send_scsi_TEST_UNIT_READY
 *
 * Description:	Issue the scsi TEST UNIT READY command.
 *		This routine can be told to set the flag USCSI_DIAGNOSE to
 *		prevent retrying failed commands. Use this when the intent
 *		is either to check for device readiness, to clear a Unit
 *		Attention, or to clear any outstanding sense data.
 *		However, under specific conditions the expected behavior
 *		is for retries to bring a device ready, so use the flag
 *		with caution.
 *
 * Arguments:	un
 *		flag:	SD_CHECK_FOR_MEDIA: return ENXIO if no media present
 *			SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
 *			0: don't check for media present, do retries on cmd.
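 *
 *		Example (an illustrative sketch only): checking whether
 *		media is present without letting the command be retried:
 *
 *			status = sd_send_scsi_TEST_UNIT_READY(un,
 *			    SD_CHECK_FOR_MEDIA | SD_DONT_RETRY_TUR);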
 *
 * Return Code:	0   - Success
 *		EIO - IO error
 *		EACCES - Reservation conflict detected
 *		ENXIO  - Not Ready, medium not present
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context:	Can sleep. Does not return until command is completed.
 */

static int
sd_send_scsi_TEST_UNIT_READY(struct sd_lun *un, int flag)
{
	struct	scsi_extended_sense	sense_buf;
	union	scsi_cdb	cdb;
	struct	uscsi_cmd	ucmd_buf;
	int			status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);

	/*
	 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
	 * timeouts when they receive a TUR and the queue is not empty.
	 * Check the configuration flag set during attach (indicating the
	 * drive has this firmware bug) and un_ncmds_in_transport before
	 * issuing the TUR. If there are pending commands, return success
	 * without issuing the command. This is a bit arbitrary, but it is
	 * ok for non-removables (i.e. the elite1 disks) and non-clustering
	 * configurations.
	 */
	if (un->un_f_cfg_tur_check == TRUE) {
		mutex_enter(SD_MUTEX(un));
		if (un->un_ncmds_in_transport != 0) {
			mutex_exit(SD_MUTEX(un));
			return (0);
		}
		mutex_exit(SD_MUTEX(un));
	}

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	cdb.scc_cmd = SCMD_TEST_UNIT_READY;

	ucmd_buf.uscsi_cdb	= (char *)&cdb;
	ucmd_buf.uscsi_cdblen	= CDB_GROUP0;
	ucmd_buf.uscsi_bufaddr	= NULL;
	ucmd_buf.uscsi_buflen	= 0;
	ucmd_buf.uscsi_rqbuf	= (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen	= sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags	= USCSI_RQENABLE | USCSI_SILENT;

	/* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */
	if ((flag & SD_DONT_RETRY_TUR) != 0) {
		ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
	}
	ucmd_buf.uscsi_timeout	= 60;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE,
	    UIO_SYSSPACE, UIO_SYSSPACE,
	    ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT : SD_PATH_STANDARD));

	switch (status) {
	case 0:
		break;	/* Success! */
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			if ((flag & SD_CHECK_FOR_MEDIA) == 0) {
				break;
			}
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (sense_buf.es_key == KEY_NOT_READY) &&
			    (sense_buf.es_add_code == 0x3A)) {
				status = ENXIO;
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n");

	return (status);
}


/*
 * Function:	sd_send_scsi_PERSISTENT_RESERVE_IN
 *
 * Description:	Issue the scsi PERSISTENT RESERVE IN command.
 *
 * Arguments:	un
 *
 * Return Code:	0   - Success
 *		EACCES
 *		ENOTSUP
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context:	Can sleep. Does not return until command is completed.
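 *
 *		Example (an illustrative sketch only): reading the
 *		registered keys into a hypothetical caller-supplied
 *		buffer keybuf of keylen bytes:
 *
 *			status = sd_send_scsi_PERSISTENT_RESERVE_IN(un,
 *			    SD_READ_KEYS, keylen, keybuf);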
19465 */ 19466 19467 static int 19468 sd_send_scsi_PERSISTENT_RESERVE_IN(struct sd_lun *un, uchar_t usr_cmd, 19469 uint16_t data_len, uchar_t *data_bufp) 19470 { 19471 struct scsi_extended_sense sense_buf; 19472 union scsi_cdb cdb; 19473 struct uscsi_cmd ucmd_buf; 19474 int status; 19475 int no_caller_buf = FALSE; 19476 19477 ASSERT(un != NULL); 19478 ASSERT(!mutex_owned(SD_MUTEX(un))); 19479 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV)); 19480 19481 SD_TRACE(SD_LOG_IO, un, 19482 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un); 19483 19484 bzero(&cdb, sizeof (cdb)); 19485 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19486 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19487 if (data_bufp == NULL) { 19488 /* Allocate a default buf if the caller did not give one */ 19489 ASSERT(data_len == 0); 19490 data_len = MHIOC_RESV_KEY_SIZE; 19491 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP); 19492 no_caller_buf = TRUE; 19493 } 19494 19495 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN; 19496 cdb.cdb_opaque[1] = usr_cmd; 19497 FORMG1COUNT(&cdb, data_len); 19498 19499 ucmd_buf.uscsi_cdb = (char *)&cdb; 19500 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19501 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp; 19502 ucmd_buf.uscsi_buflen = data_len; 19503 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19504 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19505 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 19506 ucmd_buf.uscsi_timeout = 60; 19507 19508 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19509 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD); 19510 19511 switch (status) { 19512 case 0: 19513 break; /* Success! */ 19514 case EIO: 19515 switch (ucmd_buf.uscsi_status) { 19516 case STATUS_RESERVATION_CONFLICT: 19517 status = EACCES; 19518 break; 19519 case STATUS_CHECK: 19520 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19521 (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) { 19522 status = ENOTSUP; 19523 } 19524 break; 19525 default: 19526 break; 19527 } 19528 break; 19529 default: 19530 break; 19531 } 19532 19533 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n"); 19534 19535 if (no_caller_buf == TRUE) { 19536 kmem_free(data_bufp, data_len); 19537 } 19538 19539 return (status); 19540 } 19541 19542 19543 /* 19544 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT 19545 * 19546 * Description: This routine is the driver entry point for handling CD-ROM 19547 * multi-host persistent reservation requests (MHIOCGRP_INKEYS, 19548 * MHIOCGRP_INRESV) by sending the SCSI-3 PROUT commands to the 19549 * device. 19550 * 19551 * Arguments: un - Pointer to soft state struct for the target. 19552 * usr_cmd SCSI-3 reservation facility command (one of 19553 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE, 19554 * SD_SCSI3_PREEMPTANDABORT) 19555 * usr_bufp - user provided pointer register, reserve descriptor or 19556 * preempt and abort structure (mhioc_register_t, 19557 * mhioc_resv_desc_t, mhioc_preemptandabort_t) 19558 * 19559 * Return Code: 0 - Success 19560 * EACCES 19561 * ENOTSUP 19562 * errno return code from sd_send_scsi_cmd() 19563 * 19564 * Context: Can sleep. Does not return until command is completed. 
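 *
 *		Example (an illustrative sketch only): registering a key,
 *		where mhreg is a hypothetical, filled-in mhioc_register_t:
 *
 *			status = sd_send_scsi_PERSISTENT_RESERVE_OUT(un,
 *			    SD_SCSI3_REGISTER, (uchar_t *)&mhreg);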
19565 */ 19566 19567 static int 19568 sd_send_scsi_PERSISTENT_RESERVE_OUT(struct sd_lun *un, uchar_t usr_cmd, 19569 uchar_t *usr_bufp) 19570 { 19571 struct scsi_extended_sense sense_buf; 19572 union scsi_cdb cdb; 19573 struct uscsi_cmd ucmd_buf; 19574 int status; 19575 uchar_t data_len = sizeof (sd_prout_t); 19576 sd_prout_t *prp; 19577 19578 ASSERT(un != NULL); 19579 ASSERT(!mutex_owned(SD_MUTEX(un))); 19580 ASSERT(data_len == 24); /* required by scsi spec */ 19581 19582 SD_TRACE(SD_LOG_IO, un, 19583 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un); 19584 19585 if (usr_bufp == NULL) { 19586 return (EINVAL); 19587 } 19588 19589 bzero(&cdb, sizeof (cdb)); 19590 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19591 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19592 prp = kmem_zalloc(data_len, KM_SLEEP); 19593 19594 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; 19595 cdb.cdb_opaque[1] = usr_cmd; 19596 FORMG1COUNT(&cdb, data_len); 19597 19598 ucmd_buf.uscsi_cdb = (char *)&cdb; 19599 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19600 ucmd_buf.uscsi_bufaddr = (caddr_t)prp; 19601 ucmd_buf.uscsi_buflen = data_len; 19602 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19603 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19604 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 19605 ucmd_buf.uscsi_timeout = 60; 19606 19607 switch (usr_cmd) { 19608 case SD_SCSI3_REGISTER: { 19609 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp; 19610 19611 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19612 bcopy(ptr->newkey.key, prp->service_key, 19613 MHIOC_RESV_KEY_SIZE); 19614 prp->aptpl = ptr->aptpl; 19615 break; 19616 } 19617 case SD_SCSI3_RESERVE: 19618 case SD_SCSI3_RELEASE: { 19619 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp; 19620 19621 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19622 prp->scope_address = BE_32(ptr->scope_specific_addr); 19623 cdb.cdb_opaque[2] = ptr->type; 19624 break; 19625 } 19626 case SD_SCSI3_PREEMPTANDABORT: { 19627 mhioc_preemptandabort_t *ptr = 19628 (mhioc_preemptandabort_t *)usr_bufp; 19629 19630 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE); 19631 bcopy(ptr->victim_key.key, prp->service_key, 19632 MHIOC_RESV_KEY_SIZE); 19633 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr); 19634 cdb.cdb_opaque[2] = ptr->resvdesc.type; 19635 ucmd_buf.uscsi_flags |= USCSI_HEAD; 19636 break; 19637 } 19638 case SD_SCSI3_REGISTERANDIGNOREKEY: 19639 { 19640 mhioc_registerandignorekey_t *ptr; 19641 ptr = (mhioc_registerandignorekey_t *)usr_bufp; 19642 bcopy(ptr->newkey.key, 19643 prp->service_key, MHIOC_RESV_KEY_SIZE); 19644 prp->aptpl = ptr->aptpl; 19645 break; 19646 } 19647 default: 19648 ASSERT(FALSE); 19649 break; 19650 } 19651 19652 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19653 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD); 19654 19655 switch (status) { 19656 case 0: 19657 break; /* Success! 
*/ 19658 case EIO: 19659 switch (ucmd_buf.uscsi_status) { 19660 case STATUS_RESERVATION_CONFLICT: 19661 status = EACCES; 19662 break; 19663 case STATUS_CHECK: 19664 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19665 (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) { 19666 status = ENOTSUP; 19667 } 19668 break; 19669 default: 19670 break; 19671 } 19672 break; 19673 default: 19674 break; 19675 } 19676 19677 kmem_free(prp, data_len); 19678 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n"); 19679 return (status); 19680 } 19681 19682 19683 /* 19684 * Function: sd_send_scsi_SYNCHRONIZE_CACHE 19685 * 19686 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target 19687 * 19688 * Arguments: un - pointer to the target's soft state struct 19689 * 19690 * Return Code: 0 - success 19691 * errno-type error code 19692 * 19693 * Context: kernel thread context only. 19694 */ 19695 19696 static int 19697 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un) 19698 { 19699 struct scsi_extended_sense sense_buf; 19700 union scsi_cdb cdb; 19701 struct uscsi_cmd ucmd_buf; 19702 int status; 19703 19704 ASSERT(un != NULL); 19705 ASSERT(!mutex_owned(SD_MUTEX(un))); 19706 19707 SD_TRACE(SD_LOG_IO, un, 19708 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un); 19709 19710 bzero(&cdb, sizeof (cdb)); 19711 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19712 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19713 19714 cdb.scc_cmd = SCMD_SYNCHRONIZE_CACHE; 19715 19716 ucmd_buf.uscsi_cdb = (char *)&cdb; 19717 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 19718 ucmd_buf.uscsi_bufaddr = NULL; 19719 ucmd_buf.uscsi_buflen = 0; 19720 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 19721 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 19722 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT; 19723 ucmd_buf.uscsi_timeout = 240; 19724 19725 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 19726 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_DIRECT); 19727 19728 switch (status) { 19729 case 0: 19730 break; /* Success! */ 19731 case EIO: 19732 switch (ucmd_buf.uscsi_status) { 19733 case STATUS_RESERVATION_CONFLICT: 19734 /* Ignore reservation conflict */ 19735 status = 0; 19736 goto done; 19737 19738 case STATUS_CHECK: 19739 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 19740 (sense_buf.es_key == KEY_ILLEGAL_REQUEST)) { 19741 /* Ignore Illegal Request error */ 19742 status = 0; 19743 goto done; 19744 } 19745 break; 19746 default: 19747 break; 19748 } 19749 /* FALLTHRU */ 19750 default: 19751 /* Ignore error if the media is not present. */ 19752 if (sd_send_scsi_TEST_UNIT_READY(un, 0) != 0) { 19753 status = 0; 19754 goto done; 19755 } 19756 /* If we reach this, we had an error */ 19757 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 19758 "SYNCHRONIZE CACHE command failed (%d)\n", status); 19759 break; 19760 } 19761 19762 done: 19763 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: exit\n"); 19764 19765 return (status); 19766 } 19767 19768 19769 /* 19770 * Function: sd_send_scsi_GET_CONFIGURATION 19771 * 19772 * Description: Issues the get configuration command to the device. 19773 * Called from sd_check_for_writable_cd & sd_get_media_info 19774 * caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN 19775 * Arguments: un 19776 * ucmdbuf 19777 * rqbuf 19778 * rqbuflen 19779 * bufaddr 19780 * buflen 19781 * 19782 * Return Code: 0 - Success 19783 * errno return code from sd_send_scsi_cmd() 19784 * 19785 * Context: Can sleep. Does not return until command is completed. 
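 *
 *		Example (an illustrative sketch only; com is a hypothetical
 *		struct uscsi_cmd, and out_data and rqbuf are hypothetical
 *		caller-allocated buffers):
 *
 *			status = sd_send_scsi_GET_CONFIGURATION(un, &com,
 *			    rqbuf, SENSE_LENGTH, out_data,
 *			    SD_PROFILE_HEADER_LEN);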
 *
 */

static int
sd_send_scsi_GET_CONFIGURATION(struct sd_lun *un, struct uscsi_cmd *ucmdbuf,
	uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen)
{
	char	cdb[CDB_GROUP1];
	int	status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT(ucmdbuf != NULL);
	ASSERT(rqbuf != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);

	bzero(cdb, sizeof (cdb));
	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
	bzero(rqbuf, rqbuflen);
	bzero(bufaddr, buflen);

	/*
	 * Set up cdb field for the get configuration command.
	 */
	cdb[0] = SCMD_GET_CONFIGURATION;
	cdb[1] = 0x02;  /* Requested Type */
	cdb[8] = SD_PROFILE_HEADER_LEN;
	ucmdbuf->uscsi_cdb = cdb;
	ucmdbuf->uscsi_cdblen = CDB_GROUP1;
	ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
	ucmdbuf->uscsi_buflen = buflen;
	ucmdbuf->uscsi_timeout = sd_io_time;
	ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
	ucmdbuf->uscsi_rqlen = rqbuflen;
	ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;

	status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, UIO_SYSSPACE,
	    UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD);

	switch (status) {
	case 0:
		break;  /* Success! */
	case EIO:
		switch (ucmdbuf->uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO,
		    "sd_send_scsi_GET_CONFIGURATION: data",
		    (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_CONFIGURATION: exit\n");

	return (status);
}

/*
 * Function:	sd_send_scsi_feature_GET_CONFIGURATION
 *
 * Description:	Issues the get configuration command to the device to
 *		retrieve a specific feature. Called from
 *		sd_check_for_writable_cd & sd_set_mmc_caps.
 * Arguments:	un
 *		ucmdbuf
 *		rqbuf
 *		rqbuflen
 *		bufaddr
 *		buflen
 *		feature
 *
 * Return Code:	0   - Success
 *		errno return code from sd_send_scsi_cmd()
 *
 * Context:	Can sleep. Does not return until command is completed.
 *
 */
static int
sd_send_scsi_feature_GET_CONFIGURATION(struct sd_lun *un,
	struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
	uchar_t *bufaddr, uint_t buflen, char feature)
{
	char	cdb[CDB_GROUP1];
	int	status;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT(ucmdbuf != NULL);
	ASSERT(rqbuf != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un);

	bzero(cdb, sizeof (cdb));
	bzero(ucmdbuf, sizeof (struct uscsi_cmd));
	bzero(rqbuf, rqbuflen);
	bzero(bufaddr, buflen);

	/*
	 * Set up cdb field for the get configuration command.
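	 * The CDB built here is GET CONFIGURATION (MMC opcode 0x46) with
	 * an RT field of 0x02 in byte 1, asking the device to return only
	 * the descriptor for the single feature named in bytes 2-3 (the
	 * low byte of which is set from 'feature' below); bytes 7-8 carry
	 * the allocation length.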
19900 */ 19901 cdb[0] = SCMD_GET_CONFIGURATION; 19902 cdb[1] = 0x02; /* Requested Type */ 19903 cdb[3] = feature; 19904 cdb[8] = buflen; 19905 ucmdbuf->uscsi_cdb = cdb; 19906 ucmdbuf->uscsi_cdblen = CDB_GROUP1; 19907 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr; 19908 ucmdbuf->uscsi_buflen = buflen; 19909 ucmdbuf->uscsi_timeout = sd_io_time; 19910 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf; 19911 ucmdbuf->uscsi_rqlen = rqbuflen; 19912 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ; 19913 19914 status = sd_send_scsi_cmd(SD_GET_DEV(un), ucmdbuf, UIO_SYSSPACE, 19915 UIO_SYSSPACE, UIO_SYSSPACE, SD_PATH_STANDARD); 19916 19917 switch (status) { 19918 case 0: 19919 break; /* Success! */ 19920 case EIO: 19921 switch (ucmdbuf->uscsi_status) { 19922 case STATUS_RESERVATION_CONFLICT: 19923 status = EACCES; 19924 break; 19925 default: 19926 break; 19927 } 19928 break; 19929 default: 19930 break; 19931 } 19932 19933 if (status == 0) { 19934 SD_DUMP_MEMORY(un, SD_LOG_IO, 19935 "sd_send_scsi_feature_GET_CONFIGURATION: data", 19936 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX); 19937 } 19938 19939 SD_TRACE(SD_LOG_IO, un, 19940 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n"); 19941 19942 return (status); 19943 } 19944 19945 19946 /* 19947 * Function: sd_send_scsi_MODE_SENSE 19948 * 19949 * Description: Utility function for issuing a scsi MODE SENSE command. 19950 * Note: This routine uses a consistent implementation for Group0, 19951 * Group1, and Group2 commands across all platforms. ATAPI devices 19952 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 19953 * 19954 * Arguments: un - pointer to the softstate struct for the target. 19955 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 19956 * CDB_GROUP[1|2] (10 byte). 19957 * bufaddr - buffer for page data retrieved from the target. 19958 * buflen - size of page to be retrieved. 19959 * page_code - page code of data to be retrieved from the target. 19960 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 19961 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 19962 * to use the USCSI "direct" chain and bypass the normal 19963 * command waitq. 19964 * 19965 * Return Code: 0 - Success 19966 * errno return code from sd_send_scsi_cmd() 19967 * 19968 * Context: Can sleep. Does not return until command is completed. 
19969 */ 19970 19971 static int 19972 sd_send_scsi_MODE_SENSE(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 19973 size_t buflen, uchar_t page_code, int path_flag) 19974 { 19975 struct scsi_extended_sense sense_buf; 19976 union scsi_cdb cdb; 19977 struct uscsi_cmd ucmd_buf; 19978 int status; 19979 19980 ASSERT(un != NULL); 19981 ASSERT(!mutex_owned(SD_MUTEX(un))); 19982 ASSERT(bufaddr != NULL); 19983 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 19984 (cdbsize == CDB_GROUP2)); 19985 19986 SD_TRACE(SD_LOG_IO, un, 19987 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un); 19988 19989 bzero(&cdb, sizeof (cdb)); 19990 bzero(&ucmd_buf, sizeof (ucmd_buf)); 19991 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 19992 bzero(bufaddr, buflen); 19993 19994 if (cdbsize == CDB_GROUP0) { 19995 cdb.scc_cmd = SCMD_MODE_SENSE; 19996 cdb.cdb_opaque[2] = page_code; 19997 FORMG0COUNT(&cdb, buflen); 19998 } else { 19999 cdb.scc_cmd = SCMD_MODE_SENSE_G1; 20000 cdb.cdb_opaque[2] = page_code; 20001 FORMG1COUNT(&cdb, buflen); 20002 } 20003 20004 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 20005 20006 ucmd_buf.uscsi_cdb = (char *)&cdb; 20007 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 20008 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20009 ucmd_buf.uscsi_buflen = buflen; 20010 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20011 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20012 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20013 ucmd_buf.uscsi_timeout = 60; 20014 20015 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 20016 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 20017 20018 switch (status) { 20019 case 0: 20020 break; /* Success! */ 20021 case EIO: 20022 switch (ucmd_buf.uscsi_status) { 20023 case STATUS_RESERVATION_CONFLICT: 20024 status = EACCES; 20025 break; 20026 default: 20027 break; 20028 } 20029 break; 20030 default: 20031 break; 20032 } 20033 20034 if (status == 0) { 20035 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data", 20036 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20037 } 20038 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n"); 20039 20040 return (status); 20041 } 20042 20043 20044 /* 20045 * Function: sd_send_scsi_MODE_SELECT 20046 * 20047 * Description: Utility function for issuing a scsi MODE SELECT command. 20048 * Note: This routine uses a consistent implementation for Group0, 20049 * Group1, and Group2 commands across all platforms. ATAPI devices 20050 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select 20051 * 20052 * Arguments: un - pointer to the softstate struct for the target. 20053 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or 20054 * CDB_GROUP[1|2] (10 byte). 20055 * bufaddr - buffer for page data retrieved from the target. 20056 * buflen - size of page to be retrieved. 20057 * save_page - boolean to determin if SP bit should be set. 20058 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20059 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20060 * to use the USCSI "direct" chain and bypass the normal 20061 * command waitq. 20062 * 20063 * Return Code: 0 - Success 20064 * errno return code from sd_send_scsi_cmd() 20065 * 20066 * Context: Can sleep. Does not return until command is completed. 
20067 */ 20068 20069 static int 20070 sd_send_scsi_MODE_SELECT(struct sd_lun *un, int cdbsize, uchar_t *bufaddr, 20071 size_t buflen, uchar_t save_page, int path_flag) 20072 { 20073 struct scsi_extended_sense sense_buf; 20074 union scsi_cdb cdb; 20075 struct uscsi_cmd ucmd_buf; 20076 int status; 20077 20078 ASSERT(un != NULL); 20079 ASSERT(!mutex_owned(SD_MUTEX(un))); 20080 ASSERT(bufaddr != NULL); 20081 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) || 20082 (cdbsize == CDB_GROUP2)); 20083 20084 SD_TRACE(SD_LOG_IO, un, 20085 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un); 20086 20087 bzero(&cdb, sizeof (cdb)); 20088 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20089 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20090 20091 /* Set the PF bit for many third party drives */ 20092 cdb.cdb_opaque[1] = 0x10; 20093 20094 /* Set the savepage(SP) bit if given */ 20095 if (save_page == SD_SAVE_PAGE) { 20096 cdb.cdb_opaque[1] |= 0x01; 20097 } 20098 20099 if (cdbsize == CDB_GROUP0) { 20100 cdb.scc_cmd = SCMD_MODE_SELECT; 20101 FORMG0COUNT(&cdb, buflen); 20102 } else { 20103 cdb.scc_cmd = SCMD_MODE_SELECT_G1; 20104 FORMG1COUNT(&cdb, buflen); 20105 } 20106 20107 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 20108 20109 ucmd_buf.uscsi_cdb = (char *)&cdb; 20110 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 20111 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20112 ucmd_buf.uscsi_buflen = buflen; 20113 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20114 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20115 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT; 20116 ucmd_buf.uscsi_timeout = 60; 20117 20118 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 20119 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 20120 20121 switch (status) { 20122 case 0: 20123 break; /* Success! */ 20124 case EIO: 20125 switch (ucmd_buf.uscsi_status) { 20126 case STATUS_RESERVATION_CONFLICT: 20127 status = EACCES; 20128 break; 20129 default: 20130 break; 20131 } 20132 break; 20133 default: 20134 break; 20135 } 20136 20137 if (status == 0) { 20138 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data", 20139 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20140 } 20141 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n"); 20142 20143 return (status); 20144 } 20145 20146 20147 /* 20148 * Function: sd_send_scsi_RDWR 20149 * 20150 * Description: Issue a scsi READ or WRITE command with the given parameters. 20151 * 20152 * Arguments: un: Pointer to the sd_lun struct for the target. 20153 * cmd: SCMD_READ or SCMD_WRITE 20154 * bufaddr: Address of caller's buffer to receive the RDWR data 20155 * buflen: Length of caller's buffer receive the RDWR data. 20156 * start_block: Block number for the start of the RDWR operation. 20157 * (Assumes target-native block size.) 20158 * residp: Pointer to variable to receive the redisual of the 20159 * RDWR operation (may be NULL of no residual requested). 20160 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and 20161 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY 20162 * to use the USCSI "direct" chain and bypass the normal 20163 * command waitq. 20164 * 20165 * Return Code: 0 - Success 20166 * errno return code from sd_send_scsi_cmd() 20167 * 20168 * Context: Can sleep. Does not return until command is completed. 
20169 */ 20170 20171 static int 20172 sd_send_scsi_RDWR(struct sd_lun *un, uchar_t cmd, void *bufaddr, 20173 size_t buflen, daddr_t start_block, int path_flag) 20174 { 20175 struct scsi_extended_sense sense_buf; 20176 union scsi_cdb cdb; 20177 struct uscsi_cmd ucmd_buf; 20178 uint32_t block_count; 20179 int status; 20180 int cdbsize; 20181 uchar_t flag; 20182 20183 ASSERT(un != NULL); 20184 ASSERT(!mutex_owned(SD_MUTEX(un))); 20185 ASSERT(bufaddr != NULL); 20186 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE)); 20187 20188 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un); 20189 20190 if (un->un_f_tgt_blocksize_is_valid != TRUE) { 20191 return (EINVAL); 20192 } 20193 20194 mutex_enter(SD_MUTEX(un)); 20195 block_count = SD_BYTES2TGTBLOCKS(un, buflen); 20196 mutex_exit(SD_MUTEX(un)); 20197 20198 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE; 20199 20200 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: " 20201 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n", 20202 bufaddr, buflen, start_block, block_count); 20203 20204 bzero(&cdb, sizeof (cdb)); 20205 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20206 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20207 20208 /* Compute CDB size to use */ 20209 if (start_block > 0xffffffff) 20210 cdbsize = CDB_GROUP4; 20211 else if ((start_block & 0xFFE00000) || 20212 (un->un_f_cfg_is_atapi == TRUE)) 20213 cdbsize = CDB_GROUP1; 20214 else 20215 cdbsize = CDB_GROUP0; 20216 20217 switch (cdbsize) { 20218 case CDB_GROUP0: /* 6-byte CDBs */ 20219 cdb.scc_cmd = cmd; 20220 FORMG0ADDR(&cdb, start_block); 20221 FORMG0COUNT(&cdb, block_count); 20222 break; 20223 case CDB_GROUP1: /* 10-byte CDBs */ 20224 cdb.scc_cmd = cmd | SCMD_GROUP1; 20225 FORMG1ADDR(&cdb, start_block); 20226 FORMG1COUNT(&cdb, block_count); 20227 break; 20228 case CDB_GROUP4: /* 16-byte CDBs */ 20229 cdb.scc_cmd = cmd | SCMD_GROUP4; 20230 FORMG4LONGADDR(&cdb, (uint64_t)start_block); 20231 FORMG4COUNT(&cdb, block_count); 20232 break; 20233 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */ 20234 default: 20235 /* All others reserved */ 20236 return (EINVAL); 20237 } 20238 20239 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */ 20240 SD_FILL_SCSI1_LUN_CDB(un, &cdb); 20241 20242 ucmd_buf.uscsi_cdb = (char *)&cdb; 20243 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize; 20244 ucmd_buf.uscsi_bufaddr = bufaddr; 20245 ucmd_buf.uscsi_buflen = buflen; 20246 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20247 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20248 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT; 20249 ucmd_buf.uscsi_timeout = 60; 20250 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 20251 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 20252 switch (status) { 20253 case 0: 20254 break; /* Success! */ 20255 case EIO: 20256 switch (ucmd_buf.uscsi_status) { 20257 case STATUS_RESERVATION_CONFLICT: 20258 status = EACCES; 20259 break; 20260 default: 20261 break; 20262 } 20263 break; 20264 default: 20265 break; 20266 } 20267 20268 if (status == 0) { 20269 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data", 20270 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20271 } 20272 20273 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n"); 20274 20275 return (status); 20276 } 20277 20278 20279 /* 20280 * Function: sd_send_scsi_LOG_SENSE 20281 * 20282 * Description: Issue a scsi LOG_SENSE command with the given parameters. 20283 * 20284 * Arguments: un: Pointer to the sd_lun struct for the target. 
20285 * 20286 * Return Code: 0 - Success 20287 * errno return code from sd_send_scsi_cmd() 20288 * 20289 * Context: Can sleep. Does not return until command is completed. 20290 */ 20291 20292 static int 20293 sd_send_scsi_LOG_SENSE(struct sd_lun *un, uchar_t *bufaddr, uint16_t buflen, 20294 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, 20295 int path_flag) 20296 20297 { 20298 struct scsi_extended_sense sense_buf; 20299 union scsi_cdb cdb; 20300 struct uscsi_cmd ucmd_buf; 20301 int status; 20302 20303 ASSERT(un != NULL); 20304 ASSERT(!mutex_owned(SD_MUTEX(un))); 20305 20306 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un); 20307 20308 bzero(&cdb, sizeof (cdb)); 20309 bzero(&ucmd_buf, sizeof (ucmd_buf)); 20310 bzero(&sense_buf, sizeof (struct scsi_extended_sense)); 20311 20312 cdb.scc_cmd = SCMD_LOG_SENSE_G1; 20313 cdb.cdb_opaque[2] = (page_control << 6) | page_code; 20314 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8); 20315 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF); 20316 FORMG1COUNT(&cdb, buflen); 20317 20318 ucmd_buf.uscsi_cdb = (char *)&cdb; 20319 ucmd_buf.uscsi_cdblen = CDB_GROUP1; 20320 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr; 20321 ucmd_buf.uscsi_buflen = buflen; 20322 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf; 20323 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense); 20324 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT; 20325 ucmd_buf.uscsi_timeout = 60; 20326 20327 status = sd_send_scsi_cmd(SD_GET_DEV(un), &ucmd_buf, UIO_SYSSPACE, 20328 UIO_SYSSPACE, UIO_SYSSPACE, path_flag); 20329 20330 switch (status) { 20331 case 0: 20332 break; 20333 case EIO: 20334 switch (ucmd_buf.uscsi_status) { 20335 case STATUS_RESERVATION_CONFLICT: 20336 status = EACCES; 20337 break; 20338 case STATUS_CHECK: 20339 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) && 20340 (sense_buf.es_key == KEY_ILLEGAL_REQUEST) && 20341 (sense_buf.es_add_code == 0x24)) { 20342 /* 20343 * ASC 0x24: INVALID FIELD IN CDB 20344 */ 20345 switch (page_code) { 20346 case START_STOP_CYCLE_PAGE: 20347 /* 20348 * The start stop cycle counter is 20349 * implemented as page 0x31 in earlier 20350 * generation disks. In new generation 20351 * disks the start stop cycle counter is 20352 * implemented as page 0xE. To properly 20353 * handle this case if an attempt for 20354 * log page 0xE is made and fails we 20355 * will try again using page 0x31. 20356 * 20357 * Network storage BU committed to 20358 * maintain the page 0x31 for this 20359 * purpose and will not have any other 20360 * page implemented with page code 0x31 20361 * until all disks transition to the 20362 * standard page. 
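 *
 * Concretely, the retry below rewrites CDB byte 2 as
 * (page_control << 6) | START_STOP_CYCLE_VU_PAGE (page 0x31)
 * and reissues the command on the same path.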
20363 */ 20364 mutex_enter(SD_MUTEX(un)); 20365 un->un_start_stop_cycle_page = 20366 START_STOP_CYCLE_VU_PAGE; 20367 cdb.cdb_opaque[2] = 20368 (char)(page_control << 6) | 20369 un->un_start_stop_cycle_page; 20370 mutex_exit(SD_MUTEX(un)); 20371 status = sd_send_scsi_cmd( 20372 SD_GET_DEV(un), &ucmd_buf, 20373 UIO_SYSSPACE, UIO_SYSSPACE, 20374 UIO_SYSSPACE, path_flag); 20375 20376 break; 20377 case TEMPERATURE_PAGE: 20378 status = ENOTTY; 20379 break; 20380 default: 20381 break; 20382 } 20383 } 20384 break; 20385 default: 20386 break; 20387 } 20388 break; 20389 default: 20390 break; 20391 } 20392 20393 if (status == 0) { 20394 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data", 20395 (uchar_t *)bufaddr, buflen, SD_LOG_HEX); 20396 } 20397 20398 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n"); 20399 20400 return (status); 20401 } 20402 20403 20404 /* 20405 * Function: sdioctl 20406 * 20407 * Description: Driver's ioctl(9e) entry point function. 20408 * 20409 * Arguments: dev - device number 20410 * cmd - ioctl operation to be performed 20411 * arg - user argument, contains data to be set or reference 20412 * parameter for get 20413 * flag - bit flag, indicating open settings, 32/64 bit type 20414 * cred_p - user credential pointer 20415 * rval_p - calling process return value (OPT) 20416 * 20417 * Return Code: EINVAL 20418 * ENOTTY 20419 * ENXIO 20420 * EIO 20421 * EFAULT 20422 * ENOTSUP 20423 * EPERM 20424 * 20425 * Context: Called from the device switch at normal priority. 20426 */ 20427 20428 static int 20429 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p) 20430 { 20431 struct sd_lun *un = NULL; 20432 int geom_validated = FALSE; 20433 int err = 0; 20434 int i = 0; 20435 cred_t *cr; 20436 20437 /* 20438 * All device accesses go thru sdstrategy where we check on suspend 20439 * status 20440 */ 20441 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 20442 return (ENXIO); 20443 } 20444 20445 ASSERT(!mutex_owned(SD_MUTEX(un))); 20446 20447 /* 20448 * Moved this wait from sd_uscsi_strategy to here for 20449 * reasons of deadlock prevention. Internal driver commands, 20450 * specifically those to change a devices power level, result 20451 * in a call to sd_uscsi_strategy. 20452 */ 20453 mutex_enter(SD_MUTEX(un)); 20454 while ((un->un_state == SD_STATE_SUSPENDED) || 20455 (un->un_state == SD_STATE_PM_CHANGING)) { 20456 cv_wait(&un->un_suspend_cv, SD_MUTEX(un)); 20457 } 20458 /* 20459 * Twiddling the counter here protects commands from now 20460 * through to the top of sd_uscsi_strategy. Without the 20461 * counter inc. a power down, for example, could get in 20462 * after the above check for state is made and before 20463 * execution gets to the top of sd_uscsi_strategy. 20464 * That would cause problems. 
20465 */ 20466 un->un_ncmds_in_driver++; 20467 20468 if ((un->un_f_geometry_is_valid == FALSE) && 20469 (flag & (FNDELAY | FNONBLOCK))) { 20470 switch (cmd) { 20471 case CDROMPAUSE: 20472 case CDROMRESUME: 20473 case CDROMPLAYMSF: 20474 case CDROMPLAYTRKIND: 20475 case CDROMREADTOCHDR: 20476 case CDROMREADTOCENTRY: 20477 case CDROMSTOP: 20478 case CDROMSTART: 20479 case CDROMVOLCTRL: 20480 case CDROMSUBCHNL: 20481 case CDROMREADMODE2: 20482 case CDROMREADMODE1: 20483 case CDROMREADOFFSET: 20484 case CDROMSBLKMODE: 20485 case CDROMGBLKMODE: 20486 case CDROMGDRVSPEED: 20487 case CDROMSDRVSPEED: 20488 case CDROMCDDA: 20489 case CDROMCDXA: 20490 case CDROMSUBCODE: 20491 if (!ISCD(un)) { 20492 un->un_ncmds_in_driver--; 20493 ASSERT(un->un_ncmds_in_driver >= 0); 20494 mutex_exit(SD_MUTEX(un)); 20495 return (ENOTTY); 20496 } 20497 break; 20498 case FDEJECT: 20499 case DKIOCEJECT: 20500 case CDROMEJECT: 20501 if (!ISREMOVABLE(un)) { 20502 un->un_ncmds_in_driver--; 20503 ASSERT(un->un_ncmds_in_driver >= 0); 20504 mutex_exit(SD_MUTEX(un)); 20505 return (ENOTTY); 20506 } 20507 break; 20508 case DKIOCSVTOC: 20509 case DKIOCSETEFI: 20510 case DKIOCSMBOOT: 20511 mutex_exit(SD_MUTEX(un)); 20512 err = sd_send_scsi_TEST_UNIT_READY(un, 0); 20513 if (err != 0) { 20514 mutex_enter(SD_MUTEX(un)); 20515 un->un_ncmds_in_driver--; 20516 ASSERT(un->un_ncmds_in_driver >= 0); 20517 mutex_exit(SD_MUTEX(un)); 20518 return (EIO); 20519 } 20520 mutex_enter(SD_MUTEX(un)); 20521 /* FALLTHROUGH */ 20522 case DKIOCREMOVABLE: 20523 case DKIOCINFO: 20524 case DKIOCGMEDIAINFO: 20525 case MHIOCENFAILFAST: 20526 case MHIOCSTATUS: 20527 case MHIOCTKOWN: 20528 case MHIOCRELEASE: 20529 case MHIOCGRP_INKEYS: 20530 case MHIOCGRP_INRESV: 20531 case MHIOCGRP_REGISTER: 20532 case MHIOCGRP_RESERVE: 20533 case MHIOCGRP_PREEMPTANDABORT: 20534 case MHIOCGRP_REGISTERANDIGNOREKEY: 20535 case CDROMCLOSETRAY: 20536 case USCSICMD: 20537 goto skip_ready_valid; 20538 default: 20539 break; 20540 } 20541 20542 mutex_exit(SD_MUTEX(un)); 20543 err = sd_ready_and_valid(un); 20544 mutex_enter(SD_MUTEX(un)); 20545 if (err == SD_READY_NOT_VALID) { 20546 switch (cmd) { 20547 case DKIOCGAPART: 20548 case DKIOCGGEOM: 20549 case DKIOCSGEOM: 20550 case DKIOCGVTOC: 20551 case DKIOCSVTOC: 20552 case DKIOCSAPART: 20553 case DKIOCG_PHYGEOM: 20554 case DKIOCG_VIRTGEOM: 20555 err = ENOTSUP; 20556 un->un_ncmds_in_driver--; 20557 ASSERT(un->un_ncmds_in_driver >= 0); 20558 mutex_exit(SD_MUTEX(un)); 20559 return (err); 20560 } 20561 } 20562 if (err != SD_READY_VALID) { 20563 switch (cmd) { 20564 case DKIOCSTATE: 20565 case CDROMGDRVSPEED: 20566 case CDROMSDRVSPEED: 20567 case FDEJECT: /* for eject command */ 20568 case DKIOCEJECT: 20569 case CDROMEJECT: 20570 case DKIOCGETEFI: 20571 case DKIOCSGEOM: 20572 case DKIOCREMOVABLE: 20573 case DKIOCSAPART: 20574 case DKIOCSETEFI: 20575 break; 20576 default: 20577 if (ISREMOVABLE(un)) { 20578 err = ENXIO; 20579 } else { 20580 /* Do not map EACCES to EIO */ 20581 if (err != EACCES) 20582 err = EIO; 20583 } 20584 un->un_ncmds_in_driver--; 20585 ASSERT(un->un_ncmds_in_driver >= 0); 20586 mutex_exit(SD_MUTEX(un)); 20587 return (err); 20588 } 20589 } 20590 geom_validated = TRUE; 20591 } 20592 if ((un->un_f_geometry_is_valid == TRUE) && 20593 (un->un_solaris_size > 0)) { 20594 /* 20595 * the "geometry_is_valid" flag could be true if we 20596 * have an fdisk table but no Solaris partition 20597 */ 20598 if (un->un_vtoc.v_sanity != VTOC_SANE) { 20599 /* it is EFI, so return ENOTSUP for these */ 20600 switch (cmd) { 20601 case 
DKIOCGAPART: 20602 case DKIOCGGEOM: 20603 case DKIOCGVTOC: 20604 case DKIOCSVTOC: 20605 case DKIOCSAPART: 20606 err = ENOTSUP; 20607 un->un_ncmds_in_driver--; 20608 ASSERT(un->un_ncmds_in_driver >= 0); 20609 mutex_exit(SD_MUTEX(un)); 20610 return (err); 20611 } 20612 } 20613 } 20614 20615 skip_ready_valid: 20616 mutex_exit(SD_MUTEX(un)); 20617 20618 switch (cmd) { 20619 case DKIOCINFO: 20620 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n"); 20621 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag); 20622 break; 20623 20624 case DKIOCGMEDIAINFO: 20625 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n"); 20626 err = sd_get_media_info(dev, (caddr_t)arg, flag); 20627 break; 20628 20629 case DKIOCGGEOM: 20630 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGGEOM\n"); 20631 err = sd_dkio_get_geometry(dev, (caddr_t)arg, flag, 20632 geom_validated); 20633 break; 20634 20635 case DKIOCSGEOM: 20636 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSGEOM\n"); 20637 err = sd_dkio_set_geometry(dev, (caddr_t)arg, flag); 20638 break; 20639 20640 case DKIOCGAPART: 20641 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGAPART\n"); 20642 err = sd_dkio_get_partition(dev, (caddr_t)arg, flag, 20643 geom_validated); 20644 break; 20645 20646 case DKIOCSAPART: 20647 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSAPART\n"); 20648 err = sd_dkio_set_partition(dev, (caddr_t)arg, flag); 20649 break; 20650 20651 case DKIOCGVTOC: 20652 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGVTOC\n"); 20653 err = sd_dkio_get_vtoc(dev, (caddr_t)arg, flag, 20654 geom_validated); 20655 break; 20656 20657 case DKIOCGETEFI: 20658 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGETEFI\n"); 20659 err = sd_dkio_get_efi(dev, (caddr_t)arg, flag); 20660 break; 20661 20662 case DKIOCPARTITION: 20663 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCPARTITION\n"); 20664 err = sd_dkio_partition(dev, (caddr_t)arg, flag); 20665 break; 20666 20667 case DKIOCSVTOC: 20668 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSVTOC\n"); 20669 err = sd_dkio_set_vtoc(dev, (caddr_t)arg, flag); 20670 break; 20671 20672 case DKIOCSETEFI: 20673 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSETEFI\n"); 20674 err = sd_dkio_set_efi(dev, (caddr_t)arg, flag); 20675 break; 20676 20677 case DKIOCGMBOOT: 20678 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMBOOT\n"); 20679 err = sd_dkio_get_mboot(dev, (caddr_t)arg, flag); 20680 break; 20681 20682 case DKIOCSMBOOT: 20683 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSMBOOT\n"); 20684 err = sd_dkio_set_mboot(dev, (caddr_t)arg, flag); 20685 break; 20686 20687 case DKIOCLOCK: 20688 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n"); 20689 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT, 20690 SD_PATH_STANDARD); 20691 break; 20692 20693 case DKIOCUNLOCK: 20694 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n"); 20695 err = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 20696 SD_PATH_STANDARD); 20697 break; 20698 20699 case DKIOCSTATE: { 20700 enum dkio_state state; 20701 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n"); 20702 20703 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) { 20704 err = EFAULT; 20705 } else { 20706 err = sd_check_media(dev, state); 20707 if (err == 0) { 20708 if (ddi_copyout(&un->un_mediastate, (void *)arg, 20709 sizeof (int), flag) != 0) 20710 err = EFAULT; 20711 } 20712 } 20713 break; 20714 } 20715 20716 case DKIOCREMOVABLE: 20717 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n"); 20718 if (ISREMOVABLE(un)) { 20719 i = 1; 20720 } else { 20721 i = 0; 20722 } 20723 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) { 20724 err = EFAULT; 20725 } else { 20726 err = 0; 20727 } 20728 break; 20729 20730 case DKIOCGTEMPERATURE: 20731 SD_TRACE(SD_LOG_IOCTL, un, 
"DKIOCGTEMPERATURE\n"); 20732 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag); 20733 break; 20734 20735 case MHIOCENFAILFAST: 20736 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n"); 20737 if ((err = drv_priv(cred_p)) == 0) { 20738 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag); 20739 } 20740 break; 20741 20742 case MHIOCTKOWN: 20743 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n"); 20744 if ((err = drv_priv(cred_p)) == 0) { 20745 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag); 20746 } 20747 break; 20748 20749 case MHIOCRELEASE: 20750 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n"); 20751 if ((err = drv_priv(cred_p)) == 0) { 20752 err = sd_mhdioc_release(dev); 20753 } 20754 break; 20755 20756 case MHIOCSTATUS: 20757 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n"); 20758 if ((err = drv_priv(cred_p)) == 0) { 20759 switch (sd_send_scsi_TEST_UNIT_READY(un, 0)) { 20760 case 0: 20761 err = 0; 20762 break; 20763 case EACCES: 20764 *rval_p = 1; 20765 err = 0; 20766 break; 20767 default: 20768 err = EIO; 20769 break; 20770 } 20771 } 20772 break; 20773 20774 case MHIOCQRESERVE: 20775 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n"); 20776 if ((err = drv_priv(cred_p)) == 0) { 20777 err = sd_reserve_release(dev, SD_RESERVE); 20778 } 20779 break; 20780 20781 case MHIOCREREGISTERDEVID: 20782 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n"); 20783 if (drv_priv(cred_p) == EPERM) { 20784 err = EPERM; 20785 } else if (ISREMOVABLE(un) || ISCD(un)) { 20786 err = ENOTTY; 20787 } else { 20788 err = sd_mhdioc_register_devid(dev); 20789 } 20790 break; 20791 20792 case MHIOCGRP_INKEYS: 20793 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n"); 20794 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20795 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20796 err = ENOTSUP; 20797 } else { 20798 err = sd_mhdioc_inkeys(dev, (caddr_t)arg, 20799 flag); 20800 } 20801 } 20802 break; 20803 20804 case MHIOCGRP_INRESV: 20805 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n"); 20806 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) { 20807 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20808 err = ENOTSUP; 20809 } else { 20810 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag); 20811 } 20812 } 20813 break; 20814 20815 case MHIOCGRP_REGISTER: 20816 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n"); 20817 if ((err = drv_priv(cred_p)) != EPERM) { 20818 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20819 err = ENOTSUP; 20820 } else if (arg != NULL) { 20821 mhioc_register_t reg; 20822 if (ddi_copyin((void *)arg, ®, 20823 sizeof (mhioc_register_t), flag) != 0) { 20824 err = EFAULT; 20825 } else { 20826 err = 20827 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20828 un, SD_SCSI3_REGISTER, 20829 (uchar_t *)®); 20830 } 20831 } 20832 } 20833 break; 20834 20835 case MHIOCGRP_RESERVE: 20836 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n"); 20837 if ((err = drv_priv(cred_p)) != EPERM) { 20838 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20839 err = ENOTSUP; 20840 } else if (arg != NULL) { 20841 mhioc_resv_desc_t resv_desc; 20842 if (ddi_copyin((void *)arg, &resv_desc, 20843 sizeof (mhioc_resv_desc_t), flag) != 0) { 20844 err = EFAULT; 20845 } else { 20846 err = 20847 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20848 un, SD_SCSI3_RESERVE, 20849 (uchar_t *)&resv_desc); 20850 } 20851 } 20852 } 20853 break; 20854 20855 case MHIOCGRP_PREEMPTANDABORT: 20856 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 20857 if ((err = drv_priv(cred_p)) != EPERM) { 20858 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20859 err = 
ENOTSUP; 20860 } else if (arg != NULL) { 20861 mhioc_preemptandabort_t preempt_abort; 20862 if (ddi_copyin((void *)arg, &preempt_abort, 20863 sizeof (mhioc_preemptandabort_t), 20864 flag) != 0) { 20865 err = EFAULT; 20866 } else { 20867 err = 20868 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20869 un, SD_SCSI3_PREEMPTANDABORT, 20870 (uchar_t *)&preempt_abort); 20871 } 20872 } 20873 } 20874 break; 20875 20876 case MHIOCGRP_REGISTERANDIGNOREKEY: 20877 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n"); 20878 if ((err = drv_priv(cred_p)) != EPERM) { 20879 if (un->un_reservation_type == SD_SCSI2_RESERVATION) { 20880 err = ENOTSUP; 20881 } else if (arg != NULL) { 20882 mhioc_registerandignorekey_t r_and_i; 20883 if (ddi_copyin((void *)arg, (void *)&r_and_i, 20884 sizeof (mhioc_registerandignorekey_t), 20885 flag) != 0) { 20886 err = EFAULT; 20887 } else { 20888 err = 20889 sd_send_scsi_PERSISTENT_RESERVE_OUT( 20890 un, SD_SCSI3_REGISTERANDIGNOREKEY, 20891 (uchar_t *)&r_and_i); 20892 } 20893 } 20894 } 20895 break; 20896 20897 case USCSICMD: 20898 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n"); 20899 cr = ddi_get_cred(); 20900 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) { 20901 err = EPERM; 20902 } else { 20903 err = sd_uscsi_ioctl(dev, (caddr_t)arg, flag); 20904 } 20905 break; 20906 20907 case CDROMPAUSE: 20908 case CDROMRESUME: 20909 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n"); 20910 if (!ISCD(un)) { 20911 err = ENOTTY; 20912 } else { 20913 err = sr_pause_resume(dev, cmd); 20914 } 20915 break; 20916 20917 case CDROMPLAYMSF: 20918 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n"); 20919 if (!ISCD(un)) { 20920 err = ENOTTY; 20921 } else { 20922 err = sr_play_msf(dev, (caddr_t)arg, flag); 20923 } 20924 break; 20925 20926 case CDROMPLAYTRKIND: 20927 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n"); 20928 #if defined(__i386) || defined(__amd64) 20929 /* 20930 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead 20931 */ 20932 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 20933 #else 20934 if (!ISCD(un)) { 20935 #endif 20936 err = ENOTTY; 20937 } else { 20938 err = sr_play_trkind(dev, (caddr_t)arg, flag); 20939 } 20940 break; 20941 20942 case CDROMREADTOCHDR: 20943 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n"); 20944 if (!ISCD(un)) { 20945 err = ENOTTY; 20946 } else { 20947 err = sr_read_tochdr(dev, (caddr_t)arg, flag); 20948 } 20949 break; 20950 20951 case CDROMREADTOCENTRY: 20952 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n"); 20953 if (!ISCD(un)) { 20954 err = ENOTTY; 20955 } else { 20956 err = sr_read_tocentry(dev, (caddr_t)arg, flag); 20957 } 20958 break; 20959 20960 case CDROMSTOP: 20961 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n"); 20962 if (!ISCD(un)) { 20963 err = ENOTTY; 20964 } else { 20965 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_STOP, 20966 SD_PATH_STANDARD); 20967 } 20968 break; 20969 20970 case CDROMSTART: 20971 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n"); 20972 if (!ISCD(un)) { 20973 err = ENOTTY; 20974 } else { 20975 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_START, 20976 SD_PATH_STANDARD); 20977 } 20978 break; 20979 20980 case CDROMCLOSETRAY: 20981 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n"); 20982 if (!ISCD(un)) { 20983 err = ENOTTY; 20984 } else { 20985 err = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_CLOSE, 20986 SD_PATH_STANDARD); 20987 } 20988 break; 20989 20990 case FDEJECT: /* for eject command */ 20991 case DKIOCEJECT: 20992 case CDROMEJECT: 20993 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n"); 20994 if (!ISREMOVABLE(un)) { 20995 err = ENOTTY; 20996 } else 
{ 20997 err = sr_eject(dev); 20998 } 20999 break; 21000 21001 case CDROMVOLCTRL: 21002 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n"); 21003 if (!ISCD(un)) { 21004 err = ENOTTY; 21005 } else { 21006 err = sr_volume_ctrl(dev, (caddr_t)arg, flag); 21007 } 21008 break; 21009 21010 case CDROMSUBCHNL: 21011 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n"); 21012 if (!ISCD(un)) { 21013 err = ENOTTY; 21014 } else { 21015 err = sr_read_subchannel(dev, (caddr_t)arg, flag); 21016 } 21017 break; 21018 21019 case CDROMREADMODE2: 21020 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n"); 21021 if (!ISCD(un)) { 21022 err = ENOTTY; 21023 } else if (un->un_f_cfg_is_atapi == TRUE) { 21024 /* 21025 * If the drive supports READ CD, use that instead of 21026 * switching the LBA size via a MODE SELECT 21027 * Block Descriptor 21028 */ 21029 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag); 21030 } else { 21031 err = sr_read_mode2(dev, (caddr_t)arg, flag); 21032 } 21033 break; 21034 21035 case CDROMREADMODE1: 21036 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n"); 21037 if (!ISCD(un)) { 21038 err = ENOTTY; 21039 } else { 21040 err = sr_read_mode1(dev, (caddr_t)arg, flag); 21041 } 21042 break; 21043 21044 case CDROMREADOFFSET: 21045 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n"); 21046 if (!ISCD(un)) { 21047 err = ENOTTY; 21048 } else { 21049 err = sr_read_sony_session_offset(dev, (caddr_t)arg, 21050 flag); 21051 } 21052 break; 21053 21054 case CDROMSBLKMODE: 21055 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n"); 21056 /* 21057 * There is no means of changing the block size on ATAPI 21058 * drives, thus return ENOTTY if the drive type is ATAPI 21059 */ 21060 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) { 21061 err = ENOTTY; 21062 } else if (un->un_f_mmc_cap == TRUE) { 21063 21064 /* 21065 * MMC Devices do not support changing the 21066 * logical block size 21067 * 21068 * Note: EINVAL is being returned instead of ENOTTY to 21069 * maintain consistency with the original mmc 21070 * driver update. 21071 */ 21072 err = EINVAL; 21073 } else { 21074 mutex_enter(SD_MUTEX(un)); 21075 if ((!(un->un_exclopen & (1<<SDPART(dev)))) || 21076 (un->un_ncmds_in_transport > 0)) { 21077 mutex_exit(SD_MUTEX(un)); 21078 err = EINVAL; 21079 } else { 21080 mutex_exit(SD_MUTEX(un)); 21081 err = sr_change_blkmode(dev, cmd, arg, flag); 21082 } 21083 } 21084 break; 21085 21086 case CDROMGBLKMODE: 21087 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n"); 21088 if (!ISCD(un)) { 21089 err = ENOTTY; 21090 } else if ((un->un_f_cfg_is_atapi != FALSE) && 21091 (un->un_f_blockcount_is_valid != FALSE)) { 21092 /* 21093 * Drive is an ATAPI drive so return target block 21094 * size for ATAPI drives since we cannot change the 21095 * blocksize on ATAPI drives. Used primarily to detect 21096 * if an ATAPI cdrom is present. 21097 */ 21098 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg, 21099 sizeof (int), flag) != 0) { 21100 err = EFAULT; 21101 } else { 21102 err = 0; 21103 } 21104 21105 } else { 21106 /* 21107 * Drive supports changing block sizes via a Mode 21108 * Select.
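 *
 * (sr_change_blkmode() presumably makes the change by rewriting the
 * mode block descriptor via MODE SELECT, the mechanism the
 * CDROMREADMODE2 comment above refers to.)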
21109 */ 21110 err = sr_change_blkmode(dev, cmd, arg, flag); 21111 } 21112 break; 21113 21114 case CDROMGDRVSPEED: 21115 case CDROMSDRVSPEED: 21116 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n"); 21117 if (!ISCD(un)) { 21118 err = ENOTTY; 21119 } else if (un->un_f_mmc_cap == TRUE) { 21120 /* 21121 * Note: In the future the driver implementation 21122 * for getting and 21123 * setting cd speed should entail: 21124 * 1) If non-mmc try the Toshiba mode page 21125 * (sr_change_speed) 21126 * 2) If mmc but no support for Real Time Streaming try 21127 * the SET CD SPEED (0xBB) command 21128 * (sr_atapi_change_speed) 21129 * 3) If mmc and support for Real Time Streaming 21130 * try the GET PERFORMANCE and SET STREAMING 21131 * commands (not yet implemented, 4380808) 21132 */ 21133 /* 21134 * As per recent MMC spec, CD-ROM speed is variable 21135 * and changes with LBA. Since there is no such 21136 * thing as a drive speed now, fail this ioctl. 21137 * 21138 * Note: EINVAL is returned for consistency with the 21139 * original implementation, which included support for 21140 * getting the drive speed of mmc devices but not 21141 * setting the drive speed. Thus EINVAL would be 21142 * returned if a set request was made for an mmc device. 21143 * We no longer support get or set speed for 21144 * mmc but need to remain consistent with regard 21145 * to the error code returned. 21146 */ 21147 err = EINVAL; 21148 } else if (un->un_f_cfg_is_atapi == TRUE) { 21149 err = sr_atapi_change_speed(dev, cmd, arg, flag); 21150 } else { 21151 err = sr_change_speed(dev, cmd, arg, flag); 21152 } 21153 break; 21154 21155 case CDROMCDDA: 21156 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n"); 21157 if (!ISCD(un)) { 21158 err = ENOTTY; 21159 } else { 21160 err = sr_read_cdda(dev, (void *)arg, flag); 21161 } 21162 break; 21163 21164 case CDROMCDXA: 21165 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n"); 21166 if (!ISCD(un)) { 21167 err = ENOTTY; 21168 } else { 21169 err = sr_read_cdxa(dev, (caddr_t)arg, flag); 21170 } 21171 break; 21172 21173 case CDROMSUBCODE: 21174 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n"); 21175 if (!ISCD(un)) { 21176 err = ENOTTY; 21177 } else { 21178 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag); 21179 } 21180 break; 21181 21182 case DKIOCPARTINFO: { 21183 /* 21184 * Return parameters describing the selected disk slice. 21185 * Note: this ioctl is for the Intel platform only 21186 */ 21187 #if defined(__i386) || defined(__amd64) 21188 int part; 21189 21190 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCPARTINFO\n"); 21191 part = SDPART(dev); 21192 21193 /* don't check un_solaris_size for pN */ 21194 if (part < P0_RAW_DISK && un->un_solaris_size == 0) { 21195 err = EIO; 21196 } else { 21197 struct part_info p; 21198 21199 p.p_start = (daddr_t)un->un_offset[part]; 21200 p.p_length = (int)un->un_map[part].dkl_nblk; 21201 #ifdef _MULTI_DATAMODEL 21202 switch (ddi_model_convert_from(flag & FMODELS)) { 21203 case DDI_MODEL_ILP32: 21204 { 21205 struct part_info32 p32; 21206 21207 p32.p_start = (daddr32_t)p.p_start; 21208 p32.p_length = p.p_length; 21209 if (ddi_copyout(&p32, (void *)arg, 21210 sizeof (p32), flag)) 21211 err = EFAULT; 21212 break; 21213 } 21214 21215 case DDI_MODEL_NONE: 21216 { 21217 if (ddi_copyout(&p, (void *)arg, sizeof (p), 21218 flag)) 21219 err = EFAULT; 21220 break; 21221 } 21222 } 21223 #else /* !
_MULTI_DATAMODEL */ 21224 if (ddi_copyout(&p, (void *)arg, sizeof (p), flag)) 21225 err = EFAULT; 21226 #endif /* _MULTI_DATAMODEL */ 21227 } 21228 #else 21229 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCPARTINFO\n"); 21230 err = ENOTTY; 21231 #endif 21232 break; 21233 } 21234 21235 case DKIOCG_PHYGEOM: { 21236 /* Return the driver's notion of the media physical geometry */ 21237 #if defined(__i386) || defined(__amd64) 21238 struct dk_geom disk_geom; 21239 struct dk_geom *dkgp = &disk_geom; 21240 21241 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_PHYGEOM\n"); 21242 mutex_enter(SD_MUTEX(un)); 21243 21244 if (un->un_g.dkg_nhead != 0 && 21245 un->un_g.dkg_nsect != 0) { 21246 /* 21247 * We succeeded in getting a geometry, but 21248 * right now it is being reported as just the 21249 * Solaris fdisk partition, just like for 21250 * DKIOCGGEOM. We need to change that to be 21251 * correct for the entire disk now. 21252 */ 21253 bcopy(&un->un_g, dkgp, sizeof (*dkgp)); 21254 dkgp->dkg_acyl = 0; 21255 dkgp->dkg_ncyl = un->un_blockcount / 21256 (dkgp->dkg_nhead * dkgp->dkg_nsect); 21257 } else { 21258 bzero(dkgp, sizeof (struct dk_geom)); 21259 /* 21260 * This disk does not have a Solaris VTOC 21261 * so we must present a physical geometry 21262 * that will remain consistent regardless 21263 * of how the disk is used. This will ensure 21264 * that the geometry does not change regardless 21265 * of the fdisk partition type (ie. EFI, FAT32, 21266 * Solaris, etc). 21267 */ 21268 if (ISCD(un)) { 21269 dkgp->dkg_nhead = un->un_pgeom.g_nhead; 21270 dkgp->dkg_nsect = un->un_pgeom.g_nsect; 21271 dkgp->dkg_ncyl = un->un_pgeom.g_ncyl; 21272 dkgp->dkg_acyl = un->un_pgeom.g_acyl; 21273 } else { 21274 sd_convert_geometry(un->un_blockcount, dkgp); 21275 dkgp->dkg_acyl = 0; 21276 dkgp->dkg_ncyl = un->un_blockcount / 21277 (dkgp->dkg_nhead * dkgp->dkg_nsect); 21278 } 21279 } 21280 dkgp->dkg_pcyl = dkgp->dkg_ncyl + dkgp->dkg_acyl; 21281 21282 if (ddi_copyout(dkgp, (void *)arg, 21283 sizeof (struct dk_geom), flag)) { 21284 mutex_exit(SD_MUTEX(un)); 21285 err = EFAULT; 21286 } else { 21287 mutex_exit(SD_MUTEX(un)); 21288 err = 0; 21289 } 21290 #else 21291 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_PHYGEOM\n"); 21292 err = ENOTTY; 21293 #endif 21294 break; 21295 } 21296 21297 case DKIOCG_VIRTGEOM: { 21298 /* Return the driver's notion of the media's logical geometry */ 21299 #if defined(__i386) || defined(__amd64) 21300 struct dk_geom disk_geom; 21301 struct dk_geom *dkgp = &disk_geom; 21302 21303 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_VIRTGEOM\n"); 21304 mutex_enter(SD_MUTEX(un)); 21305 /* 21306 * If there is no HBA geometry available, or 21307 * if the HBA returned us something that doesn't 21308 * really fit into an Int 13/function 8 geometry 21309 * result, just fail the ioctl. See PSARC 1998/313. 
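 *
 * (An Int 13h/function 8 style geometry caps the cylinder count at
 * 1024, which is what the g_ncyl > 1024 test below enforces; the head
 * and sector counts need only be non-zero.)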
21310 */ 21311 if (un->un_lgeom.g_nhead == 0 || 21312 un->un_lgeom.g_nsect == 0 || 21313 un->un_lgeom.g_ncyl > 1024) { 21314 mutex_exit(SD_MUTEX(un)); 21315 err = EINVAL; 21316 } else { 21317 dkgp->dkg_ncyl = un->un_lgeom.g_ncyl; 21318 dkgp->dkg_acyl = un->un_lgeom.g_acyl; 21319 dkgp->dkg_pcyl = dkgp->dkg_ncyl + dkgp->dkg_acyl; 21320 dkgp->dkg_nhead = un->un_lgeom.g_nhead; 21321 dkgp->dkg_nsect = un->un_lgeom.g_nsect; 21322 21323 if (ddi_copyout(dkgp, (void *)arg, 21324 sizeof (struct dk_geom), flag)) { 21325 mutex_exit(SD_MUTEX(un)); 21326 err = EFAULT; 21327 } else { 21328 mutex_exit(SD_MUTEX(un)); 21329 err = 0; 21330 } 21331 } 21332 #else 21333 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCG_VIRTGEOM\n"); 21334 err = ENOTTY; 21335 #endif 21336 break; 21337 } 21338 #ifdef SDDEBUG 21339 /* RESET/ABORTS testing ioctls */ 21340 case DKIOCRESET: { 21341 int reset_level; 21342 21343 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) { 21344 err = EFAULT; 21345 } else { 21346 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: " 21347 "reset_level = 0x%lx\n", reset_level); 21348 if (scsi_reset(SD_ADDRESS(un), reset_level)) { 21349 err = 0; 21350 } else { 21351 err = EIO; 21352 } 21353 } 21354 break; 21355 } 21356 21357 case DKIOCABORT: 21358 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n"); 21359 if (scsi_abort(SD_ADDRESS(un), NULL)) { 21360 err = 0; 21361 } else { 21362 err = EIO; 21363 } 21364 break; 21365 #endif 21366 21367 #ifdef SD_FAULT_INJECTION 21368 /* SDIOC FaultInjection testing ioctls */ 21369 case SDIOCSTART: 21370 case SDIOCSTOP: 21371 case SDIOCINSERTPKT: 21372 case SDIOCINSERTXB: 21373 case SDIOCINSERTUN: 21374 case SDIOCINSERTARQ: 21375 case SDIOCPUSH: 21376 case SDIOCRETRIEVE: 21377 case SDIOCRUN: 21378 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:" 21379 "SDIOC detected cmd:0x%X:\n", cmd); 21380 /* call error generator */ 21381 sd_faultinjection_ioctl(cmd, arg, un); 21382 err = 0; 21383 break; 21384 21385 #endif /* SD_FAULT_INJECTION */ 21386 21387 default: 21388 err = ENOTTY; 21389 break; 21390 } 21391 mutex_enter(SD_MUTEX(un)); 21392 un->un_ncmds_in_driver--; 21393 ASSERT(un->un_ncmds_in_driver >= 0); 21394 mutex_exit(SD_MUTEX(un)); 21395 21396 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err); 21397 return (err); 21398 } 21399 21400 21401 /* 21402 * Function: sd_uscsi_ioctl 21403 * 21404 * Description: This routine is the driver entry point for handling USCSI ioctl 21405 * requests (USCSICMD). 21406 * 21407 * Arguments: dev - the device number 21408 * arg - user provided scsi command 21409 * flag - this argument is a pass through to ddi_copyxxx() 21410 * directly from the mode argument of ioctl(). 
21411 * 21412 * Return Code: code returned by sd_send_scsi_cmd 21413 * ENXIO 21414 * EFAULT 21415 * EAGAIN 21416 */ 21417 21418 static int 21419 sd_uscsi_ioctl(dev_t dev, caddr_t arg, int flag) 21420 { 21421 #ifdef _MULTI_DATAMODEL 21422 /* 21423 * For use when a 32 bit app makes a call into a 21424 * 64 bit ioctl 21425 */ 21426 struct uscsi_cmd32 uscsi_cmd_32_for_64; 21427 struct uscsi_cmd32 *ucmd32 = &uscsi_cmd_32_for_64; 21428 model_t model; 21429 #endif /* _MULTI_DATAMODEL */ 21430 struct uscsi_cmd *scmd = NULL; 21431 struct sd_lun *un = NULL; 21432 enum uio_seg uioseg; 21433 char cdb[CDB_GROUP0]; 21434 int rval = 0; 21435 21436 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21437 return (ENXIO); 21438 } 21439 21440 SD_TRACE(SD_LOG_IOCTL, un, "sd_uscsi_ioctl: entry: un:0x%p\n", un); 21441 21442 scmd = (struct uscsi_cmd *) 21443 kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP); 21444 21445 #ifdef _MULTI_DATAMODEL 21446 switch (model = ddi_model_convert_from(flag & FMODELS)) { 21447 case DDI_MODEL_ILP32: 21448 { 21449 if (ddi_copyin((void *)arg, ucmd32, sizeof (*ucmd32), flag)) { 21450 rval = EFAULT; 21451 goto done; 21452 } 21453 /* 21454 * Convert the ILP32 uscsi data from the 21455 * application to LP64 for internal use. 21456 */ 21457 uscsi_cmd32touscsi_cmd(ucmd32, scmd); 21458 break; 21459 } 21460 case DDI_MODEL_NONE: 21461 if (ddi_copyin((void *)arg, scmd, sizeof (*scmd), flag)) { 21462 rval = EFAULT; 21463 goto done; 21464 } 21465 break; 21466 } 21467 #else /* ! _MULTI_DATAMODEL */ 21468 if (ddi_copyin((void *)arg, scmd, sizeof (*scmd), flag)) { 21469 rval = EFAULT; 21470 goto done; 21471 } 21472 #endif /* _MULTI_DATAMODEL */ 21473 21474 scmd->uscsi_flags &= ~USCSI_NOINTR; 21475 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE : UIO_USERSPACE; 21476 if (un->un_f_format_in_progress == TRUE) { 21477 rval = EAGAIN; 21478 goto done; 21479 } 21480 21481 /* 21482 * Gotta do the ddi_copyin() here on the uscsi_cdb so that 21483 * we will have a valid cdb[0] to test. 21484 */ 21485 if ((ddi_copyin(scmd->uscsi_cdb, cdb, CDB_GROUP0, flag) == 0) && 21486 (cdb[0] == SCMD_FORMAT)) { 21487 SD_TRACE(SD_LOG_IOCTL, un, 21488 "sd_uscsi_ioctl: scmd->uscsi_cdb 0x%x\n", cdb[0]); 21489 mutex_enter(SD_MUTEX(un)); 21490 un->un_f_format_in_progress = TRUE; 21491 mutex_exit(SD_MUTEX(un)); 21492 rval = sd_send_scsi_cmd(dev, scmd, uioseg, uioseg, uioseg, 21493 SD_PATH_STANDARD); 21494 mutex_enter(SD_MUTEX(un)); 21495 un->un_f_format_in_progress = FALSE; 21496 mutex_exit(SD_MUTEX(un)); 21497 } else { 21498 SD_TRACE(SD_LOG_IOCTL, un, 21499 "sd_uscsi_ioctl: scmd->uscsi_cdb 0x%x\n", cdb[0]); 21500 /* 21501 * It's OK to fall into here even if the ddi_copyin() 21502 * on the uscsi_cdb above fails, because sd_send_scsi_cmd() 21503 * does this same copyin and will return the EFAULT 21504 * if it fails. 21505 */ 21506 rval = sd_send_scsi_cmd(dev, scmd, uioseg, uioseg, uioseg, 21507 SD_PATH_STANDARD); 21508 } 21509 #ifdef _MULTI_DATAMODEL 21510 switch (model) { 21511 case DDI_MODEL_ILP32: 21512 /* 21513 * Convert back to ILP32 before copyout to the 21514 * application 21515 */ 21516 uscsi_cmdtouscsi_cmd32(scmd, ucmd32); 21517 if (ddi_copyout(ucmd32, (void *)arg, sizeof (*ucmd32), flag)) { 21518 if (rval != 0) { 21519 rval = EFAULT; 21520 } 21521 } 21522 break; 21523 case DDI_MODEL_NONE: 21524 if (ddi_copyout(scmd, (void *)arg, sizeof (*scmd), flag)) { 21525 if (rval != 0) { 21526 rval = EFAULT; 21527 } 21528 } 21529 break; 21530 } 21531 #else /* ! 
_MULTI_DATAMODEL */ 21532 if (ddi_copyout(scmd, (void *)arg, sizeof (*scmd), flag)) { 21533 if (rval != 0) { 21534 rval = EFAULT; 21535 } 21536 } 21537 #endif /* _MULTI_DATAMODEL */ 21538 done: 21539 kmem_free(scmd, sizeof (struct uscsi_cmd)); 21540 21541 SD_TRACE(SD_LOG_IOCTL, un, "sd_uscsi_ioctl: exit: un:0x%p\n", un); 21542 21543 return (rval); 21544 } 21545 21546 21547 /* 21548 * Function: sd_dkio_ctrl_info 21549 * 21550 * Description: This routine is the driver entry point for handling controller 21551 * information ioctl requests (DKIOCINFO). 21552 * 21553 * Arguments: dev - the device number 21554 * arg - pointer to user provided dk_cinfo structure 21555 * specifying the controller type and attributes. 21556 * flag - this argument is a pass through to ddi_copyxxx() 21557 * directly from the mode argument of ioctl(). 21558 * 21559 * Return Code: 0 21560 * EFAULT 21561 * ENXIO 21562 */ 21563 21564 static int 21565 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag) 21566 { 21567 struct sd_lun *un = NULL; 21568 struct dk_cinfo *info; 21569 dev_info_t *pdip; 21570 int lun, tgt; 21571 21572 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21573 return (ENXIO); 21574 } 21575 21576 info = (struct dk_cinfo *) 21577 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); 21578 21579 switch (un->un_ctype) { 21580 case CTYPE_CDROM: 21581 info->dki_ctype = DKC_CDROM; 21582 break; 21583 default: 21584 info->dki_ctype = DKC_SCSI_CCS; 21585 break; 21586 } 21587 pdip = ddi_get_parent(SD_DEVINFO(un)); 21588 info->dki_cnum = ddi_get_instance(pdip); 21589 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) { 21590 (void) strcpy(info->dki_cname, ddi_get_name(pdip)); 21591 } else { 21592 (void) strncpy(info->dki_cname, ddi_node_name(pdip), 21593 DK_DEVLEN - 1); 21594 } 21595 21596 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 21597 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0); 21598 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un), 21599 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0); 21600 21601 /* Unit Information */ 21602 info->dki_unit = ddi_get_instance(SD_DEVINFO(un)); 21603 info->dki_slave = ((tgt << 3) | lun); 21604 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)), 21605 DK_DEVLEN - 1); 21606 info->dki_flags = DKI_FMTVOL; 21607 info->dki_partition = SDPART(dev); 21608 21609 /* Max Transfer size of this device in blocks */ 21610 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize; 21611 info->dki_addr = 0; 21612 info->dki_space = 0; 21613 info->dki_prio = 0; 21614 info->dki_vec = 0; 21615 21616 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) { 21617 kmem_free(info, sizeof (struct dk_cinfo)); 21618 return (EFAULT); 21619 } else { 21620 kmem_free(info, sizeof (struct dk_cinfo)); 21621 return (0); 21622 } 21623 } 21624 21625 21626 /* 21627 * Function: sd_get_media_info 21628 * 21629 * Description: This routine is the driver entry point for handling ioctl 21630 * requests for the media type or command set profile used by the 21631 * drive to operate on the media (DKIOCGMEDIAINFO). 21632 * 21633 * Arguments: dev - the device number 21634 * arg - pointer to user provided dk_minfo structure 21635 * specifying the media type, logical block size and 21636 * drive capacity. 21637 * flag - this argument is a pass through to ddi_copyxxx() 21638 * directly from the mode argument of ioctl().
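 *
 * Usage sketch (illustrative only; see dkio(7I)):
 *
 *	struct dk_minfo minfo;
 *	if (ioctl(fd, DKIOCGMEDIAINFO, &minfo) == 0)
 *		(void) printf("type %u, %u-byte blocks\n",
 *		    minfo.dki_media_type, minfo.dki_lbsize);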
21639 * 21640 * Return Code: 0 21641 * EACCESS 21642 * EFAULT 21643 * ENXIO 21644 * EIO 21645 */ 21646 21647 static int 21648 sd_get_media_info(dev_t dev, caddr_t arg, int flag) 21649 { 21650 struct sd_lun *un = NULL; 21651 struct uscsi_cmd com; 21652 struct scsi_inquiry *sinq; 21653 struct dk_minfo media_info; 21654 u_longlong_t media_capacity; 21655 uint64_t capacity; 21656 uint_t lbasize; 21657 uchar_t *out_data; 21658 uchar_t *rqbuf; 21659 int rval = 0; 21660 int rtn; 21661 21662 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 21663 (un->un_state == SD_STATE_OFFLINE)) { 21664 return (ENXIO); 21665 } 21666 21667 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info: entry\n"); 21668 21669 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP); 21670 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP); 21671 21672 /* Issue a TUR to determine if the drive is ready with media present */ 21673 rval = sd_send_scsi_TEST_UNIT_READY(un, SD_CHECK_FOR_MEDIA); 21674 if (rval == ENXIO) { 21675 goto done; 21676 } 21677 21678 /* Now get configuration data */ 21679 if (ISCD(un)) { 21680 media_info.dki_media_type = DK_CDROM; 21681 21682 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */ 21683 if (un->un_f_mmc_cap == TRUE) { 21684 rtn = sd_send_scsi_GET_CONFIGURATION(un, &com, rqbuf, 21685 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN); 21686 21687 if (rtn) { 21688 /* 21689 * Failed for other than an illegal request 21690 * or command not supported 21691 */ 21692 if ((com.uscsi_status == STATUS_CHECK) && 21693 (com.uscsi_rqstatus == STATUS_GOOD)) { 21694 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) || 21695 (rqbuf[12] != 0x20)) { 21696 rval = EIO; 21697 goto done; 21698 } 21699 } 21700 } else { 21701 /* 21702 * The GET CONFIGURATION command succeeded 21703 * so set the media type according to the 21704 * returned data 21705 */ 21706 media_info.dki_media_type = out_data[6]; 21707 media_info.dki_media_type <<= 8; 21708 media_info.dki_media_type |= out_data[7]; 21709 } 21710 } 21711 } else { 21712 /* 21713 * The profile list is not available, so we attempt to identify 21714 * the media type based on the inquiry data 21715 */ 21716 sinq = un->un_sd->sd_inq; 21717 if (sinq->inq_qual == 0) { 21718 /* This is a direct access device */ 21719 media_info.dki_media_type = DK_FIXED_DISK; 21720 21721 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) || 21722 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) { 21723 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) { 21724 media_info.dki_media_type = DK_ZIP; 21725 } else if ( 21726 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) { 21727 media_info.dki_media_type = DK_JAZ; 21728 } 21729 } 21730 } else { 21731 /* Not a CD or direct access so return unknown media */ 21732 media_info.dki_media_type = DK_UNKNOWN; 21733 } 21734 } 21735 21736 /* Now read the capacity so we can provide the lbasize and capacity */ 21737 switch (sd_send_scsi_READ_CAPACITY(un, &capacity, &lbasize, 21738 SD_PATH_DIRECT)) { 21739 case 0: 21740 break; 21741 case EACCES: 21742 rval = EACCES; 21743 goto done; 21744 default: 21745 rval = EIO; 21746 goto done; 21747 } 21748 21749 media_info.dki_lbsize = lbasize; 21750 media_capacity = capacity; 21751 21752 /* 21753 * sd_send_scsi_READ_CAPACITY() reports capacity in 21754 * un->un_sys_blocksize chunks. So we need to convert it into 21755 * cap.lbasize chunks. 
21756 */ 21757 media_capacity *= un->un_sys_blocksize; 21758 media_capacity /= lbasize; 21759 media_info.dki_capacity = media_capacity; 21760 21761 if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag)) { 21762 rval = EFAULT; 21763 /* Put goto. Anybody might add some code below in future */ 21764 goto done; 21765 } 21766 done: 21767 kmem_free(out_data, SD_PROFILE_HEADER_LEN); 21768 kmem_free(rqbuf, SENSE_LENGTH); 21769 return (rval); 21770 } 21771 21772 21773 /* 21774 * Function: sd_dkio_get_geometry 21775 * 21776 * Description: This routine is the driver entry point for handling user 21777 * requests to get the device geometry (DKIOCGGEOM). 21778 * 21779 * Arguments: dev - the device number 21780 * arg - pointer to user provided dk_geom structure specifying 21781 * the controller's notion of the current geometry. 21782 * flag - this argument is a pass through to ddi_copyxxx() 21783 * directly from the mode argument of ioctl(). 21784 * geom_validated - flag indicating if the device geometry has been 21785 * previously validated in the sdioctl routine. 21786 * 21787 * Return Code: 0 21788 * EFAULT 21789 * ENXIO 21790 * EIO 21791 */ 21792 21793 static int 21794 sd_dkio_get_geometry(dev_t dev, caddr_t arg, int flag, int geom_validated) 21795 { 21796 struct sd_lun *un = NULL; 21797 struct dk_geom *tmp_geom = NULL; 21798 int rval = 0; 21799 21800 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21801 return (ENXIO); 21802 } 21803 21804 #if defined(__i386) || defined(__amd64) 21805 if (un->un_solaris_size == 0) { 21806 return (EIO); 21807 } 21808 #endif 21809 if (geom_validated == FALSE) { 21810 /* 21811 * sd_validate_geometry does not spin a disk up 21812 * if it was spun down. We need to make sure it 21813 * is ready. 21814 */ 21815 if ((rval = sd_send_scsi_TEST_UNIT_READY(un, 0)) != 0) { 21816 return (rval); 21817 } 21818 mutex_enter(SD_MUTEX(un)); 21819 rval = sd_validate_geometry(un, SD_PATH_DIRECT); 21820 mutex_exit(SD_MUTEX(un)); 21821 } 21822 if (rval) 21823 return (rval); 21824 21825 /* 21826 * Make a local copy of the soft state geometry to avoid some potential 21827 * race conditions associated with holding the mutex and updating the 21828 * write_reinstruct value 21829 */ 21830 tmp_geom = kmem_zalloc(sizeof (struct dk_geom), KM_SLEEP); 21831 mutex_enter(SD_MUTEX(un)); 21832 bcopy(&un->un_g, tmp_geom, sizeof (struct dk_geom)); 21833 mutex_exit(SD_MUTEX(un)); 21834 21835 if (tmp_geom->dkg_write_reinstruct == 0) { 21836 tmp_geom->dkg_write_reinstruct = 21837 (int)((int)(tmp_geom->dkg_nsect * tmp_geom->dkg_rpm * 21838 sd_rot_delay) / (int)60000); 21839 } 21840 21841 rval = ddi_copyout(tmp_geom, (void *)arg, sizeof (struct dk_geom), 21842 flag); 21843 if (rval != 0) { 21844 rval = EFAULT; 21845 } 21846 21847 kmem_free(tmp_geom, sizeof (struct dk_geom)); 21848 return (rval); 21849 21850 } 21851 21852 21853 /* 21854 * Function: sd_dkio_set_geometry 21855 * 21856 * Description: This routine is the driver entry point for handling user 21857 * requests to set the device geometry (DKIOCSGEOM). The actual 21858 * device geometry is not updated, just the driver "notion" of it. 21859 * 21860 * Arguments: dev - the device number 21861 * arg - pointer to user provided dk_geom structure used to set 21862 * the controller's notion of the current geometry. 21863 * flag - this argument is a pass through to ddi_copyxxx() 21864 * directly from the mode argument of ioctl(). 
21865 * 21866 * Return Code: 0 21867 * EFAULT 21868 * ENXIO 21869 * EIO 21870 */ 21871 21872 static int 21873 sd_dkio_set_geometry(dev_t dev, caddr_t arg, int flag) 21874 { 21875 struct sd_lun *un = NULL; 21876 struct dk_geom *tmp_geom; 21877 struct dk_map *lp; 21878 int rval = 0; 21879 int i; 21880 21881 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21882 return (ENXIO); 21883 } 21884 21885 #if defined(__i386) || defined(__amd64) 21886 if (un->un_solaris_size == 0) { 21887 return (EIO); 21888 } 21889 #endif 21890 /* 21891 * We need to copy the user specified geometry into local 21892 * storage and then update the softstate. We don't want to hold 21893 * the mutex and copyin directly from the user to the soft state 21894 */ 21895 tmp_geom = (struct dk_geom *) 21896 kmem_zalloc(sizeof (struct dk_geom), KM_SLEEP); 21897 rval = ddi_copyin(arg, tmp_geom, sizeof (struct dk_geom), flag); 21898 if (rval != 0) { 21899 kmem_free(tmp_geom, sizeof (struct dk_geom)); 21900 return (EFAULT); 21901 } 21902 21903 mutex_enter(SD_MUTEX(un)); 21904 bcopy(tmp_geom, &un->un_g, sizeof (struct dk_geom)); 21905 for (i = 0; i < NDKMAP; i++) { 21906 lp = &un->un_map[i]; 21907 un->un_offset[i] = 21908 un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno; 21909 #if defined(__i386) || defined(__amd64) 21910 un->un_offset[i] += un->un_solaris_offset; 21911 #endif 21912 } 21913 un->un_f_geometry_is_valid = FALSE; 21914 mutex_exit(SD_MUTEX(un)); 21915 kmem_free(tmp_geom, sizeof (struct dk_geom)); 21916 21917 return (rval); 21918 } 21919 21920 21921 /* 21922 * Function: sd_dkio_get_partition 21923 * 21924 * Description: This routine is the driver entry point for handling user 21925 * requests to get the partition table (DKIOCGAPART). 21926 * 21927 * Arguments: dev - the device number 21928 * arg - pointer to user provided dk_allmap structure specifying 21929 * the controller's notion of the current partition table. 21930 * flag - this argument is a pass through to ddi_copyxxx() 21931 * directly from the mode argument of ioctl(). 21932 * geom_validated - flag indicating if the device geometry has been 21933 * previously validated in the sdioctl routine. 21934 * 21935 * Return Code: 0 21936 * EFAULT 21937 * ENXIO 21938 * EIO 21939 */ 21940 21941 static int 21942 sd_dkio_get_partition(dev_t dev, caddr_t arg, int flag, int geom_validated) 21943 { 21944 struct sd_lun *un = NULL; 21945 int rval = 0; 21946 int size; 21947 21948 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 21949 return (ENXIO); 21950 } 21951 21952 #if defined(__i386) || defined(__amd64) 21953 if (un->un_solaris_size == 0) { 21954 return (EIO); 21955 } 21956 #endif 21957 /* 21958 * Make sure the geometry is valid before getting the partition 21959 * information. 21960 */ 21961 mutex_enter(SD_MUTEX(un)); 21962 if (geom_validated == FALSE) { 21963 /* 21964 * sd_validate_geometry does not spin a disk up 21965 * if it was spun down. We need to make sure it 21966 * is ready before validating the geometry. 
21967 */ 21968 mutex_exit(SD_MUTEX(un)); 21969 if ((rval = sd_send_scsi_TEST_UNIT_READY(un, 0)) != 0) { 21970 return (rval); 21971 } 21972 mutex_enter(SD_MUTEX(un)); 21973 21974 if ((rval = sd_validate_geometry(un, SD_PATH_DIRECT)) != 0) { 21975 mutex_exit(SD_MUTEX(un)); 21976 return (rval); 21977 } 21978 } 21979 mutex_exit(SD_MUTEX(un)); 21980 21981 #ifdef _MULTI_DATAMODEL 21982 switch (ddi_model_convert_from(flag & FMODELS)) { 21983 case DDI_MODEL_ILP32: { 21984 struct dk_map32 dk_map32[NDKMAP]; 21985 int i; 21986 21987 for (i = 0; i < NDKMAP; i++) { 21988 dk_map32[i].dkl_cylno = un->un_map[i].dkl_cylno; 21989 dk_map32[i].dkl_nblk = un->un_map[i].dkl_nblk; 21990 } 21991 size = NDKMAP * sizeof (struct dk_map32); 21992 rval = ddi_copyout(dk_map32, (void *)arg, size, flag); 21993 if (rval != 0) { 21994 rval = EFAULT; 21995 } 21996 break; 21997 } 21998 case DDI_MODEL_NONE: 21999 size = NDKMAP * sizeof (struct dk_map); 22000 rval = ddi_copyout(un->un_map, (void *)arg, size, flag); 22001 if (rval != 0) { 22002 rval = EFAULT; 22003 } 22004 break; 22005 } 22006 #else /* ! _MULTI_DATAMODEL */ 22007 size = NDKMAP * sizeof (struct dk_map); 22008 rval = ddi_copyout(un->un_map, (void *)arg, size, flag); 22009 if (rval != 0) { 22010 rval = EFAULT; 22011 } 22012 #endif /* _MULTI_DATAMODEL */ 22013 return (rval); 22014 } 22015 22016 22017 /* 22018 * Function: sd_dkio_set_partition 22019 * 22020 * Description: This routine is the driver entry point for handling user 22021 * requests to set the partition table (DKIOCSAPART). The actual 22022 * device partition is not updated. 22023 * 22024 * Arguments: dev - the device number 22025 * arg - pointer to user provided dk_allmap structure used to set 22026 * the controller's notion of the partition table. 22027 * flag - this argument is a pass through to ddi_copyxxx() 22028 * directly from the mode argument of ioctl(). 22029 * 22030 * Return Code: 0 22031 * EINVAL 22032 * EFAULT 22033 * ENXIO 22034 * EIO 22035 */ 22036 22037 static int 22038 sd_dkio_set_partition(dev_t dev, caddr_t arg, int flag) 22039 { 22040 struct sd_lun *un = NULL; 22041 struct dk_map dk_map[NDKMAP]; 22042 struct dk_map *lp; 22043 int rval = 0; 22044 int size; 22045 int i; 22046 #if defined(_SUNOS_VTOC_16) 22047 struct dkl_partition *vp; 22048 #endif 22049 22050 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22051 return (ENXIO); 22052 } 22053 22054 /* 22055 * Set the map for all logical partitions. We lock 22056 * the priority just to make sure an interrupt doesn't 22057 * come in while the map is half updated. 
22058 */ 22059 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_solaris_size)) 22060 mutex_enter(SD_MUTEX(un)); 22061 if (un->un_blockcount > DK_MAX_BLOCKS) { 22062 mutex_exit(SD_MUTEX(un)); 22063 return (ENOTSUP); 22064 } 22065 mutex_exit(SD_MUTEX(un)); 22066 if (un->un_solaris_size == 0) { 22067 return (EIO); 22068 } 22069 22070 #ifdef _MULTI_DATAMODEL 22071 switch (ddi_model_convert_from(flag & FMODELS)) { 22072 case DDI_MODEL_ILP32: { 22073 struct dk_map32 dk_map32[NDKMAP]; 22074 22075 size = NDKMAP * sizeof (struct dk_map32); 22076 rval = ddi_copyin((void *)arg, dk_map32, size, flag); 22077 if (rval != 0) { 22078 return (EFAULT); 22079 } 22080 for (i = 0; i < NDKMAP; i++) { 22081 dk_map[i].dkl_cylno = dk_map32[i].dkl_cylno; 22082 dk_map[i].dkl_nblk = dk_map32[i].dkl_nblk; 22083 } 22084 break; 22085 } 22086 case DDI_MODEL_NONE: 22087 size = NDKMAP * sizeof (struct dk_map); 22088 rval = ddi_copyin((void *)arg, dk_map, size, flag); 22089 if (rval != 0) { 22090 return (EFAULT); 22091 } 22092 break; 22093 } 22094 #else /* ! _MULTI_DATAMODEL */ 22095 size = NDKMAP * sizeof (struct dk_map); 22096 rval = ddi_copyin((void *)arg, dk_map, size, flag); 22097 if (rval != 0) { 22098 return (EFAULT); 22099 } 22100 #endif /* _MULTI_DATAMODEL */ 22101 22102 mutex_enter(SD_MUTEX(un)); 22103 /* Note: The size used in this bcopy is set based upon the data model */ 22104 bcopy(dk_map, un->un_map, size); 22105 #if defined(_SUNOS_VTOC_16) 22106 vp = (struct dkl_partition *)&(un->un_vtoc); 22107 #endif /* defined(_SUNOS_VTOC_16) */ 22108 for (i = 0; i < NDKMAP; i++) { 22109 lp = &un->un_map[i]; 22110 un->un_offset[i] = 22111 un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno; 22112 #if defined(_SUNOS_VTOC_16) 22113 vp->p_start = un->un_offset[i]; 22114 vp->p_size = lp->dkl_nblk; 22115 vp++; 22116 #endif /* defined(_SUNOS_VTOC_16) */ 22117 #if defined(__i386) || defined(__amd64) 22118 un->un_offset[i] += un->un_solaris_offset; 22119 #endif 22120 } 22121 mutex_exit(SD_MUTEX(un)); 22122 return (rval); 22123 } 22124 22125 22126 /* 22127 * Function: sd_dkio_get_vtoc 22128 * 22129 * Description: This routine is the driver entry point for handling user 22130 * requests to get the current volume table of contents 22131 * (DKIOCGVTOC). 22132 * 22133 * Arguments: dev - the device number 22134 * arg - pointer to user provided vtoc structure specifying 22135 * the current vtoc. 22136 * flag - this argument is a pass through to ddi_copyxxx() 22137 * directly from the mode argument of ioctl(). 22138 * geom_validated - flag indicating if the device geometry has been 22139 * previously validated in the sdioctl routine. 22140 * 22141 * Return Code: 0 22142 * EFAULT 22143 * ENXIO 22144 * EIO 22145 */ 22146 22147 static int 22148 sd_dkio_get_vtoc(dev_t dev, caddr_t arg, int flag, int geom_validated) 22149 { 22150 struct sd_lun *un = NULL; 22151 #if defined(_SUNOS_VTOC_8) 22152 struct vtoc user_vtoc; 22153 #endif /* defined(_SUNOS_VTOC_8) */ 22154 int rval = 0; 22155 22156 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22157 return (ENXIO); 22158 } 22159 22160 mutex_enter(SD_MUTEX(un)); 22161 if (geom_validated == FALSE) { 22162 /* 22163 * sd_validate_geometry does not spin a disk up 22164 * if it was spun down. We need to make sure it 22165 * is ready. 
22166 */ 22167 mutex_exit(SD_MUTEX(un)); 22168 if ((rval = sd_send_scsi_TEST_UNIT_READY(un, 0)) != 0) { 22169 return (rval); 22170 } 22171 mutex_enter(SD_MUTEX(un)); 22172 if ((rval = sd_validate_geometry(un, SD_PATH_DIRECT)) != 0) { 22173 mutex_exit(SD_MUTEX(un)); 22174 return (rval); 22175 } 22176 } 22177 22178 #if defined(_SUNOS_VTOC_8) 22179 sd_build_user_vtoc(un, &user_vtoc); 22180 mutex_exit(SD_MUTEX(un)); 22181 22182 #ifdef _MULTI_DATAMODEL 22183 switch (ddi_model_convert_from(flag & FMODELS)) { 22184 case DDI_MODEL_ILP32: { 22185 struct vtoc32 user_vtoc32; 22186 22187 vtoctovtoc32(user_vtoc, user_vtoc32); 22188 if (ddi_copyout(&user_vtoc32, (void *)arg, 22189 sizeof (struct vtoc32), flag)) { 22190 return (EFAULT); 22191 } 22192 break; 22193 } 22194 22195 case DDI_MODEL_NONE: 22196 if (ddi_copyout(&user_vtoc, (void *)arg, 22197 sizeof (struct vtoc), flag)) { 22198 return (EFAULT); 22199 } 22200 break; 22201 } 22202 #else /* ! _MULTI_DATAMODEL */ 22203 if (ddi_copyout(&user_vtoc, (void *)arg, sizeof (struct vtoc), flag)) { 22204 return (EFAULT); 22205 } 22206 #endif /* _MULTI_DATAMODEL */ 22207 22208 #elif defined(_SUNOS_VTOC_16) 22209 mutex_exit(SD_MUTEX(un)); 22210 22211 #ifdef _MULTI_DATAMODEL 22212 /* 22213 * The un_vtoc structure is a "struct dk_vtoc" which is always 22214 * 32-bit to maintain compatibility with existing on-disk 22215 * structures. Thus, we need to convert the structure when copying 22216 * it out to a datamodel-dependent "struct vtoc" in a 64-bit 22217 * program. If the target is a 32-bit program, then no conversion 22218 * is necessary. 22219 */ 22220 /* LINTED: logical expression always true: op "||" */ 22221 ASSERT(sizeof (un->un_vtoc) == sizeof (struct vtoc32)); 22222 switch (ddi_model_convert_from(flag & FMODELS)) { 22223 case DDI_MODEL_ILP32: 22224 if (ddi_copyout(&(un->un_vtoc), (void *)arg, 22225 sizeof (un->un_vtoc), flag)) { 22226 return (EFAULT); 22227 } 22228 break; 22229 22230 case DDI_MODEL_NONE: { 22231 struct vtoc user_vtoc; 22232 22233 vtoc32tovtoc(un->un_vtoc, user_vtoc); 22234 if (ddi_copyout(&user_vtoc, (void *)arg, 22235 sizeof (struct vtoc), flag)) { 22236 return (EFAULT); 22237 } 22238 break; 22239 } 22240 } 22241 #else /* ! _MULTI_DATAMODEL */ 22242 if (ddi_copyout(&(un->un_vtoc), (void *)arg, sizeof (un->un_vtoc), 22243 flag)) { 22244 return (EFAULT); 22245 } 22246 #endif /* _MULTI_DATAMODEL */ 22247 #else 22248 #error "No VTOC format defined." 
22249 #endif 22250 22251 return (rval); 22252 } 22253 22254 static int 22255 sd_dkio_get_efi(dev_t dev, caddr_t arg, int flag) 22256 { 22257 struct sd_lun *un = NULL; 22258 dk_efi_t user_efi; 22259 int rval = 0; 22260 void *buffer; 22261 22262 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) 22263 return (ENXIO); 22264 22265 if (ddi_copyin(arg, &user_efi, sizeof (dk_efi_t), flag)) 22266 return (EFAULT); 22267 22268 user_efi.dki_data = (void *)(uintptr_t)user_efi.dki_data_64; 22269 22270 if ((user_efi.dki_length % un->un_tgt_blocksize) || 22271 (user_efi.dki_length > un->un_max_xfer_size)) 22272 return (EINVAL); 22273 22274 buffer = kmem_alloc(user_efi.dki_length, KM_SLEEP); 22275 rval = sd_send_scsi_READ(un, buffer, user_efi.dki_length, 22276 user_efi.dki_lba, SD_PATH_DIRECT); 22277 if (rval == 0 && ddi_copyout(buffer, user_efi.dki_data, 22278 user_efi.dki_length, flag) != 0) 22279 rval = EFAULT; 22280 22281 kmem_free(buffer, user_efi.dki_length); 22282 return (rval); 22283 } 22284 22285 /* 22286 * Function: sd_build_user_vtoc 22287 * 22288 * Description: This routine populates a pass by reference variable with the 22289 * current volume table of contents. 22290 * 22291 * Arguments: un - driver soft state (unit) structure 22292 * user_vtoc - pointer to vtoc structure to be populated 22293 */ 22294 22295 static void 22296 sd_build_user_vtoc(struct sd_lun *un, struct vtoc *user_vtoc) 22297 { 22298 struct dk_map2 *lpart; 22299 struct dk_map *lmap; 22300 struct partition *vpart; 22301 int nblks; 22302 int i; 22303 22304 ASSERT(mutex_owned(SD_MUTEX(un))); 22305 22306 /* 22307 * Return vtoc structure fields in the provided VTOC area, addressed 22308 * by *vtoc. 22309 */ 22310 bzero(user_vtoc, sizeof (struct vtoc)); 22311 user_vtoc->v_bootinfo[0] = un->un_vtoc.v_bootinfo[0]; 22312 user_vtoc->v_bootinfo[1] = un->un_vtoc.v_bootinfo[1]; 22313 user_vtoc->v_bootinfo[2] = un->un_vtoc.v_bootinfo[2]; 22314 user_vtoc->v_sanity = VTOC_SANE; 22315 user_vtoc->v_version = un->un_vtoc.v_version; 22316 bcopy(un->un_vtoc.v_volume, user_vtoc->v_volume, LEN_DKL_VVOL); 22317 user_vtoc->v_sectorsz = un->un_sys_blocksize; 22318 user_vtoc->v_nparts = un->un_vtoc.v_nparts; 22319 bcopy(un->un_vtoc.v_reserved, user_vtoc->v_reserved, 22320 sizeof (un->un_vtoc.v_reserved)); 22321 /* 22322 * Convert partitioning information. 22323 * 22324 * Note the conversion from starting cylinder number 22325 * to starting sector number. 
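 *
 * For instance, with a hypothetical geometry of dkg_nhead = 16 and
 * dkg_nsect = 63, nblks = 63 * 16 = 1008, so a partition whose
 * label entry starts at cylinder 10 is reported to the user with
 * p_start = 10 * 1008 = 10080 sectors.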
22326 */ 22327 lmap = un->un_map; 22328 lpart = (struct dk_map2 *)un->un_vtoc.v_part; 22329 vpart = user_vtoc->v_part; 22330 22331 nblks = un->un_g.dkg_nsect * un->un_g.dkg_nhead; 22332 22333 for (i = 0; i < V_NUMPAR; i++) { 22334 vpart->p_tag = lpart->p_tag; 22335 vpart->p_flag = lpart->p_flag; 22336 vpart->p_start = lmap->dkl_cylno * nblks; 22337 vpart->p_size = lmap->dkl_nblk; 22338 lmap++; 22339 lpart++; 22340 vpart++; 22341 22342 /* (4364927) */ 22343 user_vtoc->timestamp[i] = (time_t)un->un_vtoc.v_timestamp[i]; 22344 } 22345 22346 bcopy(un->un_asciilabel, user_vtoc->v_asciilabel, LEN_DKL_ASCII); 22347 } 22348 22349 static int 22350 sd_dkio_partition(dev_t dev, caddr_t arg, int flag) 22351 { 22352 struct sd_lun *un = NULL; 22353 struct partition64 p64; 22354 int rval = 0; 22355 uint_t nparts; 22356 efi_gpe_t *partitions; 22357 efi_gpt_t *buffer; 22358 diskaddr_t gpe_lba; 22359 22360 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22361 return (ENXIO); 22362 } 22363 22364 if (ddi_copyin((const void *)arg, &p64, 22365 sizeof (struct partition64), flag)) { 22366 return (EFAULT); 22367 } 22368 22369 buffer = kmem_alloc(EFI_MIN_ARRAY_SIZE, KM_SLEEP); 22370 rval = sd_send_scsi_READ(un, buffer, DEV_BSIZE, 22371 1, SD_PATH_DIRECT); 22372 if (rval != 0) 22373 goto done_error; 22374 22375 sd_swap_efi_gpt(buffer); 22376 22377 if ((rval = sd_validate_efi(buffer)) != 0) 22378 goto done_error; 22379 22380 nparts = buffer->efi_gpt_NumberOfPartitionEntries; 22381 gpe_lba = buffer->efi_gpt_PartitionEntryLBA; 22382 if (p64.p_partno > nparts) { 22383 /* couldn't find it */ 22384 rval = ESRCH; 22385 goto done_error; 22386 } 22387 /* 22388 * if we're dealing with a partition that's out of the normal 22389 * 16K block, adjust accordingly 22390 */ 22391 gpe_lba += p64.p_partno / sizeof (efi_gpe_t); 22392 rval = sd_send_scsi_READ(un, buffer, EFI_MIN_ARRAY_SIZE, 22393 gpe_lba, SD_PATH_DIRECT); 22394 if (rval) { 22395 goto done_error; 22396 } 22397 partitions = (efi_gpe_t *)buffer; 22398 22399 sd_swap_efi_gpe(nparts, partitions); 22400 22401 partitions += p64.p_partno; 22402 bcopy(&partitions->efi_gpe_PartitionTypeGUID, &p64.p_type, 22403 sizeof (struct uuid)); 22404 p64.p_start = partitions->efi_gpe_StartingLBA; 22405 p64.p_size = partitions->efi_gpe_EndingLBA - 22406 p64.p_start + 1; 22407 22408 if (ddi_copyout(&p64, (void *)arg, sizeof (struct partition64), flag)) 22409 rval = EFAULT; 22410 22411 done_error: 22412 kmem_free(buffer, EFI_MIN_ARRAY_SIZE); 22413 return (rval); 22414 } 22415 22416 22417 /* 22418 * Function: sd_dkio_set_vtoc 22419 * 22420 * Description: This routine is the driver entry point for handling user 22421 * requests to set the current volume table of contents 22422 * (DKIOCSVTOC). 22423 * 22424 * Arguments: dev - the device number 22425 * arg - pointer to user provided vtoc structure used to set the 22426 * current vtoc. 22427 * flag - this argument is a pass through to ddi_copyxxx() 22428 * directly from the mode argument of ioctl(). 
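 *
 * A hypothetical userland sketch (illustration only; assumes
 * <sys/dkio.h> and <sys/vtoc.h>, an already-open fd, and a made-up
 * partition edit): read the current vtoc, modify it, write it back:
 *
 *	struct vtoc vt;
 *
 *	if (ioctl(fd, DKIOCGVTOC, &vt) == 0) {
 *		vt.v_part[0].p_tag = V_USR;
 *		(void) ioctl(fd, DKIOCSVTOC, &vt);
 *	}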
22429 * 22430 * Return Code: 0 22431 * EFAULT 22432 * ENXIO 22433 * EINVAL 22434 * ENOTSUP 22435 */ 22436 22437 static int 22438 sd_dkio_set_vtoc(dev_t dev, caddr_t arg, int flag) 22439 { 22440 struct sd_lun *un = NULL; 22441 struct vtoc user_vtoc; 22442 int rval = 0; 22443 22444 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 22445 return (ENXIO); 22446 } 22447 22448 #if defined(__i386) || defined(__amd64) 22449 if (un->un_tgt_blocksize != un->un_sys_blocksize) { 22450 return (EINVAL); 22451 } 22452 #endif 22453 22454 #ifdef _MULTI_DATAMODEL 22455 switch (ddi_model_convert_from(flag & FMODELS)) { 22456 case DDI_MODEL_ILP32: { 22457 struct vtoc32 user_vtoc32; 22458 22459 if (ddi_copyin((const void *)arg, &user_vtoc32, 22460 sizeof (struct vtoc32), flag)) { 22461 return (EFAULT); 22462 } 22463 vtoc32tovtoc(user_vtoc32, user_vtoc); 22464 break; 22465 } 22466 22467 case DDI_MODEL_NONE: 22468 if (ddi_copyin((const void *)arg, &user_vtoc, 22469 sizeof (struct vtoc), flag)) { 22470 return (EFAULT); 22471 } 22472 break; 22473 } 22474 #else /* ! _MULTI_DATAMODEL */ 22475 if (ddi_copyin((const void *)arg, &user_vtoc, 22476 sizeof (struct vtoc), flag)) { 22477 return (EFAULT); 22478 } 22479 #endif /* _MULTI_DATAMODEL */ 22480 22481 mutex_enter(SD_MUTEX(un)); 22482 if (un->un_blockcount > DK_MAX_BLOCKS) { 22483 mutex_exit(SD_MUTEX(un)); 22484 return (ENOTSUP); 22485 } 22486 if (un->un_g.dkg_ncyl == 0) { 22487 mutex_exit(SD_MUTEX(un)); 22488 return (EINVAL); 22489 } 22490 22491 mutex_exit(SD_MUTEX(un)); 22492 sd_clear_efi(un); 22493 ddi_remove_minor_node(SD_DEVINFO(un), "wd"); 22494 ddi_remove_minor_node(SD_DEVINFO(un), "wd,raw"); 22495 (void) ddi_create_minor_node(SD_DEVINFO(un), "h", 22496 S_IFBLK, (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE, 22497 un->un_node_type, NULL); 22498 (void) ddi_create_minor_node(SD_DEVINFO(un), "h,raw", 22499 S_IFCHR, (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE, 22500 un->un_node_type, NULL); 22501 mutex_enter(SD_MUTEX(un)); 22502 22503 if ((rval = sd_build_label_vtoc(un, &user_vtoc)) == 0) { 22504 if ((rval = sd_write_label(dev)) == 0) { 22505 if ((rval = sd_validate_geometry(un, SD_PATH_DIRECT)) 22506 != 0) { 22507 SD_ERROR(SD_LOG_IOCTL_DKIO, un, 22508 "sd_dkio_set_vtoc: " 22509 "Failed validate geometry\n"); 22510 } 22511 } 22512 } 22513 22514 /* 22515 * If sd_build_label_vtoc, or sd_write_label failed above write the 22516 * devid anyway, what can it hurt? Also preserve the device id by 22517 * writing to the disk acyl for the case where a devid has been 22518 * fabricated. 22519 */ 22520 if (!ISREMOVABLE(un) && !ISCD(un) && 22521 (un->un_f_opt_fab_devid == TRUE)) { 22522 if (un->un_devid == NULL) { 22523 sd_register_devid(un, SD_DEVINFO(un), 22524 SD_TARGET_IS_UNRESERVED); 22525 } else { 22526 /* 22527 * The device id for this disk has been 22528 * fabricated. Fabricated device id's are 22529 * managed by storing them in the last 2 22530 * available sectors on the drive. The device 22531 * id must be preserved by writing it back out 22532 * to this location. 22533 */ 22534 if (sd_write_deviceid(un) != 0) { 22535 ddi_devid_free(un->un_devid); 22536 un->un_devid = NULL; 22537 } 22538 } 22539 } 22540 mutex_exit(SD_MUTEX(un)); 22541 return (rval); 22542 } 22543 22544 22545 /* 22546 * Function: sd_build_label_vtoc 22547 * 22548 * Description: This routine updates the driver soft state current volume table 22549 * of contents based on a user specified vtoc. 
22550 * 22551 * Arguments: un - driver soft state (unit) structure 22552 * user_vtoc - pointer to vtoc structure specifying vtoc to be used 22553 * to update the driver soft state. 22554 * 22555 * Return Code: 0 22556 * EINVAL 22557 */ 22558 22559 static int 22560 sd_build_label_vtoc(struct sd_lun *un, struct vtoc *user_vtoc) 22561 { 22562 struct dk_map *lmap; 22563 struct partition *vpart; 22564 int nblks; 22565 #if defined(_SUNOS_VTOC_8) 22566 int ncyl; 22567 struct dk_map2 *lpart; 22568 #endif /* defined(_SUNOS_VTOC_8) */ 22569 int i; 22570 22571 ASSERT(mutex_owned(SD_MUTEX(un))); 22572 22573 /* Sanity-check the vtoc */ 22574 if (user_vtoc->v_sanity != VTOC_SANE || 22575 user_vtoc->v_sectorsz != un->un_sys_blocksize || 22576 user_vtoc->v_nparts != V_NUMPAR) { 22577 return (EINVAL); 22578 } 22579 22580 nblks = un->un_g.dkg_nsect * un->un_g.dkg_nhead; 22581 if (nblks == 0) { 22582 return (EINVAL); 22583 } 22584 22585 #if defined(_SUNOS_VTOC_8) 22586 vpart = user_vtoc->v_part; 22587 for (i = 0; i < V_NUMPAR; i++) { 22588 if ((vpart->p_start % nblks) != 0) { 22589 return (EINVAL); 22590 } 22591 ncyl = vpart->p_start / nblks; 22592 ncyl += vpart->p_size / nblks; 22593 if ((vpart->p_size % nblks) != 0) { 22594 ncyl++; 22595 } 22596 if (ncyl > (int)un->un_g.dkg_ncyl) { 22597 return (EINVAL); 22598 } 22599 vpart++; 22600 } 22601 #endif /* defined(_SUNOS_VTOC_8) */ 22602 22603 /* Put appropriate vtoc structure fields into the disk label */ 22604 #if defined(_SUNOS_VTOC_16) 22605 /* 22606 * The vtoc is always a 32bit data structure to maintain the 22607 * on-disk format. Convert "in place" instead of bcopying it. 22608 */ 22609 vtoctovtoc32((*user_vtoc), (*((struct vtoc32 *)&(un->un_vtoc)))); 22610 22611 /* 22612 * in the 16-slice vtoc, starting sectors are expressed in 22613 * numbers *relative* to the start of the Solaris fdisk partition. 22614 */ 22615 lmap = un->un_map; 22616 vpart = user_vtoc->v_part; 22617 22618 for (i = 0; i < (int)user_vtoc->v_nparts; i++, lmap++, vpart++) { 22619 lmap->dkl_cylno = vpart->p_start / nblks; 22620 lmap->dkl_nblk = vpart->p_size; 22621 } 22622 22623 #elif defined(_SUNOS_VTOC_8) 22624 22625 un->un_vtoc.v_bootinfo[0] = (uint32_t)user_vtoc->v_bootinfo[0]; 22626 un->un_vtoc.v_bootinfo[1] = (uint32_t)user_vtoc->v_bootinfo[1]; 22627 un->un_vtoc.v_bootinfo[2] = (uint32_t)user_vtoc->v_bootinfo[2]; 22628 22629 un->un_vtoc.v_sanity = (uint32_t)user_vtoc->v_sanity; 22630 un->un_vtoc.v_version = (uint32_t)user_vtoc->v_version; 22631 22632 bcopy(user_vtoc->v_volume, un->un_vtoc.v_volume, LEN_DKL_VVOL); 22633 22634 un->un_vtoc.v_nparts = user_vtoc->v_nparts; 22635 22636 bcopy(user_vtoc->v_reserved, un->un_vtoc.v_reserved, 22637 sizeof (un->un_vtoc.v_reserved)); 22638 22639 /* 22640 * Note the conversion from starting sector number 22641 * to starting cylinder number. 22642 * Return error if division results in a remainder. 
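 *
 * A hypothetical example of the reverse mapping: with nblks = 1008
 * (dkg_nsect = 63, dkg_nhead = 16), a user p_start of 10080 maps
 * back to dkl_cylno = 10080 / 1008 = 10; the _SUNOS_VTOC_8 sanity
 * loop above has already rejected any p_start that is not an exact
 * multiple of nblks.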
22643 	 */
22644 	lmap = un->un_map;
22645 	lpart = un->un_vtoc.v_part;
22646 	vpart = user_vtoc->v_part;
22647 
22648 	for (i = 0; i < (int)user_vtoc->v_nparts; i++) {
22649 		lpart->p_tag = vpart->p_tag;
22650 		lpart->p_flag = vpart->p_flag;
22651 		lmap->dkl_cylno = vpart->p_start / nblks;
22652 		lmap->dkl_nblk = vpart->p_size;
22653 
22654 		lmap++;
22655 		lpart++;
22656 		vpart++;
22657 
22658 		/* (4387723) */
22659 #ifdef _LP64
22660 		if (user_vtoc->timestamp[i] > TIME32_MAX) {
22661 			un->un_vtoc.v_timestamp[i] = TIME32_MAX;
22662 		} else {
22663 			un->un_vtoc.v_timestamp[i] = user_vtoc->timestamp[i];
22664 		}
22665 #else
22666 		un->un_vtoc.v_timestamp[i] = user_vtoc->timestamp[i];
22667 #endif
22668 	}
22669 
22670 	bcopy(user_vtoc->v_asciilabel, un->un_asciilabel, LEN_DKL_ASCII);
22671 #else
22672 #error "No VTOC format defined."
22673 #endif
22674 	return (0);
22675 }
22676 
22677 /*
22678  * Function: sd_clear_efi
22679  *
22680  * Description: This routine clears all EFI labels.
22681  *
22682  * Arguments: un - driver soft state (unit) structure
22683  *
22684  * Return Code: void
22685  */
22686 
22687 static void
22688 sd_clear_efi(struct sd_lun *un)
22689 {
22690 	efi_gpt_t	*gpt;
22691 	uint_t		lbasize;
22692 	uint64_t	cap;
22693 	int		rval;
22694 
22695 	ASSERT(!mutex_owned(SD_MUTEX(un)));
22696 
22697 	gpt = kmem_alloc(sizeof (efi_gpt_t), KM_SLEEP);
22698 
22699 	if (sd_send_scsi_READ(un, gpt, DEV_BSIZE, 1, SD_PATH_DIRECT) != 0) {
22700 		goto done;
22701 	}
22702 
22703 	sd_swap_efi_gpt(gpt);
22704 	rval = sd_validate_efi(gpt);
22705 	if (rval == 0) {
22706 		/* clear primary */
22707 		bzero(gpt, sizeof (efi_gpt_t));
22708 		if ((rval = sd_send_scsi_WRITE(un, gpt, EFI_LABEL_SIZE, 1,
22709 		    SD_PATH_DIRECT))) {
22710 			SD_INFO(SD_LOG_IO_PARTITION, un,
22711 			    "sd_clear_efi: clear primary label failed\n");
22712 		}
22713 	}
22714 	/* the backup */
22715 	rval = sd_send_scsi_READ_CAPACITY(un, &cap, &lbasize,
22716 	    SD_PATH_DIRECT);
22717 	if (rval) {
22718 		goto done;
22719 	}
22720 	if ((rval = sd_send_scsi_READ(un, gpt, lbasize,
22721 	    cap - 1, SD_PATH_DIRECT)) != 0) {
22722 		goto done;
22723 	}
22724 	sd_swap_efi_gpt(gpt);
22725 	rval = sd_validate_efi(gpt);
22726 	if (rval == 0) {
22727 		/* clear backup */
22728 		SD_TRACE(SD_LOG_IOCTL, un, "sd_clear_efi clear backup@%lu\n",
22729 		    cap-1);
22730 		bzero(gpt, sizeof (efi_gpt_t));
22731 		if ((rval = sd_send_scsi_WRITE(un, gpt, EFI_LABEL_SIZE,
22732 		    cap-1, SD_PATH_DIRECT))) {
22733 			SD_INFO(SD_LOG_IO_PARTITION, un,
22734 			    "sd_clear_efi: clear backup label failed\n");
22735 		}
22736 	}
22737 
22738 done:
22739 	kmem_free(gpt, sizeof (efi_gpt_t));
22740 }
22741 
22742 /*
22743  * Function: sd_set_vtoc
22744  *
22745  * Description: This routine writes the disk label to its primary
22746  *		and backup locations on the device.
22747  * Arguments: un  - driver soft state (unit) structure
22748  *	dkl - the label data to be written
22749  *
22750  * Return Code: 0, or the error from a failing label read or write.
22751  */
22752 
22753 static int
22754 sd_set_vtoc(struct sd_lun *un, struct dk_label *dkl)
22755 {
22756 	void	*shadow_buf;
22757 	uint_t	label_addr;
22758 	int	sec;
22759 	int	blk;
22760 	int	head;
22761 	int	cyl;
22762 	int	rval;
22763 
22764 #if defined(__i386) || defined(__amd64)
22765 	label_addr = un->un_solaris_offset + DK_LABEL_LOC;
22766 #else
22767 	/* Write the primary label at block 0 of the solaris partition. */
22768 	label_addr = 0;
22769 #endif
22770 
22771 	if (NOT_DEVBSIZE(un)) {
22772 		shadow_buf = kmem_zalloc(un->un_tgt_blocksize, KM_SLEEP);
22773 		/*
22774 		 * Read the target's first block.
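 *
 * This read is the first half of a read-modify-write. On a target
 * whose block size exceeds un_sys_blocksize (say, a hypothetical
 * 2048-byte-block device), the label occupies only sizeof (struct
 * dk_label) bytes of the target block, so the remaining bytes of
 * that block must be read in and preserved around the copy below.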
22775 */ 22776 if ((rval = sd_send_scsi_READ(un, shadow_buf, 22777 un->un_tgt_blocksize, label_addr, 22778 SD_PATH_STANDARD)) != 0) { 22779 goto exit; 22780 } 22781 /* 22782 * Copy the contents of the label into the shadow buffer 22783 * which is of the size of target block size. 22784 */ 22785 bcopy(dkl, shadow_buf, sizeof (struct dk_label)); 22786 } 22787 22788 /* Write the primary label */ 22789 if (NOT_DEVBSIZE(un)) { 22790 rval = sd_send_scsi_WRITE(un, shadow_buf, un->un_tgt_blocksize, 22791 label_addr, SD_PATH_STANDARD); 22792 } else { 22793 rval = sd_send_scsi_WRITE(un, dkl, un->un_sys_blocksize, 22794 label_addr, SD_PATH_STANDARD); 22795 } 22796 if (rval != 0) { 22797 return (rval); 22798 } 22799 22800 /* 22801 * Calculate where the backup labels go. They are always on 22802 * the last alternate cylinder, but some older drives put them 22803 * on head 2 instead of the last head. They are always on the 22804 * first 5 odd sectors of the appropriate track. 22805 * 22806 * We have no choice at this point, but to believe that the 22807 * disk label is valid. Use the geometry of the disk 22808 * as described in the label. 22809 */ 22810 cyl = dkl->dkl_ncyl + dkl->dkl_acyl - 1; 22811 head = dkl->dkl_nhead - 1; 22812 22813 /* 22814 * Write and verify the backup labels. Make sure we don't try to 22815 * write past the last cylinder. 22816 */ 22817 for (sec = 1; ((sec < 5 * 2 + 1) && (sec < dkl->dkl_nsect)); sec += 2) { 22818 blk = (daddr_t)( 22819 (cyl * ((dkl->dkl_nhead * dkl->dkl_nsect) - dkl->dkl_apc)) + 22820 (head * dkl->dkl_nsect) + sec); 22821 #if defined(__i386) || defined(__amd64) 22822 blk += un->un_solaris_offset; 22823 #endif 22824 if (NOT_DEVBSIZE(un)) { 22825 uint64_t tblk; 22826 /* 22827 * Need to read the block first for read modify write. 22828 */ 22829 tblk = (uint64_t)blk; 22830 blk = (int)((tblk * un->un_sys_blocksize) / 22831 un->un_tgt_blocksize); 22832 if ((rval = sd_send_scsi_READ(un, shadow_buf, 22833 un->un_tgt_blocksize, blk, 22834 SD_PATH_STANDARD)) != 0) { 22835 goto exit; 22836 } 22837 /* 22838 * Modify the shadow buffer with the label. 22839 */ 22840 bcopy(dkl, shadow_buf, sizeof (struct dk_label)); 22841 rval = sd_send_scsi_WRITE(un, shadow_buf, 22842 un->un_tgt_blocksize, blk, SD_PATH_STANDARD); 22843 } else { 22844 rval = sd_send_scsi_WRITE(un, dkl, un->un_sys_blocksize, 22845 blk, SD_PATH_STANDARD); 22846 SD_INFO(SD_LOG_IO_PARTITION, un, 22847 "sd_set_vtoc: wrote backup label %d\n", blk); 22848 } 22849 if (rval != 0) { 22850 goto exit; 22851 } 22852 } 22853 exit: 22854 if (NOT_DEVBSIZE(un)) { 22855 kmem_free(shadow_buf, un->un_tgt_blocksize); 22856 } 22857 return (rval); 22858 } 22859 22860 /* 22861 * Function: sd_clear_vtoc 22862 * 22863 * Description: This routine clears out the VTOC labels. 
22864 * 22865 * Arguments: un - driver soft state (unit) structure 22866 * 22867 * Return: void 22868 */ 22869 22870 static void 22871 sd_clear_vtoc(struct sd_lun *un) 22872 { 22873 struct dk_label *dkl; 22874 22875 mutex_exit(SD_MUTEX(un)); 22876 dkl = kmem_zalloc(sizeof (struct dk_label), KM_SLEEP); 22877 mutex_enter(SD_MUTEX(un)); 22878 /* 22879 * sd_set_vtoc uses these fields in order to figure out 22880 * where to overwrite the backup labels 22881 */ 22882 dkl->dkl_apc = un->un_g.dkg_apc; 22883 dkl->dkl_ncyl = un->un_g.dkg_ncyl; 22884 dkl->dkl_acyl = un->un_g.dkg_acyl; 22885 dkl->dkl_nhead = un->un_g.dkg_nhead; 22886 dkl->dkl_nsect = un->un_g.dkg_nsect; 22887 mutex_exit(SD_MUTEX(un)); 22888 (void) sd_set_vtoc(un, dkl); 22889 kmem_free(dkl, sizeof (struct dk_label)); 22890 22891 mutex_enter(SD_MUTEX(un)); 22892 } 22893 22894 /* 22895 * Function: sd_write_label 22896 * 22897 * Description: This routine will validate and write the driver soft state vtoc 22898 * contents to the device. 22899 * 22900 * Arguments: dev - the device number 22901 * 22902 * Return Code: the code returned by sd_send_scsi_cmd() 22903 * 0 22904 * EINVAL 22905 * ENXIO 22906 * ENOMEM 22907 */ 22908 22909 static int 22910 sd_write_label(dev_t dev) 22911 { 22912 struct sd_lun *un; 22913 struct dk_label *dkl; 22914 short sum; 22915 short *sp; 22916 int i; 22917 int rval; 22918 22919 if (((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) || 22920 (un->un_state == SD_STATE_OFFLINE)) { 22921 return (ENXIO); 22922 } 22923 ASSERT(mutex_owned(SD_MUTEX(un))); 22924 mutex_exit(SD_MUTEX(un)); 22925 dkl = kmem_zalloc(sizeof (struct dk_label), KM_SLEEP); 22926 mutex_enter(SD_MUTEX(un)); 22927 22928 bcopy(&un->un_vtoc, &dkl->dkl_vtoc, sizeof (struct dk_vtoc)); 22929 dkl->dkl_rpm = un->un_g.dkg_rpm; 22930 dkl->dkl_pcyl = un->un_g.dkg_pcyl; 22931 dkl->dkl_apc = un->un_g.dkg_apc; 22932 dkl->dkl_intrlv = un->un_g.dkg_intrlv; 22933 dkl->dkl_ncyl = un->un_g.dkg_ncyl; 22934 dkl->dkl_acyl = un->un_g.dkg_acyl; 22935 dkl->dkl_nhead = un->un_g.dkg_nhead; 22936 dkl->dkl_nsect = un->un_g.dkg_nsect; 22937 22938 #if defined(_SUNOS_VTOC_8) 22939 dkl->dkl_obs1 = un->un_g.dkg_obs1; 22940 dkl->dkl_obs2 = un->un_g.dkg_obs2; 22941 dkl->dkl_obs3 = un->un_g.dkg_obs3; 22942 for (i = 0; i < NDKMAP; i++) { 22943 dkl->dkl_map[i].dkl_cylno = un->un_map[i].dkl_cylno; 22944 dkl->dkl_map[i].dkl_nblk = un->un_map[i].dkl_nblk; 22945 } 22946 bcopy(un->un_asciilabel, dkl->dkl_asciilabel, LEN_DKL_ASCII); 22947 #elif defined(_SUNOS_VTOC_16) 22948 dkl->dkl_skew = un->un_dkg_skew; 22949 #else 22950 #error "No VTOC format defined." 
22951 #endif 22952 22953 dkl->dkl_magic = DKL_MAGIC; 22954 dkl->dkl_write_reinstruct = un->un_g.dkg_write_reinstruct; 22955 dkl->dkl_read_reinstruct = un->un_g.dkg_read_reinstruct; 22956 22957 /* Construct checksum for the new disk label */ 22958 sum = 0; 22959 sp = (short *)dkl; 22960 i = sizeof (struct dk_label) / sizeof (short); 22961 while (i--) { 22962 sum ^= *sp++; 22963 } 22964 dkl->dkl_cksum = sum; 22965 22966 mutex_exit(SD_MUTEX(un)); 22967 22968 rval = sd_set_vtoc(un, dkl); 22969 exit: 22970 kmem_free(dkl, sizeof (struct dk_label)); 22971 mutex_enter(SD_MUTEX(un)); 22972 return (rval); 22973 } 22974 22975 static int 22976 sd_dkio_set_efi(dev_t dev, caddr_t arg, int flag) 22977 { 22978 struct sd_lun *un = NULL; 22979 dk_efi_t user_efi; 22980 int rval = 0; 22981 void *buffer; 22982 22983 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) 22984 return (ENXIO); 22985 22986 if (ddi_copyin(arg, &user_efi, sizeof (dk_efi_t), flag)) 22987 return (EFAULT); 22988 22989 user_efi.dki_data = (void *)(uintptr_t)user_efi.dki_data_64; 22990 22991 if ((user_efi.dki_length % un->un_tgt_blocksize) || 22992 (user_efi.dki_length > un->un_max_xfer_size)) 22993 return (EINVAL); 22994 22995 buffer = kmem_alloc(user_efi.dki_length, KM_SLEEP); 22996 if (ddi_copyin(user_efi.dki_data, buffer, user_efi.dki_length, flag)) { 22997 rval = EFAULT; 22998 } else { 22999 /* 23000 * let's clear the vtoc labels and clear the softstate 23001 * vtoc. 23002 */ 23003 mutex_enter(SD_MUTEX(un)); 23004 if (un->un_vtoc.v_sanity == VTOC_SANE) { 23005 SD_TRACE(SD_LOG_IO_PARTITION, un, 23006 "sd_dkio_set_efi: CLEAR VTOC\n"); 23007 sd_clear_vtoc(un); 23008 bzero(&un->un_vtoc, sizeof (struct dk_vtoc)); 23009 mutex_exit(SD_MUTEX(un)); 23010 ddi_remove_minor_node(SD_DEVINFO(un), "h"); 23011 ddi_remove_minor_node(SD_DEVINFO(un), "h,raw"); 23012 (void) ddi_create_minor_node(SD_DEVINFO(un), "wd", 23013 S_IFBLK, 23014 (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE, 23015 un->un_node_type, NULL); 23016 (void) ddi_create_minor_node(SD_DEVINFO(un), "wd,raw", 23017 S_IFCHR, 23018 (SDUNIT(dev) << SDUNIT_SHIFT) | WD_NODE, 23019 un->un_node_type, NULL); 23020 } else 23021 mutex_exit(SD_MUTEX(un)); 23022 rval = sd_send_scsi_WRITE(un, buffer, user_efi.dki_length, 23023 user_efi.dki_lba, SD_PATH_DIRECT); 23024 if (rval == 0) { 23025 mutex_enter(SD_MUTEX(un)); 23026 un->un_f_geometry_is_valid = FALSE; 23027 mutex_exit(SD_MUTEX(un)); 23028 } 23029 } 23030 kmem_free(buffer, user_efi.dki_length); 23031 return (rval); 23032 } 23033 23034 /* 23035 * Function: sd_dkio_get_mboot 23036 * 23037 * Description: This routine is the driver entry point for handling user 23038 * requests to get the current device mboot (DKIOCGMBOOT) 23039 * 23040 * Arguments: dev - the device number 23041 * arg - pointer to user provided mboot structure specifying 23042 * the current mboot. 23043 * flag - this argument is a pass through to ddi_copyxxx() 23044 * directly from the mode argument of ioctl(). 
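 *
 * A hypothetical userland sketch (illustration only; assumes
 * <sys/dkio.h>, <sys/dktp/fdisk.h> and an already-open fd):
 *
 *	struct mboot mb;
 *
 *	if (ioctl(fd, DKIOCGMBOOT, &mb) == 0)
 *		inspect_partition_table(&mb);
 *
 * where inspect_partition_table() is a made-up consumer; the
 * mb.signature field carries the little-endian 0xAA55 boot-record
 * magic checked by the set path below.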
23045 * 23046 * Return Code: 0 23047 * EINVAL 23048 * EFAULT 23049 * ENXIO 23050 */ 23051 23052 static int 23053 sd_dkio_get_mboot(dev_t dev, caddr_t arg, int flag) 23054 { 23055 struct sd_lun *un; 23056 struct mboot *mboot; 23057 int rval; 23058 size_t buffer_size; 23059 23060 if (((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) || 23061 (un->un_state == SD_STATE_OFFLINE)) { 23062 return (ENXIO); 23063 } 23064 23065 #if defined(_SUNOS_VTOC_8) 23066 if ((!ISREMOVABLE(un)) || (arg == NULL)) { 23067 #elif defined(_SUNOS_VTOC_16) 23068 if (arg == NULL) { 23069 #endif 23070 return (EINVAL); 23071 } 23072 23073 /* 23074 * Read the mboot block, located at absolute block 0 on the target. 23075 */ 23076 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct mboot)); 23077 23078 SD_TRACE(SD_LOG_IO_PARTITION, un, 23079 "sd_dkio_get_mboot: allocation size: 0x%x\n", buffer_size); 23080 23081 mboot = kmem_zalloc(buffer_size, KM_SLEEP); 23082 if ((rval = sd_send_scsi_READ(un, mboot, buffer_size, 0, 23083 SD_PATH_STANDARD)) == 0) { 23084 if (ddi_copyout(mboot, (void *)arg, 23085 sizeof (struct mboot), flag) != 0) { 23086 rval = EFAULT; 23087 } 23088 } 23089 kmem_free(mboot, buffer_size); 23090 return (rval); 23091 } 23092 23093 23094 /* 23095 * Function: sd_dkio_set_mboot 23096 * 23097 * Description: This routine is the driver entry point for handling user 23098 * requests to validate and set the device master boot 23099 * (DKIOCSMBOOT). 23100 * 23101 * Arguments: dev - the device number 23102 * arg - pointer to user provided mboot structure used to set the 23103 * master boot. 23104 * flag - this argument is a pass through to ddi_copyxxx() 23105 * directly from the mode argument of ioctl(). 23106 * 23107 * Return Code: 0 23108 * EINVAL 23109 * EFAULT 23110 * ENXIO 23111 */ 23112 23113 static int 23114 sd_dkio_set_mboot(dev_t dev, caddr_t arg, int flag) 23115 { 23116 struct sd_lun *un = NULL; 23117 struct mboot *mboot = NULL; 23118 int rval; 23119 ushort_t magic; 23120 23121 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23122 return (ENXIO); 23123 } 23124 23125 ASSERT(!mutex_owned(SD_MUTEX(un))); 23126 23127 #if defined(_SUNOS_VTOC_8) 23128 if (!ISREMOVABLE(un)) { 23129 return (EINVAL); 23130 } 23131 #endif 23132 23133 if (arg == NULL) { 23134 return (EINVAL); 23135 } 23136 23137 mboot = kmem_zalloc(sizeof (struct mboot), KM_SLEEP); 23138 23139 if (ddi_copyin((const void *)arg, mboot, 23140 sizeof (struct mboot), flag) != 0) { 23141 kmem_free(mboot, (size_t)(sizeof (struct mboot))); 23142 return (EFAULT); 23143 } 23144 23145 /* Is this really a master boot record? */ 23146 magic = LE_16(mboot->signature); 23147 if (magic != MBB_MAGIC) { 23148 kmem_free(mboot, (size_t)(sizeof (struct mboot))); 23149 return (EINVAL); 23150 } 23151 23152 rval = sd_send_scsi_WRITE(un, mboot, un->un_sys_blocksize, 0, 23153 SD_PATH_STANDARD); 23154 23155 mutex_enter(SD_MUTEX(un)); 23156 #if defined(__i386) || defined(__amd64) 23157 if (rval == 0) { 23158 /* 23159 * mboot has been written successfully. 23160 * update the fdisk and vtoc tables in memory 23161 */ 23162 rval = sd_update_fdisk_and_vtoc(un); 23163 if ((un->un_f_geometry_is_valid == FALSE) || (rval != 0)) { 23164 mutex_exit(SD_MUTEX(un)); 23165 kmem_free(mboot, (size_t)(sizeof (struct mboot))); 23166 return (rval); 23167 } 23168 } 23169 23170 /* 23171 * If the mboot write fails, write the devid anyway, what can it hurt? 23172 * Also preserve the device id by writing to the disk acyl for the case 23173 * where a devid has been fabricated. 
23174 	 */
23175 	if (!ISREMOVABLE(un) && !ISCD(un) &&
23176 	    (un->un_f_opt_fab_devid == TRUE)) {
23177 		if (un->un_devid == NULL) {
23178 			sd_register_devid(un, SD_DEVINFO(un),
23179 			    SD_TARGET_IS_UNRESERVED);
23180 		} else {
23181 			/*
23182 			 * The device id for this disk has been
23183 			 * fabricated. Fabricated device id's are
23184 			 * managed by storing them in the last 2
23185 			 * available sectors on the drive. The device
23186 			 * id must be preserved by writing it back out
23187 			 * to this location.
23188 			 */
23189 			if (sd_write_deviceid(un) != 0) {
23190 				ddi_devid_free(un->un_devid);
23191 				un->un_devid = NULL;
23192 			}
23193 		}
23194 	}
23195 #else
23196 	if (rval == 0) {
23197 		/*
23198 		 * mboot has been written successfully.
23199 		 * set up the default geometry and VTOC
23200 		 */
23201 		if (un->un_blockcount <= DK_MAX_BLOCKS)
23202 			sd_setup_default_geometry(un);
23203 	}
23204 #endif
23205 	mutex_exit(SD_MUTEX(un));
23206 	kmem_free(mboot, (size_t)(sizeof (struct mboot)));
23207 	return (rval);
23208 }
23209 
23210 
23211 /*
23212  * Function: sd_setup_default_geometry
23213  *
23214  * Description: This local utility routine sets the default geometry as part of
23215  *		setting the device mboot.
23216  *
23217  * Arguments: un - driver soft state (unit) structure
23218  *
23219  * Note: This may be redundant with sd_build_default_label.
23220  */
23221 
23222 static void
23223 sd_setup_default_geometry(struct sd_lun *un)
23224 {
23225 	/* zero out the soft state geometry and partition table. */
23226 	bzero(&un->un_g, sizeof (struct dk_geom));
23227 	bzero(&un->un_vtoc, sizeof (struct dk_vtoc));
23228 	bzero(un->un_map, NDKMAP * (sizeof (struct dk_map)));
23229 	un->un_asciilabel[0] = '\0';
23230 
23231 	/*
23232 	 * For the rpm, use the minimum for the disk.
23233 	 * For the heads, cylinders, and sectors per track:
23234 	 * if the capacity is <= 1GB, use nhead = 64 and nsect = 32;
23235 	 * otherwise use nhead = 255 and nsect = 63.
23236 	 * Note: the capacity should equal the C*H*S product, so
23237 	 * rounding truncates the usable size slightly.
23238 	 * For CD-ROMs this truncation can have adverse side
23239 	 * effects, so ncyl and nhead are set to 1. Note that
23240 	 * nsect is a ushort and will overflow for most
23241 	 * CD-ROM block counts.
23242 	 */
23243 	if (ISCD(un)) {
23244 		un->un_g.dkg_ncyl = 1;
23245 		un->un_g.dkg_nhead = 1;
23246 		un->un_g.dkg_nsect = un->un_blockcount;
23247 	} else {
23248 		if (un->un_blockcount <= 0x1000) {
23249 			/* Needed for unlabeled SCSI floppies.
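 * The 0x1000-block cutoff is 4096 blocks, i.e. 2 MB assuming the
 * usual 512-byte block size, which matches the 2-head, 80-cylinder
 * layout assigned below.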
 */
23250 			un->un_g.dkg_nhead = 2;
23251 			un->un_g.dkg_ncyl = 80;
23252 			un->un_g.dkg_pcyl = 80;
23253 			un->un_g.dkg_nsect = un->un_blockcount / (2 * 80);
23254 		} else if (un->un_blockcount <= 0x200000) {
23255 			un->un_g.dkg_nhead = 64;
23256 			un->un_g.dkg_nsect = 32;
23257 			un->un_g.dkg_ncyl = un->un_blockcount / (64 * 32);
23258 		} else {
23259 			un->un_g.dkg_nhead = 255;
23260 			un->un_g.dkg_nsect = 63;
23261 			un->un_g.dkg_ncyl = un->un_blockcount / (255 * 63);
23262 		}
23263 		un->un_blockcount = un->un_g.dkg_ncyl *
23264 		    un->un_g.dkg_nhead * un->un_g.dkg_nsect;
23265 	}
23266 	un->un_g.dkg_acyl = 0;
23267 	un->un_g.dkg_bcyl = 0;
23268 	un->un_g.dkg_intrlv = 1;
23269 	un->un_g.dkg_rpm = 200;
23270 	un->un_g.dkg_read_reinstruct = 0;
23271 	un->un_g.dkg_write_reinstruct = 0;
23272 	if (un->un_g.dkg_pcyl == 0) {
23273 		un->un_g.dkg_pcyl = un->un_g.dkg_ncyl + un->un_g.dkg_acyl;
23274 	}
23275 
23276 	un->un_map['a'-'a'].dkl_cylno = 0;
23277 	un->un_map['a'-'a'].dkl_nblk = un->un_blockcount;
23278 	un->un_map['c'-'a'].dkl_cylno = 0;
23279 	un->un_map['c'-'a'].dkl_nblk = un->un_blockcount;
23280 	un->un_f_geometry_is_valid = FALSE;
23281 }
23282 
23283 
23284 #if defined(__i386) || defined(__amd64)
23285 /*
23286  * Function: sd_update_fdisk_and_vtoc
23287  *
23288  * Description: This local utility routine updates the device fdisk and vtoc
23289  *		as part of setting the device mboot.
23290  *
23291  * Arguments: un - driver soft state (unit) structure
23292  *
23293  * Return Code: 0 for success or errno-type return code.
23294  *
23295  * Note: x86: This looks like a duplicate of sd_validate_geometry(), but
23296  *	these did exist separately in x86 sd.c!!!
23297  */
23298 
23299 static int
23300 sd_update_fdisk_and_vtoc(struct sd_lun *un)
23301 {
23302 	static char	labelstring[128];
23303 	static char	buf[256];
23304 	char		*label = 0;
23305 	int		count;
23306 	int		label_rc = 0;
23307 	int		gvalid = un->un_f_geometry_is_valid;
23308 	int		fdisk_rval;
23309 	int		lbasize;
23310 	int		capacity;
23311 
23312 	ASSERT(mutex_owned(SD_MUTEX(un)));
23313 
23314 	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
23315 		return (EINVAL);
23316 	}
23317 
23318 	if (un->un_f_blockcount_is_valid == FALSE) {
23319 		return (EINVAL);
23320 	}
23321 
23322 #if defined(_SUNOS_VTOC_16)
23323 	/*
23324 	 * Set up the "whole disk" fdisk partition; this should always
23325 	 * exist, regardless of whether the disk contains an fdisk table
23326 	 * or vtoc.
23327 	 */
23328 	un->un_map[P0_RAW_DISK].dkl_cylno = 0;
23329 	un->un_map[P0_RAW_DISK].dkl_nblk = un->un_blockcount;
23330 #endif	/* defined(_SUNOS_VTOC_16) */
23331 
23332 	/*
23333 	 * copy the lbasize and capacity so that if they're
23334 	 * reset while we're not holding the SD_MUTEX(un), we will
23335 	 * continue to use valid values after the SD_MUTEX(un) is
23336 	 * reacquired.
23337 	 */
23338 	lbasize = un->un_tgt_blocksize;
23339 	capacity = un->un_blockcount;
23340 
23341 	/*
23342 	 * refresh the logical and physical geometry caches.
23343 	 * (data from mode sense format/rigid disk geometry pages,
23344 	 * and scsi_ifgetcap("geometry").
23345 	 */
23346 	sd_resync_geom_caches(un, capacity, lbasize, SD_PATH_DIRECT);
23347 
23348 	/*
23349 	 * Only DIRECT ACCESS devices will have Sun labels.
23350 * CD's supposedly have a Sun label, too 23351 */ 23352 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT || ISREMOVABLE(un)) { 23353 fdisk_rval = sd_read_fdisk(un, capacity, lbasize, 23354 SD_PATH_DIRECT); 23355 if (fdisk_rval == SD_CMD_FAILURE) { 23356 ASSERT(mutex_owned(SD_MUTEX(un))); 23357 return (EIO); 23358 } 23359 23360 if (fdisk_rval == SD_CMD_RESERVATION_CONFLICT) { 23361 ASSERT(mutex_owned(SD_MUTEX(un))); 23362 return (EACCES); 23363 } 23364 23365 if (un->un_solaris_size <= DK_LABEL_LOC) { 23366 /* 23367 * Found fdisk table but no Solaris partition entry, 23368 * so don't call sd_uselabel() and don't create 23369 * a default label. 23370 */ 23371 label_rc = 0; 23372 un->un_f_geometry_is_valid = TRUE; 23373 goto no_solaris_partition; 23374 } 23375 23376 #if defined(_SUNOS_VTOC_8) 23377 label = (char *)un->un_asciilabel; 23378 #elif defined(_SUNOS_VTOC_16) 23379 label = (char *)un->un_vtoc.v_asciilabel; 23380 #else 23381 #error "No VTOC format defined." 23382 #endif 23383 } else if (capacity < 0) { 23384 ASSERT(mutex_owned(SD_MUTEX(un))); 23385 return (EINVAL); 23386 } 23387 23388 /* 23389 * For Removable media We reach here if we have found a 23390 * SOLARIS PARTITION. 23391 * If un_f_geometry_is_valid is FALSE it indicates that the SOLARIS 23392 * PARTITION has changed from the previous one, hence we will setup a 23393 * default VTOC in this case. 23394 */ 23395 if (un->un_f_geometry_is_valid == FALSE) { 23396 sd_build_default_label(un); 23397 label_rc = 0; 23398 } 23399 23400 no_solaris_partition: 23401 if ((!ISREMOVABLE(un) || 23402 (ISREMOVABLE(un) && un->un_mediastate == DKIO_EJECTED)) && 23403 (un->un_state == SD_STATE_NORMAL && gvalid == FALSE)) { 23404 /* 23405 * Print out a message indicating who and what we are. 23406 * We do this only when we happen to really validate the 23407 * geometry. We may call sd_validate_geometry() at other 23408 * times, ioctl()'s like Get VTOC in which case we 23409 * don't want to print the label. 23410 * If the geometry is valid, print the label string, 23411 * else print vendor and product info, if available 23412 */ 23413 if ((un->un_f_geometry_is_valid == TRUE) && (label != NULL)) { 23414 SD_INFO(SD_LOG_IOCTL_DKIO, un, "?<%s>\n", label); 23415 } else { 23416 mutex_enter(&sd_label_mutex); 23417 sd_inq_fill(SD_INQUIRY(un)->inq_vid, VIDMAX, 23418 labelstring); 23419 sd_inq_fill(SD_INQUIRY(un)->inq_pid, PIDMAX, 23420 &labelstring[64]); 23421 (void) sprintf(buf, "?Vendor '%s', product '%s'", 23422 labelstring, &labelstring[64]); 23423 if (un->un_f_blockcount_is_valid == TRUE) { 23424 (void) sprintf(&buf[strlen(buf)], 23425 ", %" PRIu64 " %u byte blocks\n", 23426 un->un_blockcount, 23427 un->un_tgt_blocksize); 23428 } else { 23429 (void) sprintf(&buf[strlen(buf)], 23430 ", (unknown capacity)\n"); 23431 } 23432 SD_INFO(SD_LOG_IOCTL_DKIO, un, buf); 23433 mutex_exit(&sd_label_mutex); 23434 } 23435 } 23436 23437 #if defined(_SUNOS_VTOC_16) 23438 /* 23439 * If we have valid geometry, set up the remaining fdisk partitions. 23440 * Note that dkl_cylno is not used for the fdisk map entries, so 23441 * we set it to an entirely bogus value. 
23442 */ 23443 for (count = 0; count < FD_NUMPART; count++) { 23444 un->un_map[FDISK_P1 + count].dkl_cylno = -1; 23445 un->un_map[FDISK_P1 + count].dkl_nblk = 23446 un->un_fmap[count].fmap_nblk; 23447 un->un_offset[FDISK_P1 + count] = 23448 un->un_fmap[count].fmap_start; 23449 } 23450 #endif 23451 23452 for (count = 0; count < NDKMAP; count++) { 23453 #if defined(_SUNOS_VTOC_8) 23454 struct dk_map *lp = &un->un_map[count]; 23455 un->un_offset[count] = 23456 un->un_g.dkg_nhead * un->un_g.dkg_nsect * lp->dkl_cylno; 23457 #elif defined(_SUNOS_VTOC_16) 23458 struct dkl_partition *vp = &un->un_vtoc.v_part[count]; 23459 un->un_offset[count] = vp->p_start + un->un_solaris_offset; 23460 #else 23461 #error "No VTOC format defined." 23462 #endif 23463 } 23464 23465 ASSERT(mutex_owned(SD_MUTEX(un))); 23466 return (label_rc); 23467 } 23468 #endif 23469 23470 23471 /* 23472 * Function: sd_check_media 23473 * 23474 * Description: This utility routine implements the functionality for the 23475 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the 23476 * driver state changes from that specified by the user 23477 * (inserted or ejected). For example, if the user specifies 23478 * DKIO_EJECTED and the current media state is inserted this 23479 * routine will immediately return DKIO_INSERTED. However, if the 23480 * current media state is not inserted the user thread will be 23481 * blocked until the drive state changes. If DKIO_NONE is specified 23482 * the user thread will block until a drive state change occurs. 23483 * 23484 * Arguments: dev - the device number 23485 * state - user pointer to a dkio_state, updated with the current 23486 * drive state at return. 23487 * 23488 * Return Code: ENXIO 23489 * EIO 23490 * EAGAIN 23491 * EINTR 23492 */ 23493 23494 static int 23495 sd_check_media(dev_t dev, enum dkio_state state) 23496 { 23497 struct sd_lun *un = NULL; 23498 enum dkio_state prev_state; 23499 opaque_t token = NULL; 23500 int rval = 0; 23501 23502 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23503 return (ENXIO); 23504 } 23505 23506 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n"); 23507 23508 mutex_enter(SD_MUTEX(un)); 23509 23510 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: " 23511 "state=%x, mediastate=%x\n", state, un->un_mediastate); 23512 23513 prev_state = un->un_mediastate; 23514 23515 /* is there anything to do? */ 23516 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) { 23517 /* 23518 * submit the request to the scsi_watch service; 23519 * scsi_media_watch_cb() does the real work 23520 */ 23521 mutex_exit(SD_MUTEX(un)); 23522 23523 /* 23524 * This change handles the case where a scsi watch request is 23525 * added to a device that is powered down. To accomplish this 23526 * we power up the device before adding the scsi watch request, 23527 * since the scsi watch sends a TUR directly to the device 23528 * which the device cannot handle if it is powered down. 23529 */ 23530 if (sd_pm_entry(un) != DDI_SUCCESS) { 23531 mutex_enter(SD_MUTEX(un)); 23532 goto done; 23533 } 23534 23535 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), 23536 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb, 23537 (caddr_t)dev); 23538 23539 sd_pm_exit(un); 23540 23541 mutex_enter(SD_MUTEX(un)); 23542 if (token == NULL) { 23543 rval = EAGAIN; 23544 goto done; 23545 } 23546 23547 /* 23548 * This is a special case IOCTL that doesn't return 23549 * until the media state changes. 
Routine sdpower
23550 	 * knows about and handles this so don't count it
23551 	 * as an active cmd in the driver, which would
23552 	 * keep the device busy to the pm framework.
23553 	 * If the count isn't decremented the device can't
23554 	 * be powered down.
23555 	 */
23556 	un->un_ncmds_in_driver--;
23557 	ASSERT(un->un_ncmds_in_driver >= 0);
23558 
23559 	/*
23560 	 * if a prior request had been made, this will be the same
23561 	 * token, as scsi_watch was designed that way.
23562 	 */
23563 	un->un_swr_token = token;
23564 	un->un_specified_mediastate = state;
23565 
23566 	/*
23567 	 * now wait for media change
23568 	 * we will not be signalled unless mediastate == state but it is
23569 	 * still better to test for this condition, since there is a
23570 	 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED
23571 	 */
23572 	SD_TRACE(SD_LOG_COMMON, un,
23573 	    "sd_check_media: waiting for media state change\n");
23574 	while (un->un_mediastate == state) {
23575 		if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
23576 			SD_TRACE(SD_LOG_COMMON, un,
23577 			    "sd_check_media: waiting for media state "
23578 			    "was interrupted\n");
23579 			un->un_ncmds_in_driver++;
23580 			rval = EINTR;
23581 			goto done;
23582 		}
23583 		SD_TRACE(SD_LOG_COMMON, un,
23584 		    "sd_check_media: received signal, state=%x\n",
23585 		    un->un_mediastate);
23586 	}
23587 	/*
23588 	 * Inc the counter to indicate the device once again
23589 	 * has an active outstanding cmd.
23590 	 */
23591 	un->un_ncmds_in_driver++;
23592 	}
23593 
23594 	/* invalidate geometry */
23595 	if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
23596 		sr_ejected(un);
23597 	}
23598 
23599 	if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
23600 		uint64_t	capacity;
23601 		uint_t		lbasize;
23602 
23603 		SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
23604 		mutex_exit(SD_MUTEX(un));
23605 		/*
23606 		 * Since the following routines use SD_PATH_DIRECT, we must
23607 		 * call PM directly before the upcoming disk accesses. This
23608 		 * may cause the disk to be powered up and spun up.
23609 		 */
23610 
23611 		if (sd_pm_entry(un) == DDI_SUCCESS) {
23612 			rval = sd_send_scsi_READ_CAPACITY(un,
23613 			    &capacity,
23614 			    &lbasize, SD_PATH_DIRECT);
23615 			if (rval != 0) {
23616 				sd_pm_exit(un);
23617 				mutex_enter(SD_MUTEX(un));
23618 				goto done;
23619 			}
23620 		} else {
23621 			rval = EIO;
23622 			mutex_enter(SD_MUTEX(un));
23623 			goto done;
23624 		}
23625 		mutex_enter(SD_MUTEX(un));
23626 
23627 		sd_update_block_info(un, lbasize, capacity);
23628 
23629 		un->un_f_geometry_is_valid = FALSE;
23630 		(void) sd_validate_geometry(un, SD_PATH_DIRECT);
23631 
23632 		mutex_exit(SD_MUTEX(un));
23633 		rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_PREVENT,
23634 		    SD_PATH_DIRECT);
23635 		sd_pm_exit(un);
23636 
23637 		mutex_enter(SD_MUTEX(un));
23638 	}
23639 done:
23640 	un->un_f_watcht_stopped = FALSE;
23641 	if (un->un_swr_token) {
23642 		/*
23643 		 * Use of this local token and the mutex ensures that we avoid
23644 		 * some race conditions associated with terminating the
23645 		 * scsi watch.
23646 		 */
23647 		token = un->un_swr_token;
23648 		un->un_swr_token = (opaque_t)NULL;
23649 		mutex_exit(SD_MUTEX(un));
23650 		(void) scsi_watch_request_terminate(token,
23651 		    SCSI_WATCH_TERMINATE_WAIT);
23652 		mutex_enter(SD_MUTEX(un));
23653 	}
23654 
23655 	/*
23656 	 * Update the capacity kstat value, if no media previously
23657 	 * (capacity kstat is 0) and a media has been inserted
23658 	 * (un_f_blockcount_is_valid == TRUE).
23659 	 * This is a more generic way than checking for ISREMOVABLE.
23660 	 */
23661 	if (un->un_errstats) {
23662 		struct sd_errstats *stp = NULL;
23663 
23664 		stp = (struct sd_errstats *)un->un_errstats->ks_data;
23665 		if ((stp->sd_capacity.value.ui64 == 0) &&
23666 		    (un->un_f_blockcount_is_valid == TRUE)) {
23667 			stp->sd_capacity.value.ui64 =
23668 			    (uint64_t)((uint64_t)un->un_blockcount *
23669 			    un->un_sys_blocksize);
23670 		}
23671 	}
23672 	mutex_exit(SD_MUTEX(un));
23673 	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n");
23674 	return (rval);
23675 }
23676 
23677 
23678 /*
23679  * Function: sd_delayed_cv_broadcast
23680  *
23681  * Description: Delayed cv_broadcast to allow for target to recover from media
23682  *		insertion.
23683  *
23684  * Arguments: arg - driver soft state (unit) structure
23685  */
23686 
23687 static void
23688 sd_delayed_cv_broadcast(void *arg)
23689 {
23690 	struct sd_lun *un = arg;
23691 
23692 	SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n");
23693 
23694 	mutex_enter(SD_MUTEX(un));
23695 	un->un_dcvb_timeid = NULL;
23696 	cv_broadcast(&un->un_state_cv);
23697 	mutex_exit(SD_MUTEX(un));
23698 }
23699 
23700 
23701 /*
23702  * Function: sd_media_watch_cb
23703  *
23704  * Description: Callback routine used for support of the DKIOCSTATE ioctl. This
23705  *		routine processes the TUR sense data and updates the driver
23706  *		state if a transition has occurred. The user thread
23707  *		(sd_check_media) is then signalled.
23708  *
23709  * Arguments: arg - the device 'dev_t' is used for context to discriminate
23710  *		among multiple watches that share this callback function
23711  *	resultp - scsi watch facility result packet containing scsi
23712  *		packet, status byte and sense data
23713  *
23714  * Return Code: 0 for success, -1 for failure
23715  */
23716 
23717 static int
23718 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
23719 {
23720 	struct sd_lun		*un;
23721 	struct scsi_status	*statusp = resultp->statusp;
23722 	struct scsi_extended_sense *sensep = resultp->sensep;
23723 	enum dkio_state		state = DKIO_NONE;
23724 	dev_t			dev = (dev_t)arg;
23725 	uchar_t			actual_sense_length;
23726 
23727 	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23728 		return (-1);
23729 	}
23730 	actual_sense_length = resultp->actual_sense_length;
23731 
23732 	mutex_enter(SD_MUTEX(un));
23733 	SD_TRACE(SD_LOG_COMMON, un,
23734 	    "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
23735 	    *((char *)statusp), (void *)sensep, actual_sense_length);
23736 
23737 	if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
23738 		un->un_mediastate = DKIO_DEV_GONE;
23739 		printf("sd_media_watch_cb: dev gone\n");
23740 		cv_broadcast(&un->un_state_cv);
23741 		mutex_exit(SD_MUTEX(un));
23742 
23743 		return (0);
23744 	}
23745 
23746 	/*
23747 	 * If there was a check condition, then sensep points to valid
23748 	 * sense data. If the status was not a check condition but a
23749 	 * reservation or busy status, then the new state is DKIO_NONE.
23750 	 */
23751 	if (sensep != NULL) {
23752 		SD_INFO(SD_LOG_COMMON, un,
23753 		    "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
23754 		    sensep->es_key, sensep->es_add_code, sensep->es_qual_code);
23755 		/* This routine only uses up to 13 bytes of sense data. */
23756 		if (actual_sense_length >= 13) {
23757 			if (sensep->es_key == KEY_UNIT_ATTENTION) {
23758 				if (sensep->es_add_code == 0x28) {
23759 					state = DKIO_INSERTED;
23760 				}
23761 			} else {
23762 				/*
23763 				 * A sense key/ASC/ASCQ of 02/04/02 means
23764 				 * the host should send a START command.
Explicitly 23765 * leave the media state as is 23766 * (inserted), since the media is inserted 23767 * and the host has stopped the device for PM 23768 * reasons. The next true read/write 23769 * to this media will bring the 23770 * device to the right state for 23771 * media access. 23772 */ 23773 if ((sensep->es_key == KEY_NOT_READY) && 23774 (sensep->es_add_code == 0x3a)) { 23775 state = DKIO_EJECTED; 23776 } 23777 23778 /* 23779 * If the drive is busy with an operation 23780 * or long write, keep the media in an 23781 * inserted state. 23782 */ 23783 23784 if ((sensep->es_key == KEY_NOT_READY) && 23785 (sensep->es_add_code == 0x04) && 23786 ((sensep->es_qual_code == 0x02) || 23787 (sensep->es_qual_code == 0x07) || 23788 (sensep->es_qual_code == 0x08))) { 23789 state = DKIO_INSERTED; 23790 } 23791 } 23792 } 23793 } else if ((*((char *)statusp) == STATUS_GOOD) && 23794 (resultp->pkt->pkt_reason == CMD_CMPLT)) { 23795 state = DKIO_INSERTED; 23796 } 23797 23798 SD_TRACE(SD_LOG_COMMON, un, 23799 "sd_media_watch_cb: state=%x, specified=%x\n", 23800 state, un->un_specified_mediastate); 23801 23802 /* 23803 * Now signal the waiting thread if this is *not* the specified state; 23804 * delay the signal if the state is DKIO_INSERTED to allow the target 23805 * to recover. 23806 */ 23807 if (state != un->un_specified_mediastate) { 23808 un->un_mediastate = state; 23809 if (state == DKIO_INSERTED) { 23810 /* 23811 * Delay the signal to give the drive a chance 23812 * to do what it apparently needs to do. 23813 */ 23814 SD_TRACE(SD_LOG_COMMON, un, 23815 "sd_media_watch_cb: delayed cv_broadcast\n"); 23816 if (un->un_dcvb_timeid == NULL) { 23817 un->un_dcvb_timeid = 23818 timeout(sd_delayed_cv_broadcast, un, 23819 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY)); 23820 } 23821 } else { 23822 SD_TRACE(SD_LOG_COMMON, un, 23823 "sd_media_watch_cb: immediate cv_broadcast\n"); 23824 cv_broadcast(&un->un_state_cv); 23825 } 23826 } 23827 mutex_exit(SD_MUTEX(un)); 23828 return (0); 23829 } 23830 23831 23832 /* 23833 * Function: sd_dkio_get_temp 23834 * 23835 * Description: This routine is the driver entry point for handling ioctl 23836 * requests to get the disk temperature. 23837 * 23838 * Arguments: dev - the device number 23839 * arg - pointer to user provided dk_temperature structure. 23840 * flag - this argument is a pass through to ddi_copyxxx() 23841 * directly from the mode argument of ioctl(). 23842 * 23843 * Return Code: 0 23844 * EFAULT 23845 * ENXIO 23846 * EAGAIN 23847 */ 23848 23849 static int 23850 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag) 23851 { 23852 struct sd_lun *un = NULL; 23853 struct dk_temperature *dktemp = NULL; 23854 uchar_t *temperature_page; 23855 int rval = 0; 23856 int path_flag = SD_PATH_STANDARD; 23857 23858 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 23859 return (ENXIO); 23860 } 23861 23862 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP); 23863 23864 /* copyin the disk temp argument to get the user flags */ 23865 if (ddi_copyin((void *)arg, dktemp, 23866 sizeof (struct dk_temperature), flag) != 0) { 23867 rval = EFAULT; 23868 goto done; 23869 } 23870 23871 /* Initialize the temperature to invalid. */ 23872 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 23873 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 23874 23875 /* 23876 * Note: Investigate removing the "bypass pm" semantic. 23877 * Can we just bypass PM always?
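 *
 * Illustrative user-level sketch (an assumption for illustration, not
 * part of this driver): a monitoring tool that does not want to spin
 * up a sleeping drive would set DKT_BYPASS_PM and treat EAGAIN as
 * "drive is asleep":
 *
 *	struct dk_temperature dkt;
 *	bzero(&dkt, sizeof (struct dk_temperature));
 *	dkt.dkt_flags = DKT_BYPASS_PM;
 *	if (ioctl(fd, DKIOCGTEMPERATURE, &dkt) == 0)
 *		(void) printf("cur=%d ref=%d\n",
 *		    dkt.dkt_cur_temp, dkt.dkt_ref_temp);
 *	else if (errno == EAGAIN)
 *		(void) printf("drive is in low power mode\n");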
23878 */ 23879 if (dktemp->dkt_flags & DKT_BYPASS_PM) { 23880 path_flag = SD_PATH_DIRECT; 23881 ASSERT(!mutex_owned(&un->un_pm_mutex)); 23882 mutex_enter(&un->un_pm_mutex); 23883 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 23884 /* 23885 * If DKT_BYPASS_PM is set, and the drive happens to be 23886 * in low power mode, we can not wake it up, Need to 23887 * return EAGAIN. 23888 */ 23889 mutex_exit(&un->un_pm_mutex); 23890 rval = EAGAIN; 23891 goto done; 23892 } else { 23893 /* 23894 * Indicate to PM the device is busy. This is required 23895 * to avoid a race - i.e. the ioctl is issuing a 23896 * command and the pm framework brings down the device 23897 * to low power mode (possible power cut-off on some 23898 * platforms). 23899 */ 23900 mutex_exit(&un->un_pm_mutex); 23901 if (sd_pm_entry(un) != DDI_SUCCESS) { 23902 rval = EAGAIN; 23903 goto done; 23904 } 23905 } 23906 } 23907 23908 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP); 23909 23910 if ((rval = sd_send_scsi_LOG_SENSE(un, temperature_page, 23911 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag)) != 0) { 23912 goto done2; 23913 } 23914 23915 /* 23916 * For the current temperature verify that the parameter length is 0x02 23917 * and the parameter code is 0x00 23918 */ 23919 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) && 23920 (temperature_page[5] == 0x00)) { 23921 if (temperature_page[9] == 0xFF) { 23922 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP; 23923 } else { 23924 dktemp->dkt_cur_temp = (short)(temperature_page[9]); 23925 } 23926 } 23927 23928 /* 23929 * For the reference temperature verify that the parameter 23930 * length is 0x02 and the parameter code is 0x01 23931 */ 23932 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) && 23933 (temperature_page[11] == 0x01)) { 23934 if (temperature_page[15] == 0xFF) { 23935 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP; 23936 } else { 23937 dktemp->dkt_ref_temp = (short)(temperature_page[15]); 23938 } 23939 } 23940 23941 /* Do the copyout regardless of the temperature commands status. */ 23942 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature), 23943 flag) != 0) { 23944 rval = EFAULT; 23945 } 23946 23947 done2: 23948 if (path_flag == SD_PATH_DIRECT) { 23949 sd_pm_exit(un); 23950 } 23951 23952 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE); 23953 done: 23954 if (dktemp != NULL) { 23955 kmem_free(dktemp, sizeof (struct dk_temperature)); 23956 } 23957 23958 return (rval); 23959 } 23960 23961 23962 /* 23963 * Function: sd_log_page_supported 23964 * 23965 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of 23966 * supported log pages. 23967 * 23968 * Arguments: un - 23969 * log_page - 23970 * 23971 * Return Code: -1 - on error (log sense is optional and may not be supported). 23972 * 0 - log page not found. 23973 * 1 - log page found. 23974 */ 23975 23976 static int 23977 sd_log_page_supported(struct sd_lun *un, int log_page) 23978 { 23979 uchar_t *log_page_data; 23980 int i; 23981 int match = 0; 23982 int log_size; 23983 23984 log_page_data = kmem_zalloc(0xFF, KM_SLEEP); 23985 23986 if (sd_send_scsi_LOG_SENSE(un, log_page_data, 0xFF, 0, 0x01, 0, 23987 SD_PATH_DIRECT) != 0) { 23988 SD_ERROR(SD_LOG_COMMON, un, 23989 "sd_log_page_supported: failed log page retrieval\n"); 23990 kmem_free(log_page_data, 0xFF); 23991 return (-1); 23992 } 23993 log_size = log_page_data[3]; 23994 23995 /* 23996 * The list of supported log pages start from the fourth byte. 
Check 23997 * until we run out of log pages or a match is found. 23998 */ 23999 for (i = 4; (i < (log_size + 4)) && !match; i++) { 24000 if (log_page_data[i] == log_page) { 24001 match++; 24002 } 24003 } 24004 kmem_free(log_page_data, 0xFF); 24005 return (match); 24006 } 24007 24008 24009 /* 24010 * Function: sd_mhdioc_failfast 24011 * 24012 * Description: This routine is the driver entry point for handling ioctl 24013 * requests to enable/disable the multihost failfast option. 24014 * (MHIOCENFAILFAST) 24015 * 24016 * Arguments: dev - the device number 24017 * arg - user specified probing interval. 24018 * flag - this argument is a pass through to ddi_copyxxx() 24019 * directly from the mode argument of ioctl(). 24020 * 24021 * Return Code: 0 24022 * EFAULT 24023 * ENXIO 24024 */ 24025 24026 static int 24027 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag) 24028 { 24029 struct sd_lun *un = NULL; 24030 int mh_time; 24031 int rval = 0; 24032 24033 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24034 return (ENXIO); 24035 } 24036 24037 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag)) 24038 return (EFAULT); 24039 24040 if (mh_time) { 24041 mutex_enter(SD_MUTEX(un)); 24042 un->un_resvd_status |= SD_FAILFAST; 24043 mutex_exit(SD_MUTEX(un)); 24044 /* 24045 * If mh_time is INT_MAX, then this ioctl is being used for 24046 * SCSI-3 PGR purposes, and we don't need to spawn watch thread. 24047 */ 24048 if (mh_time != INT_MAX) { 24049 rval = sd_check_mhd(dev, mh_time); 24050 } 24051 } else { 24052 (void) sd_check_mhd(dev, 0); 24053 mutex_enter(SD_MUTEX(un)); 24054 un->un_resvd_status &= ~SD_FAILFAST; 24055 mutex_exit(SD_MUTEX(un)); 24056 } 24057 return (rval); 24058 } 24059 24060 24061 /* 24062 * Function: sd_mhdioc_takeown 24063 * 24064 * Description: This routine is the driver entry point for handling ioctl 24065 * requests to forcefully acquire exclusive access rights to the 24066 * multihost disk (MHIOCTKOWN). 24067 * 24068 * Arguments: dev - the device number 24069 * arg - user provided structure specifying the delay 24070 * parameters in milliseconds 24071 * flag - this argument is a pass through to ddi_copyxxx() 24072 * directly from the mode argument of ioctl(). 24073 * 24074 * Return Code: 0 24075 * EFAULT 24076 * ENXIO 24077 */ 24078 24079 static int 24080 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag) 24081 { 24082 struct sd_lun *un = NULL; 24083 struct mhioctkown *tkown = NULL; 24084 int rval = 0; 24085 24086 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24087 return (ENXIO); 24088 } 24089 24090 if (arg != NULL) { 24091 tkown = (struct mhioctkown *) 24092 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP); 24093 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag); 24094 if (rval != 0) { 24095 rval = EFAULT; 24096 goto error; 24097 } 24098 } 24099 24100 rval = sd_take_ownership(dev, tkown); 24101 mutex_enter(SD_MUTEX(un)); 24102 if (rval == 0) { 24103 un->un_resvd_status |= SD_RESERVE; 24104 if (tkown != NULL && tkown->reinstate_resv_delay != 0) { 24105 sd_reinstate_resv_delay = 24106 tkown->reinstate_resv_delay * 1000; 24107 } else { 24108 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY; 24109 } 24110 /* 24111 * Give the scsi_watch routine interval set by 24112 * the MHIOCENFAILFAST ioctl precedence here. 
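 * (That is, if failfast is already enabled, the watch and the
 * interval chosen via MHIOCENFAILFAST are left in place; otherwise
 * the watch is restarted below with reinstate_resv_delay.)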
24113 */ 24114 if ((un->un_resvd_status & SD_FAILFAST) == 0) { 24115 mutex_exit(SD_MUTEX(un)); 24116 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000); 24117 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24118 "sd_mhdioc_takeown : %d\n", 24119 sd_reinstate_resv_delay); 24120 } else { 24121 mutex_exit(SD_MUTEX(un)); 24122 } 24123 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY, 24124 sd_mhd_reset_notify_cb, (caddr_t)un); 24125 } else { 24126 un->un_resvd_status &= ~SD_RESERVE; 24127 mutex_exit(SD_MUTEX(un)); 24128 } 24129 24130 error: 24131 if (tkown != NULL) { 24132 kmem_free(tkown, sizeof (struct mhioctkown)); 24133 } 24134 return (rval); 24135 } 24136 24137 24138 /* 24139 * Function: sd_mhdioc_release 24140 * 24141 * Description: This routine is the driver entry point for handling ioctl 24142 * requests to release exclusive access rights to the multihost 24143 * disk (MHIOCRELEASE). 24144 * 24145 * Arguments: dev - the device number 24146 * 24147 * Return Code: 0 24148 * ENXIO 24149 */ 24150 24151 static int 24152 sd_mhdioc_release(dev_t dev) 24153 { 24154 struct sd_lun *un = NULL; 24155 timeout_id_t resvd_timeid_save; 24156 int resvd_status_save; 24157 int rval = 0; 24158 24159 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24160 return (ENXIO); 24161 } 24162 24163 mutex_enter(SD_MUTEX(un)); 24164 resvd_status_save = un->un_resvd_status; 24165 un->un_resvd_status &= 24166 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE); 24167 if (un->un_resvd_timeid) { 24168 resvd_timeid_save = un->un_resvd_timeid; 24169 un->un_resvd_timeid = NULL; 24170 mutex_exit(SD_MUTEX(un)); 24171 (void) untimeout(resvd_timeid_save); 24172 } else { 24173 mutex_exit(SD_MUTEX(un)); 24174 } 24175 24176 /* 24177 * destroy any pending timeout thread that may be attempting to 24178 * reinstate reservation on this device. 24179 */ 24180 sd_rmv_resv_reclaim_req(dev); 24181 24182 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) { 24183 mutex_enter(SD_MUTEX(un)); 24184 if ((un->un_mhd_token) && 24185 ((un->un_resvd_status & SD_FAILFAST) == 0)) { 24186 mutex_exit(SD_MUTEX(un)); 24187 (void) sd_check_mhd(dev, 0); 24188 } else { 24189 mutex_exit(SD_MUTEX(un)); 24190 } 24191 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL, 24192 sd_mhd_reset_notify_cb, (caddr_t)un); 24193 } else { 24194 /* 24195 * sd_mhd_watch_cb will restart the resvd recover timeout thread 24196 */ 24197 mutex_enter(SD_MUTEX(un)); 24198 un->un_resvd_status = resvd_status_save; 24199 mutex_exit(SD_MUTEX(un)); 24200 } 24201 return (rval); 24202 } 24203 24204 24205 /* 24206 * Function: sd_mhdioc_register_devid 24207 * 24208 * Description: This routine is the driver entry point for handling ioctl 24209 * requests to register the device id (MHIOCREREGISTERDEVID). 
24210 * 24211 * Note: The implementation for this ioctl has been updated to 24212 * be consistent with the original PSARC case (1999/357) 24213 * (4375899, 4241671, 4220005) 24214 * 24215 * Arguments: dev - the device number 24216 * 24217 * Return Code: 0 24218 * ENXIO 24219 */ 24220 24221 static int 24222 sd_mhdioc_register_devid(dev_t dev) 24223 { 24224 struct sd_lun *un = NULL; 24225 int rval = 0; 24226 24227 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24228 return (ENXIO); 24229 } 24230 24231 ASSERT(!mutex_owned(SD_MUTEX(un))); 24232 24233 mutex_enter(SD_MUTEX(un)); 24234 24235 /* If a devid already exists, de-register it */ 24236 if (un->un_devid != NULL) { 24237 ddi_devid_unregister(SD_DEVINFO(un)); 24238 /* 24239 * After unregister devid, needs to free devid memory 24240 */ 24241 ddi_devid_free(un->un_devid); 24242 un->un_devid = NULL; 24243 } 24244 24245 /* Check for reservation conflict */ 24246 mutex_exit(SD_MUTEX(un)); 24247 rval = sd_send_scsi_TEST_UNIT_READY(un, 0); 24248 mutex_enter(SD_MUTEX(un)); 24249 24250 switch (rval) { 24251 case 0: 24252 sd_register_devid(un, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED); 24253 break; 24254 case EACCES: 24255 break; 24256 default: 24257 rval = EIO; 24258 } 24259 24260 mutex_exit(SD_MUTEX(un)); 24261 return (rval); 24262 } 24263 24264 24265 /* 24266 * Function: sd_mhdioc_inkeys 24267 * 24268 * Description: This routine is the driver entry point for handling ioctl 24269 * requests to issue the SCSI-3 Persistent In Read Keys command 24270 * to the device (MHIOCGRP_INKEYS). 24271 * 24272 * Arguments: dev - the device number 24273 * arg - user provided in_keys structure 24274 * flag - this argument is a pass through to ddi_copyxxx() 24275 * directly from the mode argument of ioctl(). 24276 * 24277 * Return Code: code returned by sd_persistent_reservation_in_read_keys() 24278 * ENXIO 24279 * EFAULT 24280 */ 24281 24282 static int 24283 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag) 24284 { 24285 struct sd_lun *un; 24286 mhioc_inkeys_t inkeys; 24287 int rval = 0; 24288 24289 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24290 return (ENXIO); 24291 } 24292 24293 #ifdef _MULTI_DATAMODEL 24294 switch (ddi_model_convert_from(flag & FMODELS)) { 24295 case DDI_MODEL_ILP32: { 24296 struct mhioc_inkeys32 inkeys32; 24297 24298 if (ddi_copyin(arg, &inkeys32, 24299 sizeof (struct mhioc_inkeys32), flag) != 0) { 24300 return (EFAULT); 24301 } 24302 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li; 24303 if ((rval = sd_persistent_reservation_in_read_keys(un, 24304 &inkeys, flag)) != 0) { 24305 return (rval); 24306 } 24307 inkeys32.generation = inkeys.generation; 24308 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32), 24309 flag) != 0) { 24310 return (EFAULT); 24311 } 24312 break; 24313 } 24314 case DDI_MODEL_NONE: 24315 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), 24316 flag) != 0) { 24317 return (EFAULT); 24318 } 24319 if ((rval = sd_persistent_reservation_in_read_keys(un, 24320 &inkeys, flag)) != 0) { 24321 return (rval); 24322 } 24323 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), 24324 flag) != 0) { 24325 return (EFAULT); 24326 } 24327 break; 24328 } 24329 24330 #else /* ! 
_MULTI_DATAMODEL */ 24331 24332 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) { 24333 return (EFAULT); 24334 } 24335 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag); 24336 if (rval != 0) { 24337 return (rval); 24338 } 24339 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) { 24340 return (EFAULT); 24341 } 24342 24343 #endif /* _MULTI_DATAMODEL */ 24344 24345 return (rval); 24346 } 24347 24348 24349 /* 24350 * Function: sd_mhdioc_inresv 24351 * 24352 * Description: This routine is the driver entry point for handling ioctl 24353 * requests to issue the SCSI-3 Persistent In Read Reservations 24354 * command to the device (MHIOCGRP_INRESV). 24355 * 24356 * Arguments: dev - the device number 24357 * arg - user provided in_resv structure 24358 * flag - this argument is a pass through to ddi_copyxxx() 24359 * directly from the mode argument of ioctl(). 24360 * 24361 * Return Code: code returned by sd_persistent_reservation_in_read_resv() 24362 * ENXIO 24363 * EFAULT 24364 */ 24365 24366 static int 24367 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag) 24368 { 24369 struct sd_lun *un; 24370 mhioc_inresvs_t inresvs; 24371 int rval = 0; 24372 24373 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24374 return (ENXIO); 24375 } 24376 24377 #ifdef _MULTI_DATAMODEL 24378 24379 switch (ddi_model_convert_from(flag & FMODELS)) { 24380 case DDI_MODEL_ILP32: { 24381 struct mhioc_inresvs32 inresvs32; 24382 24383 if (ddi_copyin(arg, &inresvs32, 24384 sizeof (struct mhioc_inresvs32), flag) != 0) { 24385 return (EFAULT); 24386 } 24387 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li; 24388 if ((rval = sd_persistent_reservation_in_read_resv(un, 24389 &inresvs, flag)) != 0) { 24390 return (rval); 24391 } 24392 inresvs32.generation = inresvs.generation; 24393 if (ddi_copyout(&inresvs32, arg, 24394 sizeof (struct mhioc_inresvs32), flag) != 0) { 24395 return (EFAULT); 24396 } 24397 break; 24398 } 24399 case DDI_MODEL_NONE: 24400 if (ddi_copyin(arg, &inresvs, 24401 sizeof (mhioc_inresvs_t), flag) != 0) { 24402 return (EFAULT); 24403 } 24404 if ((rval = sd_persistent_reservation_in_read_resv(un, 24405 &inresvs, flag)) != 0) { 24406 return (rval); 24407 } 24408 if (ddi_copyout(&inresvs, arg, 24409 sizeof (mhioc_inresvs_t), flag) != 0) { 24410 return (EFAULT); 24411 } 24412 break; 24413 } 24414 24415 #else /* ! _MULTI_DATAMODEL */ 24416 24417 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) { 24418 return (EFAULT); 24419 } 24420 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag); 24421 if (rval != 0) { 24422 return (rval); 24423 } 24424 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) { 24425 return (EFAULT); 24426 } 24427 24428 #endif /* ! _MULTI_DATAMODEL */ 24429 24430 return (rval); 24431 } 24432 24433 24434 /* 24435 * The following routines support the clustering functionality described below 24436 * and implement lost reservation reclaim functionality. 24437 * 24438 * Clustering 24439 * ---------- 24440 * The clustering code uses two different, independent forms of SCSI 24441 * reservation: traditional SCSI-2 Reserve/Release, and the newer SCSI-3 24442 * Persistent Group Reservations. For any particular disk, it will use either 24443 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk. 24444 * 24445 * SCSI-2 24446 * The cluster software takes ownership of a multi-hosted disk by issuing the 24447 * MHIOCTKOWN ioctl to the disk driver.
It releases ownership by issuing the 24448 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a cluster 24449 * host, just after taking ownership of the disk with the MHIOCTKOWN ioctl, then 24450 * issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the 24451 * driver. The meaning of failfast is that if the driver (on this host) ever 24452 * encounters the scsi error return code RESERVATION_CONFLICT from the device, 24453 * it should immediately panic the host. The motivation for this ioctl is that 24454 * if this host does encounter reservation conflict, the underlying cause is 24455 * that some other host of the cluster has decided that this host is no longer 24456 * in the cluster and has seized control of the disks for itself. Since this 24457 * host is no longer in the cluster, it ought to panic itself. The 24458 * MHIOCENFAILFAST ioctl does two things: 24459 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT 24460 * error to panic the host, and 24461 * (b) it sets up a periodic timer to test whether this host still has 24462 * "access" (in that no other host has reserved the device): if the 24463 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The 24464 * purpose of that periodic timer is to handle scenarios where the host is 24465 * otherwise temporarily quiescent, temporarily doing no real i/o. 24466 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another 24467 * host, by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve 24468 * for the device itself. 24469 * 24470 * SCSI-3 PGR 24471 * A direct semantic implementation of the SCSI-3 Persistent Reservation 24472 * facility is supported through the shared multihost disk ioctls 24473 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE, 24474 * MHIOCGRP_PREEMPTANDABORT). 24475 * 24476 * Reservation Reclaim: 24477 * -------------------- 24478 * To support the lost reservation reclaim operations, this driver creates a 24479 * single thread to handle reinstating reservations on all devices that have 24480 * lost reservations. sd_resv_reclaim_requests are logged for all devices that 24481 * have lost reservations when the scsi watch facility calls back 24482 * sd_mhd_watch_cb, and the reservation reclaim thread loops through the 24483 * requests to regain the lost reservations. 24484 */ 24485 24486 /* 24487 * Function: sd_check_mhd() 24488 * 24489 * Description: This function sets up and submits a scsi watch request or 24490 * terminates an existing watch request. This routine is used in 24491 * support of reservation reclaim. 24492 * 24493 * Arguments: dev - the device 'dev_t' is used for context to discriminate 24494 * among multiple watches that share the callback function 24495 * interval - the number of milliseconds specifying the watch 24496 * interval for issuing TEST UNIT READY commands. If 24497 * set to 0 the watch should be terminated. If the 24498 * interval is set to 0 and if the device is required 24499 * to hold reservation while disabling failfast, the 24500 * watch is restarted with an interval of 24501 * reinstate_resv_delay.
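 *
 *			Example (derived from the callers in this file):
 *			the MHIOCENFAILFAST handler passes its mh_time
 *			argument straight through, so sd_check_mhd(dev, 1000)
 *			sets up a TEST UNIT READY watch every second, while
 *			sd_check_mhd(dev, 0) terminates the watch.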
24502 * 24503 * Return Code: 0 - Successful submit/terminate of scsi watch request 24504 * ENXIO - Indicates an invalid device was specified 24505 * EAGAIN - Unable to submit the scsi watch request 24506 */ 24507 24508 static int 24509 sd_check_mhd(dev_t dev, int interval) 24510 { 24511 struct sd_lun *un; 24512 opaque_t token; 24513 24514 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24515 return (ENXIO); 24516 } 24517 24518 /* is this a watch termination request? */ 24519 if (interval == 0) { 24520 mutex_enter(SD_MUTEX(un)); 24521 /* if there is an existing watch task then terminate it */ 24522 if (un->un_mhd_token) { 24523 token = un->un_mhd_token; 24524 un->un_mhd_token = NULL; 24525 mutex_exit(SD_MUTEX(un)); 24526 (void) scsi_watch_request_terminate(token, 24527 SCSI_WATCH_TERMINATE_WAIT); 24528 mutex_enter(SD_MUTEX(un)); 24529 } else { 24530 mutex_exit(SD_MUTEX(un)); 24531 /* 24532 * Note: If we return here we don't check for the 24533 * failfast case. This is the original legacy 24534 * implementation but perhaps we should be checking 24535 * the failfast case. 24536 */ 24537 return (0); 24538 } 24539 /* 24540 * If the device is required to hold reservation while 24541 * disabling failfast, we need to restart the scsi_watch 24542 * routine with an interval of reinstate_resv_delay. 24543 */ 24544 if (un->un_resvd_status & SD_RESERVE) { 24545 interval = sd_reinstate_resv_delay/1000; 24546 } else { 24547 /* no failfast so bail */ 24548 mutex_exit(SD_MUTEX(un)); 24549 return (0); 24550 } 24551 mutex_exit(SD_MUTEX(un)); 24552 } 24553 24554 /* 24555 * adjust minimum time interval to 1 second, 24556 * and convert from msecs to usecs 24557 */ 24558 if (interval > 0 && interval < 1000) { 24559 interval = 1000; 24560 } 24561 interval *= 1000; 24562 24563 /* 24564 * submit the request to the scsi_watch service 24565 */ 24566 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval, 24567 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev); 24568 if (token == NULL) { 24569 return (EAGAIN); 24570 } 24571 24572 /* 24573 * save token for termination later on 24574 */ 24575 mutex_enter(SD_MUTEX(un)); 24576 un->un_mhd_token = token; 24577 mutex_exit(SD_MUTEX(un)); 24578 return (0); 24579 } 24580 24581 24582 /* 24583 * Function: sd_mhd_watch_cb() 24584 * 24585 * Description: This function is the call back function used by the scsi watch 24586 * facility. The scsi watch facility sends the "Test Unit Ready" 24587 * and processes the status. If applicable (i.e. a "Unit Attention" 24588 * status and automatic "Request Sense" not used) the scsi watch 24589 * facility will send a "Request Sense" and retrieve the sense data 24590 * to be passed to this callback function. In either case the 24591 * automatic "Request Sense" or the facility submitting one, this 24592 * callback is passed the status and sense data. 
24593 * 24594 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24595 * among multiple watches that share this callback function 24596 * resultp - scsi watch facility result packet containing scsi 24597 * packet, status byte and sense data 24598 * 24599 * Return Code: 0 - continue the watch task 24600 * non-zero - terminate the watch task 24601 */ 24602 24603 static int 24604 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp) 24605 { 24606 struct sd_lun *un; 24607 struct scsi_status *statusp; 24608 struct scsi_extended_sense *sensep; 24609 struct scsi_pkt *pkt; 24610 uchar_t actual_sense_length; 24611 dev_t dev = (dev_t)arg; 24612 24613 ASSERT(resultp != NULL); 24614 statusp = resultp->statusp; 24615 sensep = resultp->sensep; 24616 pkt = resultp->pkt; 24617 actual_sense_length = resultp->actual_sense_length; 24618 24619 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24620 return (ENXIO); 24621 } 24622 24623 SD_TRACE(SD_LOG_IOCTL_MHD, un, 24624 "sd_mhd_watch_cb: reason '%s', status '%s'\n", 24625 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp))); 24626 24627 /* Begin processing of the status and/or sense data */ 24628 if (pkt->pkt_reason != CMD_CMPLT) { 24629 /* Handle the incomplete packet */ 24630 sd_mhd_watch_incomplete(un, pkt); 24631 return (0); 24632 } else if (*((unsigned char *)statusp) != STATUS_GOOD) { 24633 if (*((unsigned char *)statusp) 24634 == STATUS_RESERVATION_CONFLICT) { 24635 /* 24636 * Handle a reservation conflict by panicking if 24637 * configured for failfast or by logging the conflict 24638 * and updating the reservation status 24639 */ 24640 mutex_enter(SD_MUTEX(un)); 24641 if ((un->un_resvd_status & SD_FAILFAST) && 24642 (sd_failfast_enable)) { 24643 panic("Reservation Conflict"); 24644 /*NOTREACHED*/ 24645 } 24646 SD_INFO(SD_LOG_IOCTL_MHD, un, 24647 "sd_mhd_watch_cb: Reservation Conflict\n"); 24648 un->un_resvd_status |= SD_RESERVATION_CONFLICT; 24649 mutex_exit(SD_MUTEX(un)); 24650 } 24651 } 24652 24653 if (sensep != NULL) { 24654 if (actual_sense_length >= (SENSE_LENGTH - 2)) { 24655 mutex_enter(SD_MUTEX(un)); 24656 if ((sensep->es_add_code == SD_SCSI_RESET_SENSE_CODE) && 24657 (un->un_resvd_status & SD_RESERVE)) { 24658 /* 24659 * The additional sense code indicates a power 24660 * on or bus device reset has occurred; update 24661 * the reservation status. 24662 */ 24663 un->un_resvd_status |= 24664 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24665 SD_INFO(SD_LOG_IOCTL_MHD, un, 24666 "sd_mhd_watch_cb: Lost Reservation\n"); 24667 } 24668 } else { 24669 return (0); 24670 } 24671 } else { 24672 mutex_enter(SD_MUTEX(un)); 24673 } 24674 24675 if ((un->un_resvd_status & SD_RESERVE) && 24676 (un->un_resvd_status & SD_LOST_RESERVE)) { 24677 if (un->un_resvd_status & SD_WANT_RESERVE) { 24678 /* 24679 * A reset occurred in between the last probe and this 24680 * one so if a timeout is pending cancel it. 
24681 */ 24682 if (un->un_resvd_timeid) { 24683 timeout_id_t temp_id = un->un_resvd_timeid; 24684 un->un_resvd_timeid = NULL; 24685 mutex_exit(SD_MUTEX(un)); 24686 (void) untimeout(temp_id); 24687 mutex_enter(SD_MUTEX(un)); 24688 } 24689 un->un_resvd_status &= ~SD_WANT_RESERVE; 24690 } 24691 if (un->un_resvd_timeid == 0) { 24692 /* Schedule a timeout to handle the lost reservation */ 24693 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover, 24694 (void *)dev, 24695 drv_usectohz(sd_reinstate_resv_delay)); 24696 } 24697 } 24698 mutex_exit(SD_MUTEX(un)); 24699 return (0); 24700 } 24701 24702 24703 /* 24704 * Function: sd_mhd_watch_incomplete() 24705 * 24706 * Description: This function is used to find out why a scsi pkt sent by the 24707 * scsi watch facility was not completed. Under some scenarios this 24708 * routine will simply return. Otherwise it will send a bus reset to 24709 * see if the drive is still online. 24710 * 24711 * Arguments: un - driver soft state (unit) structure 24712 * pkt - incomplete scsi pkt 24713 */ 24714 24715 static void 24716 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt) 24717 { 24718 int be_chatty; 24719 int perr; 24720 24721 ASSERT(pkt != NULL); 24722 ASSERT(un != NULL); 24723 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT)); 24724 perr = (pkt->pkt_statistics & STAT_PERR); 24725 24726 mutex_enter(SD_MUTEX(un)); 24727 if (un->un_state == SD_STATE_DUMPING) { 24728 mutex_exit(SD_MUTEX(un)); 24729 return; 24730 } 24731 24732 switch (pkt->pkt_reason) { 24733 case CMD_UNX_BUS_FREE: 24734 /* 24735 * If we had a parity error that caused the target to drop BSY*, 24736 * don't be chatty about it. 24737 */ 24738 if (perr && be_chatty) { 24739 be_chatty = 0; 24740 } 24741 break; 24742 case CMD_TAG_REJECT: 24743 /* 24744 * The SCSI-2 spec states that a tag reject will be sent by the 24745 * target if tagged queuing is not supported. A tag reject may 24746 * also be sent during certain initialization periods or to 24747 * control internal resources. For the latter case the target 24748 * may also return Queue Full. 24749 * 24750 * If this driver receives a tag reject from a target that is 24751 * going through an init period or controlling internal 24752 * resources, tagged queuing will be disabled. This is a less 24753 * than optimal behavior, but the driver is unable to determine 24754 * the target state and assumes tagged queueing is not supported. 24755 */ 24756 pkt->pkt_flags = 0; 24757 un->un_tagflags = 0; 24758 24759 if (un->un_f_opt_queueing == TRUE) { 24760 un->un_throttle = min(un->un_throttle, 3); 24761 } else { 24762 un->un_throttle = 1; 24763 } 24764 mutex_exit(SD_MUTEX(un)); 24765 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1); 24766 mutex_enter(SD_MUTEX(un)); 24767 break; 24768 case CMD_INCOMPLETE: 24769 /* 24770 * The transport stopped with an abnormal state; fall through and 24771 * reset the target and/or bus, unless selection did not complete 24772 * (indicated by STATE_GOT_BUS), in which case we don't want to 24773 * go through a target/bus reset. 24774 */ 24775 if (pkt->pkt_state == STATE_GOT_BUS) { 24776 break; 24777 } 24778 /*FALLTHROUGH*/ 24779 24780 case CMD_TIMEOUT: 24781 default: 24782 /* 24783 * The lun may still be running the command, so a lun reset 24784 * should be attempted. If the lun reset fails or cannot be 24785 * issued, then try a target reset. Lastly try a bus reset.
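 *
 * The escalation implemented below is thus: scsi_reset(..., RESET_LUN),
 * then scsi_reset(..., RESET_TARGET), then scsi_reset(..., RESET_ALL),
 * stopping at the first reset that succeeds.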
24786 */ 24787 if ((pkt->pkt_statistics & 24788 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) { 24789 int reset_retval = 0; 24790 mutex_exit(SD_MUTEX(un)); 24791 if (un->un_f_allow_bus_device_reset == TRUE) { 24792 if (un->un_f_lun_reset_enabled == TRUE) { 24793 reset_retval = 24794 scsi_reset(SD_ADDRESS(un), 24795 RESET_LUN); 24796 } 24797 if (reset_retval == 0) { 24798 reset_retval = 24799 scsi_reset(SD_ADDRESS(un), 24800 RESET_TARGET); 24801 } 24802 } 24803 if (reset_retval == 0) { 24804 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 24805 } 24806 mutex_enter(SD_MUTEX(un)); 24807 } 24808 break; 24809 } 24810 24811 /* A device/bus reset has occurred; update the reservation status. */ 24812 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics & 24813 (STAT_BUS_RESET | STAT_DEV_RESET))) { 24814 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 24815 un->un_resvd_status |= 24816 (SD_LOST_RESERVE | SD_WANT_RESERVE); 24817 SD_INFO(SD_LOG_IOCTL_MHD, un, 24818 "sd_mhd_watch_incomplete: Lost Reservation\n"); 24819 } 24820 } 24821 24822 /* 24823 * The disk has been turned off; Update the device state. 24824 * 24825 * Note: Should we be offlining the disk here? 24826 */ 24827 if (pkt->pkt_state == STATE_GOT_BUS) { 24828 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: " 24829 "Disk not responding to selection\n"); 24830 if (un->un_state != SD_STATE_OFFLINE) { 24831 New_state(un, SD_STATE_OFFLINE); 24832 } 24833 } else if (be_chatty) { 24834 /* 24835 * suppress messages if they are all the same pkt reason; 24836 * with TQ, many (up to 256) are returned with the same 24837 * pkt_reason 24838 */ 24839 if (pkt->pkt_reason != un->un_last_pkt_reason) { 24840 SD_ERROR(SD_LOG_IOCTL_MHD, un, 24841 "sd_mhd_watch_incomplete: " 24842 "SCSI transport failed: reason '%s'\n", 24843 scsi_rname(pkt->pkt_reason)); 24844 } 24845 } 24846 un->un_last_pkt_reason = pkt->pkt_reason; 24847 mutex_exit(SD_MUTEX(un)); 24848 } 24849 24850 24851 /* 24852 * Function: sd_sname() 24853 * 24854 * Description: This is a simple little routine to return a string containing 24855 * a printable description of command status byte for use in 24856 * logging. 24857 * 24858 * Arguments: status - pointer to a status byte 24859 * 24860 * Return Code: char * - string containing status description. 24861 */ 24862 24863 static char * 24864 sd_sname(uchar_t status) 24865 { 24866 switch (status & STATUS_MASK) { 24867 case STATUS_GOOD: 24868 return ("good status"); 24869 case STATUS_CHECK: 24870 return ("check condition"); 24871 case STATUS_MET: 24872 return ("condition met"); 24873 case STATUS_BUSY: 24874 return ("busy"); 24875 case STATUS_INTERMEDIATE: 24876 return ("intermediate"); 24877 case STATUS_INTERMEDIATE_MET: 24878 return ("intermediate - condition met"); 24879 case STATUS_RESERVATION_CONFLICT: 24880 return ("reservation_conflict"); 24881 case STATUS_TERMINATED: 24882 return ("command terminated"); 24883 case STATUS_QFULL: 24884 return ("queue full"); 24885 default: 24886 return ("<unknown status>"); 24887 } 24888 } 24889 24890 24891 /* 24892 * Function: sd_mhd_resvd_recover() 24893 * 24894 * Description: This function adds a reservation entry to the 24895 * sd_resv_reclaim_request list and signals the reservation 24896 * reclaim thread that there is work pending. If the reservation 24897 * reclaim thread has not been previously created this function 24898 * will kick it off. 
24899 * 24900 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24901 * among multiple watches that share this callback function 24902 * 24903 * Context: This routine is called by timeout() and is run in interrupt 24904 * context. It must not sleep or call other functions which may 24905 * sleep. 24906 */ 24907 24908 static void 24909 sd_mhd_resvd_recover(void *arg) 24910 { 24911 dev_t dev = (dev_t)arg; 24912 struct sd_lun *un; 24913 struct sd_thr_request *sd_treq = NULL; 24914 struct sd_thr_request *sd_cur = NULL; 24915 struct sd_thr_request *sd_prev = NULL; 24916 int already_there = 0; 24917 24918 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 24919 return; 24920 } 24921 24922 mutex_enter(SD_MUTEX(un)); 24923 un->un_resvd_timeid = NULL; 24924 if (un->un_resvd_status & SD_WANT_RESERVE) { 24925 /* 24926 * There was a reset so don't issue the reserve, allow the 24927 * sd_mhd_watch_cb callback function to notice this and 24928 * reschedule the timeout for reservation. 24929 */ 24930 mutex_exit(SD_MUTEX(un)); 24931 return; 24932 } 24933 mutex_exit(SD_MUTEX(un)); 24934 24935 /* 24936 * Add this device to the sd_resv_reclaim_request list and the 24937 * sd_resv_reclaim_thread should take care of the rest. 24938 * 24939 * Note: We can't sleep in this context so if the memory allocation 24940 * fails allow the sd_mhd_watch_cb callback function to notice this and 24941 * reschedule the timeout for reservation. (4378460) 24942 */ 24943 sd_treq = (struct sd_thr_request *) 24944 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP); 24945 if (sd_treq == NULL) { 24946 return; 24947 } 24948 24949 sd_treq->sd_thr_req_next = NULL; 24950 sd_treq->dev = dev; 24951 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 24952 if (sd_tr.srq_thr_req_head == NULL) { 24953 sd_tr.srq_thr_req_head = sd_treq; 24954 } else { 24955 sd_cur = sd_prev = sd_tr.srq_thr_req_head; 24956 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) { 24957 if (sd_cur->dev == dev) { 24958 /* 24959 * already in Queue so don't log 24960 * another request for the device 24961 */ 24962 already_there = 1; 24963 break; 24964 } 24965 sd_prev = sd_cur; 24966 } 24967 if (!already_there) { 24968 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: " 24969 "logging request for %lx\n", dev); 24970 sd_prev->sd_thr_req_next = sd_treq; 24971 } else { 24972 kmem_free(sd_treq, sizeof (struct sd_thr_request)); 24973 } 24974 } 24975 24976 /* 24977 * Create a kernel thread to do the reservation reclaim and free up this 24978 * thread. 
We cannot block this thread while we go away to do the 24979 * reservation reclaim 24980 */ 24981 if (sd_tr.srq_resv_reclaim_thread == NULL) 24982 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0, 24983 sd_resv_reclaim_thread, NULL, 24984 0, &p0, TS_RUN, v.v_maxsyspri - 2); 24985 24986 /* Tell the reservation reclaim thread that it has work to do */ 24987 cv_signal(&sd_tr.srq_resv_reclaim_cv); 24988 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 24989 } 24990 24991 /* 24992 * Function: sd_resv_reclaim_thread() 24993 * 24994 * Description: This function implements the reservation reclaim operations 24995 * 24996 * Arguments: arg - the device 'dev_t' is used for context to discriminate 24997 * among multiple watches that share this callback function 24998 */ 24999 25000 static void 25001 sd_resv_reclaim_thread() 25002 { 25003 struct sd_lun *un; 25004 struct sd_thr_request *sd_mhreq; 25005 25006 /* Wait for work */ 25007 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25008 if (sd_tr.srq_thr_req_head == NULL) { 25009 cv_wait(&sd_tr.srq_resv_reclaim_cv, 25010 &sd_tr.srq_resv_reclaim_mutex); 25011 } 25012 25013 /* Loop while we have work */ 25014 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) { 25015 un = ddi_get_soft_state(sd_state, 25016 SDUNIT(sd_tr.srq_thr_cur_req->dev)); 25017 if (un == NULL) { 25018 /* 25019 * softstate structure is NULL so just 25020 * dequeue the request and continue 25021 */ 25022 sd_tr.srq_thr_req_head = 25023 sd_tr.srq_thr_cur_req->sd_thr_req_next; 25024 kmem_free(sd_tr.srq_thr_cur_req, 25025 sizeof (struct sd_thr_request)); 25026 continue; 25027 } 25028 25029 /* dequeue the request */ 25030 sd_mhreq = sd_tr.srq_thr_cur_req; 25031 sd_tr.srq_thr_req_head = 25032 sd_tr.srq_thr_cur_req->sd_thr_req_next; 25033 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25034 25035 /* 25036 * Reclaim reservation only if SD_RESERVE is still set. There 25037 * may have been a call to MHIOCRELEASE before we got here. 25038 */ 25039 mutex_enter(SD_MUTEX(un)); 25040 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25041 /* 25042 * Note: The SD_LOST_RESERVE flag is cleared before 25043 * reclaiming the reservation. If this is done after the 25044 * call to sd_reserve_release a reservation loss in the 25045 * window between pkt completion of reserve cmd and 25046 * mutex_enter below may not be recognized 25047 */ 25048 un->un_resvd_status &= ~SD_LOST_RESERVE; 25049 mutex_exit(SD_MUTEX(un)); 25050 25051 if (sd_reserve_release(sd_mhreq->dev, 25052 SD_RESERVE) == 0) { 25053 mutex_enter(SD_MUTEX(un)); 25054 un->un_resvd_status |= SD_RESERVE; 25055 mutex_exit(SD_MUTEX(un)); 25056 SD_INFO(SD_LOG_IOCTL_MHD, un, 25057 "sd_resv_reclaim_thread: " 25058 "Reservation Recovered\n"); 25059 } else { 25060 mutex_enter(SD_MUTEX(un)); 25061 un->un_resvd_status |= SD_LOST_RESERVE; 25062 mutex_exit(SD_MUTEX(un)); 25063 SD_INFO(SD_LOG_IOCTL_MHD, un, 25064 "sd_resv_reclaim_thread: Failed " 25065 "Reservation Recovery\n"); 25066 } 25067 } else { 25068 mutex_exit(SD_MUTEX(un)); 25069 } 25070 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25071 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req); 25072 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25073 sd_mhreq = sd_tr.srq_thr_cur_req = NULL; 25074 /* 25075 * wakeup the destroy thread if anyone is waiting on 25076 * us to complete. 
25077 */ 25078 cv_signal(&sd_tr.srq_inprocess_cv); 25079 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25080 "sd_resv_reclaim_thread: cv_signalling current request \n"); 25081 } 25082 25083 /* 25084 * Clean up the sd_tr structure now that this thread will not exist. 25085 */ 25086 ASSERT(sd_tr.srq_thr_req_head == NULL); 25087 ASSERT(sd_tr.srq_thr_cur_req == NULL); 25088 sd_tr.srq_resv_reclaim_thread = NULL; 25089 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25090 thread_exit(); 25091 } 25092 25093 25094 /* 25095 * Function: sd_rmv_resv_reclaim_req() 25096 * 25097 * Description: This function removes any pending reservation reclaim requests 25098 * for the specified device. 25099 * 25100 * Arguments: dev - the device 'dev_t' 25101 */ 25102 25103 static void 25104 sd_rmv_resv_reclaim_req(dev_t dev) 25105 { 25106 struct sd_thr_request *sd_mhreq; 25107 struct sd_thr_request *sd_prev; 25108 25109 /* Remove a reservation reclaim request from the list */ 25110 mutex_enter(&sd_tr.srq_resv_reclaim_mutex); 25111 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) { 25112 /* 25113 * We are attempting to reinstate reservation for 25114 * this device. We wait for sd_reserve_release() 25115 * to return before we return. 25116 */ 25117 cv_wait(&sd_tr.srq_inprocess_cv, 25118 &sd_tr.srq_resv_reclaim_mutex); 25119 } else { 25120 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head; 25121 if (sd_mhreq && sd_mhreq->dev == dev) { 25122 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next; 25123 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25124 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25125 return; 25126 } 25127 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) { 25128 if (sd_mhreq && sd_mhreq->dev == dev) { 25129 break; 25130 } 25131 sd_prev = sd_mhreq; 25132 } 25133 if (sd_mhreq != NULL) { 25134 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next; 25135 kmem_free(sd_mhreq, sizeof (struct sd_thr_request)); 25136 } 25137 } 25138 mutex_exit(&sd_tr.srq_resv_reclaim_mutex); 25139 } 25140 25141 25142 /* 25143 * Function: sd_mhd_reset_notify_cb() 25144 * 25145 * Description: This is a call back function for scsi_reset_notify. This 25146 * function updates the softstate reserved status and logs the 25147 * reset. The driver scsi watch facility callback function 25148 * (sd_mhd_watch_cb) and reservation reclaim thread functionality 25149 * will reclaim the reservation. 25150 * 25151 * Arguments: arg - driver soft state (unit) structure 25152 */ 25153 25154 static void 25155 sd_mhd_reset_notify_cb(caddr_t arg) 25156 { 25157 struct sd_lun *un = (struct sd_lun *)arg; 25158 25159 mutex_enter(SD_MUTEX(un)); 25160 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) { 25161 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE); 25162 SD_INFO(SD_LOG_IOCTL_MHD, un, 25163 "sd_mhd_reset_notify_cb: Lost Reservation\n"); 25164 } 25165 mutex_exit(SD_MUTEX(un)); 25166 } 25167 25168 25169 /* 25170 * Function: sd_take_ownership() 25171 * 25172 * Description: This routine implements an algorithm to achieve a stable 25173 * reservation on disks which don't implement priority reserve, 25174 * and makes sure that other hosts' re-reservation attempts fail. 25175 * This algorithm consists of a loop that keeps issuing the RESERVE 25176 * for some period of time (min_ownership_delay, default 6 seconds). 25177 * During that loop, it looks to see if there has been a bus device 25178 * reset or bus reset (both of which cause an existing reservation 25179 * to be lost).
If the reservation is lost, issue RESERVE until a 25180 * period of min_ownership_delay with no resets has gone by, or 25181 * until max_ownership_delay has expired. This loop ensures that 25182 * the host really did manage to reserve the device, in spite of 25183 * resets. The looping for min_ownership_delay (default six 25184 * seconds) is important to early generation clustering products, 25185 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an 25186 * MHIOCENFAILFAST periodic timer of two seconds. By having 25187 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having 25188 * MHIOCENFAILFAST poll every two seconds, the idea is that by the 25189 * time the MHIOCTKOWN ioctl returns, the other host (if any) will 25190 * have already noticed, via the MHIOCENFAILFAST polling, that it 25191 * no longer "owns" the disk and will have panicked itself. Thus, 25192 * the host issuing the MHIOCTKOWN is assured (with timing 25193 * dependencies) that by the time it actually starts to use the 25194 * disk for real work, the old owner is no longer accessing it. 25195 * 25196 * min_ownership_delay is the minimum amount of time for which the 25197 * disk must be reserved continuously devoid of resets before the 25198 * MHIOCTKOWN ioctl will return success. 25199 * 25200 * max_ownership_delay indicates the amount of time by which the 25201 * take ownership should succeed or time out with an error. 25202 * 25203 * Arguments: dev - the device 'dev_t' 25204 * *p - struct containing timing info. 25205 * 25206 * Return Code: 0 for success or error code 25207 */ 25208 25209 static int 25210 sd_take_ownership(dev_t dev, struct mhioctkown *p) 25211 { 25212 struct sd_lun *un; 25213 int rval; 25214 int err; 25215 int reservation_count = 0; 25216 int min_ownership_delay = 6000000; /* in usec */ 25217 int max_ownership_delay = 30000000; /* in usec */ 25218 clock_t start_time; /* starting time of this algorithm */ 25219 clock_t end_time; /* time limit for giving up */ 25220 clock_t ownership_time; /* time limit for stable ownership */ 25221 clock_t current_time; 25222 clock_t previous_current_time; 25223 25224 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25225 return (ENXIO); 25226 } 25227 25228 /* 25229 * Attempt a device reservation. A priority reservation is requested.
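 *
 * Illustrative timeline under the defaults above (6 s minimum, 30 s
 * maximum, 500 ms loop delay): after the initial priority reserve, the
 * loop below must observe at least four consecutive successful RESERVEs
 * and a reset-free interval of min_ownership_delay before returning 0;
 * any reset or lost reservation restarts the 6 s clock, and EACCES is
 * returned if stable ownership is not achieved within 30 s.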
25230 */ 25231 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE)) 25232 != SD_SUCCESS) { 25233 SD_ERROR(SD_LOG_IOCTL_MHD, un, 25234 "sd_take_ownership: return(1)=%d\n", rval); 25235 return (rval); 25236 } 25237 25238 /* Update the softstate reserved status to indicate the reservation */ 25239 mutex_enter(SD_MUTEX(un)); 25240 un->un_resvd_status |= SD_RESERVE; 25241 un->un_resvd_status &= 25242 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT); 25243 mutex_exit(SD_MUTEX(un)); 25244 25245 if (p != NULL) { 25246 if (p->min_ownership_delay != 0) { 25247 min_ownership_delay = p->min_ownership_delay * 1000; 25248 } 25249 if (p->max_ownership_delay != 0) { 25250 max_ownership_delay = p->max_ownership_delay * 1000; 25251 } 25252 } 25253 SD_INFO(SD_LOG_IOCTL_MHD, un, 25254 "sd_take_ownership: min, max delays: %d, %d\n", 25255 min_ownership_delay, max_ownership_delay); 25256 25257 start_time = ddi_get_lbolt(); 25258 current_time = start_time; 25259 ownership_time = current_time + drv_usectohz(min_ownership_delay); 25260 end_time = start_time + drv_usectohz(max_ownership_delay); 25261 25262 while (current_time - end_time < 0) { 25263 delay(drv_usectohz(500000)); 25264 25265 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) { 25266 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) { 25267 mutex_enter(SD_MUTEX(un)); 25268 rval = (un->un_resvd_status & 25269 SD_RESERVATION_CONFLICT) ? EACCES : EIO; 25270 mutex_exit(SD_MUTEX(un)); 25271 break; 25272 } 25273 } 25274 previous_current_time = current_time; 25275 current_time = ddi_get_lbolt(); 25276 mutex_enter(SD_MUTEX(un)); 25277 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) { 25278 ownership_time = ddi_get_lbolt() + 25279 drv_usectohz(min_ownership_delay); 25280 reservation_count = 0; 25281 } else { 25282 reservation_count++; 25283 } 25284 un->un_resvd_status |= SD_RESERVE; 25285 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE); 25286 mutex_exit(SD_MUTEX(un)); 25287 25288 SD_INFO(SD_LOG_IOCTL_MHD, un, 25289 "sd_take_ownership: ticks for loop iteration=%ld, " 25290 "reservation=%s\n", (current_time - previous_current_time), 25291 reservation_count ? 
"ok" : "reclaimed"); 25292 25293 if (current_time - ownership_time >= 0 && 25294 reservation_count >= 4) { 25295 rval = 0; /* Achieved a stable ownership */ 25296 break; 25297 } 25298 if (current_time - end_time >= 0) { 25299 rval = EACCES; /* No ownership in max possible time */ 25300 break; 25301 } 25302 } 25303 SD_TRACE(SD_LOG_IOCTL_MHD, un, 25304 "sd_take_ownership: return(2)=%d\n", rval); 25305 return (rval); 25306 } 25307 25308 25309 /* 25310 * Function: sd_reserve_release() 25311 * 25312 * Description: This function builds and sends scsi RESERVE, RELEASE, and 25313 * PRIORITY RESERVE commands based on a user specified command type 25314 * 25315 * Arguments: dev - the device 'dev_t' 25316 * cmd - user specified command type; one of SD_PRIORITY_RESERVE, 25317 * SD_RESERVE, SD_RELEASE 25318 * 25319 * Return Code: 0 or Error Code 25320 */ 25321 25322 static int 25323 sd_reserve_release(dev_t dev, int cmd) 25324 { 25325 struct uscsi_cmd *com = NULL; 25326 struct sd_lun *un = NULL; 25327 char cdb[CDB_GROUP0]; 25328 int rval; 25329 25330 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) || 25331 (cmd == SD_PRIORITY_RESERVE)); 25332 25333 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 25334 return (ENXIO); 25335 } 25336 25337 /* instantiate and initialize the command and cdb */ 25338 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 25339 bzero(cdb, CDB_GROUP0); 25340 com->uscsi_flags = USCSI_SILENT; 25341 com->uscsi_timeout = un->un_reserve_release_time; 25342 com->uscsi_cdblen = CDB_GROUP0; 25343 com->uscsi_cdb = cdb; 25344 if (cmd == SD_RELEASE) { 25345 cdb[0] = SCMD_RELEASE; 25346 } else { 25347 cdb[0] = SCMD_RESERVE; 25348 } 25349 25350 /* Send the command. */ 25351 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 25352 UIO_SYSSPACE, SD_PATH_STANDARD); 25353 25354 /* 25355 * "break" a reservation that is held by another host, by issuing a 25356 * reset if priority reserve is desired, and we could not get the 25357 * device. 25358 */ 25359 if ((cmd == SD_PRIORITY_RESERVE) && 25360 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25361 /* 25362 * First try to reset the LUN. If we cannot, then try a target 25363 * reset, followed by a bus reset if the target reset fails. 25364 */ 25365 int reset_retval = 0; 25366 if (un->un_f_lun_reset_enabled == TRUE) { 25367 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN); 25368 } 25369 if (reset_retval == 0) { 25370 /* The LUN reset either failed or was not issued */ 25371 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25372 } 25373 if ((reset_retval == 0) && 25374 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) { 25375 rval = EIO; 25376 kmem_free(com, sizeof (*com)); 25377 return (rval); 25378 } 25379 25380 bzero(com, sizeof (struct uscsi_cmd)); 25381 com->uscsi_flags = USCSI_SILENT; 25382 com->uscsi_cdb = cdb; 25383 com->uscsi_cdblen = CDB_GROUP0; 25384 com->uscsi_timeout = 5; 25385 25386 /* 25387 * Reissue the last reserve command, this time without request 25388 * sense. Assume that it is just a regular reserve command. 25389 */ 25390 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 25391 UIO_SYSSPACE, SD_PATH_STANDARD); 25392 } 25393 25394 /* Return an error if still getting a reservation conflict. 
*/ 25395 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) { 25396 rval = EACCES; 25397 } 25398 25399 kmem_free(com, sizeof (*com)); 25400 return (rval); 25401 } 25402 25403 25404 #define SD_NDUMP_RETRIES 12 25405 /* 25406 * System Crash Dump routine 25407 */ 25408 25409 static int 25410 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 25411 { 25412 int instance; 25413 int partition; 25414 int i; 25415 int err; 25416 struct sd_lun *un; 25417 struct dk_map *lp; 25418 struct scsi_pkt *wr_pktp; 25419 struct buf *wr_bp; 25420 struct buf wr_buf; 25421 daddr_t tgt_byte_offset; /* rmw - byte offset for target */ 25422 daddr_t tgt_blkno; /* rmw - blkno for target */ 25423 size_t tgt_byte_count; /* rmw - # of bytes to xfer */ 25424 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */ 25425 size_t io_start_offset; 25426 int doing_rmw = FALSE; 25427 int rval; 25428 #if defined(__i386) || defined(__amd64) 25429 ssize_t dma_resid; 25430 daddr_t oblkno; 25431 #endif 25432 25433 instance = SDUNIT(dev); 25434 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) || 25435 (!un->un_f_geometry_is_valid) || ISCD(un)) { 25436 return (ENXIO); 25437 } 25438 25439 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 25440 25441 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n"); 25442 25443 partition = SDPART(dev); 25444 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition); 25445 25446 /* Validate the blocks to dump against the partition size. */ 25447 lp = &un->un_map[partition]; 25448 if ((blkno + nblk) > lp->dkl_nblk) { 25449 SD_TRACE(SD_LOG_DUMP, un, 25450 "sddump: dump range larger than partition: " 25451 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n", 25452 blkno, nblk, lp->dkl_nblk); 25453 return (EINVAL); 25454 } 25455 25456 mutex_enter(&un->un_pm_mutex); 25457 if (SD_DEVICE_IS_IN_LOW_POWER(un)) { 25458 struct scsi_pkt *start_pktp; 25459 25460 mutex_exit(&un->un_pm_mutex); 25461 25462 /* 25463 * Use the pm framework to power on the HBA first. 25464 */ 25465 (void) pm_raise_power(SD_DEVINFO(un), 0, SD_SPINDLE_ON); 25466 25467 /* 25468 * Dump no longer uses sdpower to power on a device; it's done 25469 * in-line here so it can be done in polled mode. 25470 */ 25471 25472 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n"); 25473 25474 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL, 25475 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL); 25476 25477 if (start_pktp == NULL) { 25478 /* We were not given a SCSI packet, fail. */ 25479 return (EIO); 25480 } 25481 bzero(start_pktp->pkt_cdbp, CDB_GROUP0); 25482 start_pktp->pkt_cdbp[0] = SCMD_START_STOP; 25483 start_pktp->pkt_cdbp[4] = SD_TARGET_START; 25484 start_pktp->pkt_flags = FLAG_NOINTR; 25485 25486 mutex_enter(SD_MUTEX(un)); 25487 SD_FILL_SCSI1_LUN(un, start_pktp); 25488 mutex_exit(SD_MUTEX(un)); 25489 /* 25490 * Scsi_poll returns 0 (success) if the command completes and 25491 * the status block is STATUS_GOOD. 25492 */ 25493 if (sd_scsi_poll(un, start_pktp) != 0) { 25494 scsi_destroy_pkt(start_pktp); 25495 return (EIO); 25496 } 25497 scsi_destroy_pkt(start_pktp); 25498 (void) sd_ddi_pm_resume(un); 25499 } else { 25500 mutex_exit(&un->un_pm_mutex); 25501 } 25502 25503 mutex_enter(SD_MUTEX(un)); 25504 un->un_throttle = 0; 25505 25506 /* 25507 * The first time through, reset the specific target device. 25508 * However, when cpr calls sddump we know that sd is in a 25509 * good state, so no bus reset is required. 25510 * Clear sense data via Request Sense cmd.
25511 * In sddump we don't care about allow_bus_device_reset anymore. 25512 */ 25513 25514 if ((un->un_state != SD_STATE_SUSPENDED) && 25515 (un->un_state != SD_STATE_DUMPING)) { 25516 25517 New_state(un, SD_STATE_DUMPING); 25518 25519 if (un->un_f_is_fibre == FALSE) { 25520 mutex_exit(SD_MUTEX(un)); 25521 /* 25522 * Attempt a bus reset for parallel scsi. 25523 * 25524 * Note: A bus reset is required because on some host 25525 * systems (i.e. E420R) a bus device reset is 25526 * insufficient to reset the state of the target. 25527 * 25528 * Note: Don't issue the reset for fibre-channel, 25529 * because this tends to hang the bus (loop) for 25530 * too long while everyone is logging out and in 25531 * and the deadman timer for dumping will fire 25532 * before the dump is complete. 25533 */ 25534 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) { 25535 mutex_enter(SD_MUTEX(un)); 25536 Restore_state(un); 25537 mutex_exit(SD_MUTEX(un)); 25538 return (EIO); 25539 } 25540 25541 /* Delay to give the device some recovery time. */ 25542 drv_usecwait(10000); 25543 25544 if (sd_send_polled_RQS(un) == SD_FAILURE) { 25545 SD_INFO(SD_LOG_DUMP, un, 25546 "sddump: sd_send_polled_RQS failed\n"); 25547 } 25548 mutex_enter(SD_MUTEX(un)); 25549 } 25550 } 25551 25552 /* 25553 * Convert the partition-relative block number to a 25554 * disk physical block number. 25555 */ 25556 blkno += un->un_offset[partition]; 25557 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno); 25558 25559 25560 /* 25561 * Check if the device has a non-512 block size. 25562 */ 25563 wr_bp = NULL; 25564 if (NOT_DEVBSIZE(un)) { 25565 tgt_byte_offset = blkno * un->un_sys_blocksize; 25566 tgt_byte_count = nblk * un->un_sys_blocksize; 25567 if ((tgt_byte_offset % un->un_tgt_blocksize) || 25568 (tgt_byte_count % un->un_tgt_blocksize)) { 25569 doing_rmw = TRUE; 25570 /* 25571 * Calculate the block number and number of blocks 25572 * in terms of the media block size. 25573 */ 25574 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 25575 tgt_nblk = 25576 ((tgt_byte_offset + tgt_byte_count + 25577 (un->un_tgt_blocksize - 1)) / 25578 un->un_tgt_blocksize) - tgt_blkno; 25579 25580 /* 25581 * Invoke the routine which is going to do the read part 25582 * of read-modify-write. 25583 * Note that this routine returns a pointer to 25584 * a valid bp in wr_bp. 25585 */ 25586 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk, 25587 &wr_bp); 25588 if (err) { 25589 mutex_exit(SD_MUTEX(un)); 25590 return (err); 25591 } 25592 /* 25593 * The offset is calculated as: 25594 * (original block # * system block size) - 25595 * (new block # * target block size) 25596 */ 25597 io_start_offset = 25598 ((uint64_t)(blkno * un->un_sys_blocksize)) - 25599 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize)); 25600 25601 ASSERT((io_start_offset >= 0) && 25602 (io_start_offset < un->un_tgt_blocksize)); 25603 /* 25604 * Do the modify portion of read modify write.
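 *
 * Worked example (illustrative numbers only): with
 * un_sys_blocksize == 512, un_tgt_blocksize == 2048, blkno == 5
 * and nblk == 1: tgt_byte_offset = 2560, tgt_blkno = 1,
 * tgt_nblk = 1, and io_start_offset = (5 * 512) - (1 * 2048) = 512,
 * so the 512 bytes being dumped are copied at offset 512 into the
 * 2048-byte target block that was just read.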
25605 */ 25606 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset], 25607 (size_t)nblk * un->un_sys_blocksize); 25608 } else { 25609 doing_rmw = FALSE; 25610 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize; 25611 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize; 25612 } 25613 25614 /* Convert blkno and nblk to target blocks */ 25615 blkno = tgt_blkno; 25616 nblk = tgt_nblk; 25617 } else { 25618 wr_bp = &wr_buf; 25619 bzero(wr_bp, sizeof (struct buf)); 25620 wr_bp->b_flags = B_BUSY; 25621 wr_bp->b_un.b_addr = addr; 25622 wr_bp->b_bcount = nblk << DEV_BSHIFT; 25623 wr_bp->b_resid = 0; 25624 } 25625 25626 mutex_exit(SD_MUTEX(un)); 25627 25628 /* 25629 * Obtain a SCSI packet for the write command. 25630 * It should be safe to call the allocator here without 25631 * worrying about being locked for DVMA mapping because 25632 * the address we're passed is already a DVMA mapping 25633 * 25634 * We are also not going to worry about semaphore ownership 25635 * in the dump buffer. Dumping is single threaded at present. 25636 */ 25637 25638 wr_pktp = NULL; 25639 25640 #if defined(__i386) || defined(__amd64) 25641 dma_resid = wr_bp->b_bcount; 25642 oblkno = blkno; 25643 while (dma_resid != 0) { 25644 #endif 25645 25646 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 25647 wr_bp->b_flags &= ~B_ERROR; 25648 25649 #if defined(__i386) || defined(__amd64) 25650 blkno = oblkno + 25651 ((wr_bp->b_bcount - dma_resid) / 25652 un->un_tgt_blocksize); 25653 nblk = dma_resid / un->un_tgt_blocksize; 25654 25655 if (wr_pktp) { 25656 /* Partial DMA transfers after initial transfer */ 25657 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp, 25658 blkno, nblk); 25659 } else { 25660 /* Initial transfer */ 25661 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 25662 un->un_pkt_flags, NULL_FUNC, NULL, 25663 blkno, nblk); 25664 } 25665 #else 25666 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp, 25667 0, NULL_FUNC, NULL, blkno, nblk); 25668 #endif 25669 25670 if (rval == 0) { 25671 /* We were given a SCSI packet, continue. 
*/ 25672 break; 25673 } 25674 25675 if (i == 0) { 25676 if (wr_bp->b_flags & B_ERROR) { 25677 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25678 "no resources for dumping; " 25679 "error code: 0x%x, retrying", 25680 geterror(wr_bp)); 25681 } else { 25682 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 25683 "no resources for dumping; retrying"); 25684 } 25685 } else if (i != (SD_NDUMP_RETRIES - 1)) { 25686 if (wr_bp->b_flags & B_ERROR) { 25687 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25688 "no resources for dumping; error code: " 25689 "0x%x, retrying\n", geterror(wr_bp)); 25690 } 25691 } else { 25692 if (wr_bp->b_flags & B_ERROR) { 25693 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25694 "no resources for dumping; " 25695 "error code: 0x%x, retries failed, " 25696 "giving up.\n", geterror(wr_bp)); 25697 } else { 25698 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25699 "no resources for dumping; " 25700 "retries failed, giving up.\n"); 25701 } 25702 mutex_enter(SD_MUTEX(un)); 25703 Restore_state(un); 25704 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) { 25705 mutex_exit(SD_MUTEX(un)); 25706 scsi_free_consistent_buf(wr_bp); 25707 } else { 25708 mutex_exit(SD_MUTEX(un)); 25709 } 25710 return (EIO); 25711 } 25712 drv_usecwait(10000); 25713 } 25714 25715 #if defined(__i386) || defined(__amd64) 25716 /* 25717 * save the resid from PARTIAL_DMA 25718 */ 25719 dma_resid = wr_pktp->pkt_resid; 25720 if (dma_resid != 0) 25721 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid); 25722 wr_pktp->pkt_resid = 0; 25723 #endif 25724 25725 /* SunBug 1222170 */ 25726 wr_pktp->pkt_flags = FLAG_NOINTR; 25727 25728 err = EIO; 25729 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 25730 25731 /* 25732 * Scsi_poll returns 0 (success) if the command completes and 25733 * the status block is STATUS_GOOD. We should only check 25734 * errors if this condition is not true. Even then we should 25735 * send our own request sense packet only if we have a check 25736 * condition and auto request sense has not been performed by 25737 * the hba. 25738 */ 25739 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n"); 25740 25741 if ((sd_scsi_poll(un, wr_pktp) == 0) && 25742 (wr_pktp->pkt_resid == 0)) { 25743 err = SD_SUCCESS; 25744 break; 25745 } 25746 25747 /* 25748 * Check CMD_DEV_GONE 1st, give up if device is gone. 25749 */ 25750 if (wr_pktp->pkt_reason == CMD_DEV_GONE) { 25751 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 25752 "Device is gone\n"); 25753 break; 25754 } 25755 25756 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) { 25757 SD_INFO(SD_LOG_DUMP, un, 25758 "sddump: write failed with CHECK, try # %d\n", i); 25759 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) { 25760 (void) sd_send_polled_RQS(un); 25761 } 25762 25763 continue; 25764 } 25765 25766 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) { 25767 int reset_retval = 0; 25768 25769 SD_INFO(SD_LOG_DUMP, un, 25770 "sddump: write failed with BUSY, try # %d\n", i); 25771 25772 if (un->un_f_lun_reset_enabled == TRUE) { 25773 reset_retval = scsi_reset(SD_ADDRESS(un), 25774 RESET_LUN); 25775 } 25776 if (reset_retval == 0) { 25777 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 25778 } 25779 (void) sd_send_polled_RQS(un); 25780 25781 } else { 25782 SD_INFO(SD_LOG_DUMP, un, 25783 "sddump: write failed with 0x%x, try # %d\n", 25784 SD_GET_PKT_STATUS(wr_pktp), i); 25785 mutex_enter(SD_MUTEX(un)); 25786 sd_reset_target(un, wr_pktp); 25787 mutex_exit(SD_MUTEX(un)); 25788 } 25789 25790 /* 25791 * If we are not getting anywhere with lun/target resets, 25792 * let's reset the bus. 
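 *
 * For reference, a worked example of the x86/x64 partial-DMA windowing
 * handled earlier in this retry loop, with hypothetical numbers
 * (assuming un_tgt_blocksize == 512 and an HBA that binds 256 KiB of a
 * 1 MiB request per window):
 */
#if 0	/* illustrative sketch only; never compiled */
	dma_resid = 1048576 - 262144;			/* 768 KiB not yet moved */
	blkno = oblkno + ((1048576 - 786432) / 512);	/* advance 512 blocks */
	nblk = 786432 / 512;				/* 1536 blocks remain */
#endif
/*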
25793 */ 25794 if (i == SD_NDUMP_RETRIES/2) { 25795 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 25796 (void) sd_send_polled_RQS(un); 25797 } 25798 25799 } 25800 #if defined(__i386) || defined(__amd64) 25801 } /* dma_resid */ 25802 #endif 25803 25804 scsi_destroy_pkt(wr_pktp); 25805 mutex_enter(SD_MUTEX(un)); 25806 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) { 25807 mutex_exit(SD_MUTEX(un)); 25808 scsi_free_consistent_buf(wr_bp); 25809 } else { 25810 mutex_exit(SD_MUTEX(un)); 25811 } 25812 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err); 25813 return (err); 25814 } 25815 25816 /* 25817 * Function: sd_scsi_poll() 25818 * 25819 * Description: This is a wrapper for the scsi_poll call. 25820 * 25821 * Arguments: sd_lun - The unit structure 25822 * scsi_pkt - The scsi packet being sent to the device. 25823 * 25824 * Return Code: 0 - Command completed successfully with good status 25825 * -1 - Command failed. This could indicate a check condition 25826 * or other status value requiring recovery action. 25827 * 25828 */ 25829 25830 static int 25831 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp) 25832 { 25833 int status; 25834 25835 ASSERT(un != NULL); 25836 ASSERT(!mutex_owned(SD_MUTEX(un))); 25837 ASSERT(pktp != NULL); 25838 25839 status = SD_SUCCESS; 25840 25841 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) { 25842 pktp->pkt_flags |= un->un_tagflags; 25843 pktp->pkt_flags &= ~FLAG_NODISCON; 25844 } 25845 25846 status = sd_ddi_scsi_poll(pktp); 25847 /* 25848 * Scsi_poll returns 0 (success) if the command completes and the 25849 * status block is STATUS_GOOD. We should only check errors if this 25850 * condition is not true. Even then we should send our own request 25851 * sense packet only if we have a check condition and auto 25852 * request sense has not been performed by the hba. 25853 * Don't get RQS data if pkt_reason is CMD_DEV_GONE. 25854 */ 25855 if ((status != SD_SUCCESS) && 25856 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) && 25857 (pktp->pkt_state & STATE_ARQ_DONE) == 0 && 25858 (pktp->pkt_reason != CMD_DEV_GONE)) 25859 (void) sd_send_polled_RQS(un); 25860 25861 return (status); 25862 } 25863 25864 /* 25865 * Function: sd_send_polled_RQS() 25866 * 25867 * Description: This sends the request sense command to a device. 25868 * 25869 * Arguments: sd_lun - The unit structure 25870 * 25871 * Return Code: 0 - Command completed successfully with good status 25872 * -1 - Command failed. 
25873 * 25874 */ 25875 25876 static int 25877 sd_send_polled_RQS(struct sd_lun *un) 25878 { 25879 int ret_val; 25880 struct scsi_pkt *rqs_pktp; 25881 struct buf *rqs_bp; 25882 25883 ASSERT(un != NULL); 25884 ASSERT(!mutex_owned(SD_MUTEX(un))); 25885 25886 ret_val = SD_SUCCESS; 25887 25888 rqs_pktp = un->un_rqs_pktp; 25889 rqs_bp = un->un_rqs_bp; 25890 25891 mutex_enter(SD_MUTEX(un)); 25892 25893 if (un->un_sense_isbusy) { 25894 ret_val = SD_FAILURE; 25895 mutex_exit(SD_MUTEX(un)); 25896 return (ret_val); 25897 } 25898 25899 /* 25900 * If the request sense buffer (and packet) is not in use, 25901 * let's set the un_sense_isbusy flag and send our packet. 25902 */ 25903 un->un_sense_isbusy = 1; 25904 rqs_pktp->pkt_resid = 0; 25905 rqs_pktp->pkt_reason = 0; 25906 rqs_pktp->pkt_flags |= FLAG_NOINTR; 25907 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH); 25908 25909 mutex_exit(SD_MUTEX(un)); 25910 25911 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at" 25912 " 0x%p\n", rqs_bp->b_un.b_addr); 25913 25914 /* 25915 * Can't send this to sd_scsi_poll, we wrap ourselves around the 25916 * axle - it has a call into us! 25917 */ 25918 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) { 25919 SD_INFO(SD_LOG_COMMON, un, 25920 "sd_send_polled_RQS: RQS failed\n"); 25921 } 25922 25923 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:", 25924 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX); 25925 25926 mutex_enter(SD_MUTEX(un)); 25927 un->un_sense_isbusy = 0; 25928 mutex_exit(SD_MUTEX(un)); 25929 25930 return (ret_val); 25931 } 25932 25933 /* 25934 * Defines needed for localized version of the scsi_poll routine. 25935 */ 25936 #define SD_CSEC 10000 /* usecs */ 25937 #define SD_SEC_TO_CSEC (1000000/SD_CSEC) 25938 25939 25940 /* 25941 * Function: sd_ddi_scsi_poll() 25942 * 25943 * Description: Localized version of the scsi_poll routine. The purpose is to 25944 * send a scsi_pkt to a device as a polled command. This version 25945 * is to ensure more robust handling of transport errors. 25946 * Specifically, this routine cures the not ready, coming ready 25947 * transition for power up and reset of Sonomas. This can take 25948 * up to 45 seconds for power-on and 20 seconds for reset of a 25949 * Sonoma LUN. 25950 * 25951 * Arguments: scsi_pkt - The scsi_pkt being sent to a device 25952 * 25953 * Return Code: 0 - Command completed successfully with good status 25954 * -1 - Command failed. 25955 * 25956 */ 25957 25958 static int 25959 sd_ddi_scsi_poll(struct scsi_pkt *pkt) 25960 { 25961 int busy_count; 25962 int timeout; 25963 int rval = SD_FAILURE; 25964 int savef; 25965 struct scsi_extended_sense *sensep; 25966 long savet; 25967 void (*savec)(); 25968 /* 25969 * The following is defined in machdep.c and is used in determining if 25970 * the scsi transport system will do polled I/O instead of interrupt 25971 * I/O when called from xx_dump(). 25972 */ 25973 extern int do_polled_io; 25974 25975 /* 25976 * save old flags in pkt, to restore at end 25977 */ 25978 savef = pkt->pkt_flags; 25979 savec = pkt->pkt_comp; 25980 savet = pkt->pkt_time; 25981 25982 pkt->pkt_flags |= FLAG_NOINTR; 25983 25984 /* 25985 * XXX there is nothing in the SCSA spec that states that we should not 25986 * do a callback for polled cmds; however, removing this will break sd 25987 * and probably other target drivers 25988 */ 25989 pkt->pkt_comp = NULL; 25990 25991 /* 25992 * We don't like a polled command without a timeout; 25993 * 60 seconds seems long enough.
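 *
 * A sketch of the retry-budget arithmetic used below, following the
 * SD_CSEC and SD_SEC_TO_CSEC definitions above:
 */
#if 0	/* illustrative sketch only; never compiled */
	/* One tick is SD_CSEC == 10000 usec == 10 msec. */
	/* SD_SEC_TO_CSEC == 1000000 / 10000 == 100 ticks per second. */
	timeout = 60 * 100;		/* a 60 sec pkt_time yields 6000 ticks */
	/*
	 * A 1 sec busy or not-ready wait adds (SD_SEC_TO_CSEC - 1) to
	 * busy_count on top of the loop increment, i.e. 100 ticks, so the
	 * budget stays in wall-clock seconds whatever the poll delay is.
	 */
#endif
/*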
25994 */ 25995 if (pkt->pkt_time == 0) { 25996 pkt->pkt_time = SCSI_POLL_TIMEOUT; 25997 } 25998 25999 /* 26000 * Send polled cmd. 26001 * 26002 * We do some error recovery for various errors. Tran_busy, 26003 * queue full, and non-dispatched commands are retried every 10 msec. 26004 * as they are typically transient failures. Busy status and Not 26005 * Ready are retried every second as this status takes a while to 26006 * change. Unit attention is retried for pkt_time (60) times 26007 * with no delay. 26008 */ 26009 timeout = pkt->pkt_time * SD_SEC_TO_CSEC; 26010 26011 for (busy_count = 0; busy_count < timeout; busy_count++) { 26012 int rc; 26013 int poll_delay; 26014 26015 /* 26016 * Initialize pkt status variables. 26017 */ 26018 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0; 26019 26020 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) { 26021 if (rc != TRAN_BUSY) { 26022 /* Transport failed - give up. */ 26023 break; 26024 } else { 26025 /* Transport busy - try again. */ 26026 poll_delay = 1 * SD_CSEC; /* 10 msec */ 26027 } 26028 } else { 26029 /* 26030 * Transport accepted - check pkt status. 26031 */ 26032 rc = (*pkt->pkt_scbp) & STATUS_MASK; 26033 if (pkt->pkt_reason == CMD_CMPLT && 26034 rc == STATUS_CHECK && 26035 pkt->pkt_state & STATE_ARQ_DONE) { 26036 struct scsi_arq_status *arqstat = 26037 (struct scsi_arq_status *)(pkt->pkt_scbp); 26038 26039 sensep = &arqstat->sts_sensedata; 26040 } else { 26041 sensep = NULL; 26042 } 26043 26044 if ((pkt->pkt_reason == CMD_CMPLT) && 26045 (rc == STATUS_GOOD)) { 26046 /* No error - we're done */ 26047 rval = SD_SUCCESS; 26048 break; 26049 26050 } else if (pkt->pkt_reason == CMD_DEV_GONE) { 26051 /* Lost connection - give up */ 26052 break; 26053 26054 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) && 26055 (pkt->pkt_state == 0)) { 26056 /* Pkt not dispatched - try again. */ 26057 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 26058 26059 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26060 (rc == STATUS_QFULL)) { 26061 /* Queue full - try again. */ 26062 poll_delay = 1 * SD_CSEC; /* 10 msec. */ 26063 26064 } else if ((pkt->pkt_reason == CMD_CMPLT) && 26065 (rc == STATUS_BUSY)) { 26066 /* Busy - try again. */ 26067 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 26068 busy_count += (SD_SEC_TO_CSEC - 1); 26069 26070 } else if ((sensep != NULL) && 26071 (sensep->es_key == KEY_UNIT_ATTENTION)) { 26072 /* Unit Attention - try again */ 26073 busy_count += (SD_SEC_TO_CSEC - 1); /* 1 */ 26074 continue; 26075 26076 } else if ((sensep != NULL) && 26077 (sensep->es_key == KEY_NOT_READY) && 26078 (sensep->es_add_code == 0x04) && 26079 (sensep->es_qual_code == 0x01)) { 26080 /* Not ready -> ready - try again. */ 26081 poll_delay = 100 * SD_CSEC; /* 1 sec. */ 26082 busy_count += (SD_SEC_TO_CSEC - 1); 26083 26084 } else { 26085 /* BAD status - give up. */ 26086 break; 26087 } 26088 } 26089 26090 if ((curthread->t_flag & T_INTR_THREAD) == 0 && 26091 !do_polled_io) { 26092 delay(drv_usectohz(poll_delay)); 26093 } else { 26094 /* we busy wait during cpr_dump or interrupt threads */ 26095 drv_usecwait(poll_delay); 26096 } 26097 } 26098 26099 pkt->pkt_flags = savef; 26100 pkt->pkt_comp = savec; 26101 pkt->pkt_time = savet; 26102 return (rval); 26103 } 26104 26105 26106 /* 26107 * Function: sd_persistent_reservation_in_read_keys 26108 * 26109 * Description: This routine is the driver entry point for handling CD-ROM 26110 * multi-host persistent reservation requests (MHIOCGRP_INKEYS) 26111 * by sending the SCSI-3 PRIN commands to the device. 
26112 * Processes the read keys command response by copying the 26113 * reservation key information into the user provided buffer. 26114 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented. 26115 * 26116 * Arguments: un - Pointer to soft state struct for the target. 26117 * usrp - user provided pointer to multihost Persistent In Read 26118 * Keys structure (mhioc_inkeys_t) 26119 * flag - this argument is a pass through to ddi_copyxxx() 26120 * directly from the mode argument of ioctl(). 26121 * 26122 * Return Code: 0 - Success 26123 * EACCES 26124 * ENOTSUP 26125 * errno return code from sd_send_scsi_cmd() 26126 * 26127 * Context: Can sleep. Does not return until command is completed. 26128 */ 26129 26130 static int 26131 sd_persistent_reservation_in_read_keys(struct sd_lun *un, 26132 mhioc_inkeys_t *usrp, int flag) 26133 { 26134 #ifdef _MULTI_DATAMODEL 26135 struct mhioc_key_list32 li32; 26136 #endif 26137 sd_prin_readkeys_t *in; 26138 mhioc_inkeys_t *ptr; 26139 mhioc_key_list_t li; 26140 uchar_t *data_bufp; 26141 int data_len; 26142 int rval; 26143 size_t copysz; 26144 26145 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) { 26146 return (EINVAL); 26147 } 26148 bzero(&li, sizeof (mhioc_key_list_t)); 26149 26150 /* 26151 * Get the listsize from user 26152 */ 26153 #ifdef _MULTI_DATAMODEL 26154 26155 switch (ddi_model_convert_from(flag & FMODELS)) { 26156 case DDI_MODEL_ILP32: 26157 copysz = sizeof (struct mhioc_key_list32); 26158 if (ddi_copyin(ptr->li, &li32, copysz, flag)) { 26159 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26160 "sd_persistent_reservation_in_read_keys: " 26161 "failed ddi_copyin: mhioc_key_list32_t\n"); 26162 rval = EFAULT; 26163 goto done; 26164 } 26165 li.listsize = li32.listsize; 26166 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list; 26167 break; 26168 26169 case DDI_MODEL_NONE: 26170 copysz = sizeof (mhioc_key_list_t); 26171 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26172 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26173 "sd_persistent_reservation_in_read_keys: " 26174 "failed ddi_copyin: mhioc_key_list_t\n"); 26175 rval = EFAULT; 26176 goto done; 26177 } 26178 break; 26179 } 26180 26181 #else /* ! 
_MULTI_DATAMODEL */ 26182 copysz = sizeof (mhioc_key_list_t); 26183 if (ddi_copyin(ptr->li, &li, copysz, flag)) { 26184 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26185 "sd_persistent_reservation_in_read_keys: " 26186 "failed ddi_copyin: mhioc_key_list_t\n"); 26187 rval = EFAULT; 26188 goto done; 26189 } 26190 #endif 26191 26192 data_len = li.listsize * MHIOC_RESV_KEY_SIZE; 26193 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t)); 26194 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26195 26196 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_KEYS, 26197 data_len, data_bufp)) != 0) { 26198 goto done; 26199 } 26200 in = (sd_prin_readkeys_t *)data_bufp; 26201 ptr->generation = BE_32(in->generation); 26202 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE; 26203 26204 /* 26205 * Return the min(listsize, listlen) keys 26206 */ 26207 #ifdef _MULTI_DATAMODEL 26208 26209 switch (ddi_model_convert_from(flag & FMODELS)) { 26210 case DDI_MODEL_ILP32: 26211 li32.listlen = li.listlen; 26212 if (ddi_copyout(&li32, ptr->li, copysz, flag)) { 26213 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26214 "sd_persistent_reservation_in_read_keys: " 26215 "failed ddi_copyout: mhioc_key_list32_t\n"); 26216 rval = EFAULT; 26217 goto done; 26218 } 26219 break; 26220 26221 case DDI_MODEL_NONE: 26222 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26223 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26224 "sd_persistent_reservation_in_read_keys: " 26225 "failed ddi_copyout: mhioc_key_list_t\n"); 26226 rval = EFAULT; 26227 goto done; 26228 } 26229 break; 26230 } 26231 26232 #else /* ! _MULTI_DATAMODEL */ 26233 26234 if (ddi_copyout(&li, ptr->li, copysz, flag)) { 26235 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26236 "sd_persistent_reservation_in_read_keys: " 26237 "failed ddi_copyout: mhioc_key_list_t\n"); 26238 rval = EFAULT; 26239 goto done; 26240 } 26241 26242 #endif /* _MULTI_DATAMODEL */ 26243 26244 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE, 26245 li.listsize * MHIOC_RESV_KEY_SIZE); 26246 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) { 26247 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26248 "sd_persistent_reservation_in_read_keys: " 26249 "failed ddi_copyout: keylist\n"); 26250 rval = EFAULT; 26251 } 26252 done: 26253 kmem_free(data_bufp, data_len); 26254 return (rval); 26255 } 26256 26257 26258 /* 26259 * Function: sd_persistent_reservation_in_read_resv 26260 * 26261 * Description: This routine is the driver entry point for handling CD-ROM 26262 * multi-host persistent reservation requests (MHIOCGRP_INRESV) 26263 * by sending the SCSI-3 PRIN commands to the device. 26264 * Process the read persistent reservations command response by 26265 * copying the reservation information into the user provided 26266 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented. 26267 * 26268 * Arguments: un - Pointer to soft state struct for the target. 26269 * usrp - user provided pointer to multihost Persistent In Read 26270 * Resv structure (mhioc_inresvs_t) 26271 * flag - this argument is a pass through to ddi_copyxxx() 26272 * directly from the mode argument of ioctl(). 26273 * 26274 * Return Code: 0 - Success 26275 * EACCES 26276 * ENOTSUP 26277 * errno return code from sd_send_scsi_cmd() 26278 * 26279 * Context: Can sleep. Does not return until command is completed.
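 *
 * Before the read-reservations variant below, a userland sketch of
 * driving the read-keys path above through ioctl(2); the device path
 * is hypothetical, and the structure usage follows mhioc_inkeys_t and
 * mhioc_key_list_t exactly as they are consumed above.
 */
#if 0	/* userland example; not part of the driver */
#include <sys/types.h>
#include <sys/mhd.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	mhioc_resv_key_t keys[4];	/* caller-supplied key array */
	mhioc_key_list_t kl;
	mhioc_inkeys_t ik;
	int fd;

	if ((fd = open("/dev/rdsk/c0t0d0s2", O_RDONLY)) < 0)
		return (1);
	kl.listsize = 4;		/* capacity we are providing */
	kl.list = keys;
	ik.li = &kl;
	if (ioctl(fd, MHIOCGRP_INKEYS, &ik) == 0) {
		/* listlen reports how many keys the device holds */
		(void) printf("generation %u, %u key(s)\n",
		    ik.generation, kl.listlen);
	}
	(void) close(fd);
	return (0);
}
#endif
/*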
26280 */ 26281 26282 static int 26283 sd_persistent_reservation_in_read_resv(struct sd_lun *un, 26284 mhioc_inresvs_t *usrp, int flag) 26285 { 26286 #ifdef _MULTI_DATAMODEL 26287 struct mhioc_resv_desc_list32 resvlist32; 26288 #endif 26289 sd_prin_readresv_t *in; 26290 mhioc_inresvs_t *ptr; 26291 sd_readresv_desc_t *readresv_ptr; 26292 mhioc_resv_desc_list_t resvlist; 26293 mhioc_resv_desc_t resvdesc; 26294 uchar_t *data_bufp; 26295 int data_len; 26296 int rval; 26297 int i; 26298 size_t copysz; 26299 mhioc_resv_desc_t *bufp; 26300 26301 if ((ptr = usrp) == NULL) { 26302 return (EINVAL); 26303 } 26304 26305 /* 26306 * Get the listsize from user 26307 */ 26308 #ifdef _MULTI_DATAMODEL 26309 switch (ddi_model_convert_from(flag & FMODELS)) { 26310 case DDI_MODEL_ILP32: 26311 copysz = sizeof (struct mhioc_resv_desc_list32); 26312 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) { 26313 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26314 "sd_persistent_reservation_in_read_resv: " 26315 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26316 rval = EFAULT; 26317 goto done; 26318 } 26319 resvlist.listsize = resvlist32.listsize; 26320 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list; 26321 break; 26322 26323 case DDI_MODEL_NONE: 26324 copysz = sizeof (mhioc_resv_desc_list_t); 26325 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26326 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26327 "sd_persistent_reservation_in_read_resv: " 26328 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26329 rval = EFAULT; 26330 goto done; 26331 } 26332 break; 26333 } 26334 #else /* ! _MULTI_DATAMODEL */ 26335 copysz = sizeof (mhioc_resv_desc_list_t); 26336 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) { 26337 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26338 "sd_persistent_reservation_in_read_resv: " 26339 "failed ddi_copyin: mhioc_resv_desc_list_t\n"); 26340 rval = EFAULT; 26341 goto done; 26342 } 26343 #endif /* ! _MULTI_DATAMODEL */ 26344 26345 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN; 26346 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t)); 26347 data_bufp = kmem_zalloc(data_len, KM_SLEEP); 26348 26349 if ((rval = sd_send_scsi_PERSISTENT_RESERVE_IN(un, SD_READ_RESV, 26350 data_len, data_bufp)) != 0) { 26351 goto done; 26352 } 26353 in = (sd_prin_readresv_t *)data_bufp; 26354 ptr->generation = BE_32(in->generation); 26355 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN; 26356 26357 /* 26358 * Return the min(listsize, listlen) keys 26359 */ 26360 #ifdef _MULTI_DATAMODEL 26361 26362 switch (ddi_model_convert_from(flag & FMODELS)) { 26363 case DDI_MODEL_ILP32: 26364 resvlist32.listlen = resvlist.listlen; 26365 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) { 26366 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26367 "sd_persistent_reservation_in_read_resv: " 26368 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26369 rval = EFAULT; 26370 goto done; 26371 } 26372 break; 26373 26374 case DDI_MODEL_NONE: 26375 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26376 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26377 "sd_persistent_reservation_in_read_resv: " 26378 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26379 rval = EFAULT; 26380 goto done; 26381 } 26382 break; 26383 } 26384 26385 #else /* ! _MULTI_DATAMODEL */ 26386 26387 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) { 26388 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26389 "sd_persistent_reservation_in_read_resv: " 26390 "failed ddi_copyout: mhioc_resv_desc_list_t\n"); 26391 rval = EFAULT; 26392 goto done; 26393 } 26394 26395 #endif /* !
_MULTI_DATAMODEL */ 26396 26397 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc; 26398 bufp = resvlist.list; 26399 copysz = sizeof (mhioc_resv_desc_t); 26400 for (i = 0; i < min(resvlist.listlen, resvlist.listsize); 26401 i++, readresv_ptr++, bufp++) { 26402 26403 bcopy(&readresv_ptr->resvkey, &resvdesc.key, 26404 MHIOC_RESV_KEY_SIZE); 26405 resvdesc.type = readresv_ptr->type; 26406 resvdesc.scope = readresv_ptr->scope; 26407 resvdesc.scope_specific_addr = 26408 BE_32(readresv_ptr->scope_specific_addr); 26409 26410 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) { 26411 SD_ERROR(SD_LOG_IOCTL_MHD, un, 26412 "sd_persistent_reservation_in_read_resv: " 26413 "failed ddi_copyout: resvlist\n"); 26414 rval = EFAULT; 26415 goto done; 26416 } 26417 } 26418 done: 26419 kmem_free(data_bufp, data_len); 26420 return (rval); 26421 } 26422 26423 26424 /* 26425 * Function: sr_change_blkmode() 26426 * 26427 * Description: This routine is the driver entry point for handling CD-ROM 26428 * block mode ioctl requests. Support for returning and changing 26429 * the current block size in use by the device is implemented. The 26430 * LBA size is changed via a MODE SELECT Block Descriptor. 26431 * 26432 * This routine issues a mode sense with an allocation length of 26433 * 12 bytes for the mode page header and a single block descriptor. 26434 * 26435 * Arguments: dev - the device 'dev_t' 26436 * cmd - the request type; one of CDROMGBLKMODE (get) or 26437 * CDROMSBLKMODE (set) 26438 * data - current block size or requested block size 26439 * flag - this argument is a pass through to ddi_copyxxx() directly 26440 * from the mode argument of ioctl(). 26441 * 26442 * Return Code: the code returned by sd_send_scsi_cmd() 26443 * EINVAL if invalid arguments are provided 26444 * EFAULT if ddi_copyxxx() fails 26445 * ENXIO if fail ddi_get_soft_state 26446 * EIO if invalid mode sense block descriptor length 26447 * 26448 */ 26449 26450 static int 26451 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag) 26452 { 26453 struct sd_lun *un = NULL; 26454 struct mode_header *sense_mhp, *select_mhp; 26455 struct block_descriptor *sense_desc, *select_desc; 26456 int current_bsize; 26457 int rval = EINVAL; 26458 uchar_t *sense = NULL; 26459 uchar_t *select = NULL; 26460 26461 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE)); 26462 26463 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26464 return (ENXIO); 26465 } 26466 26467 /* 26468 * The block length is changed via the Mode Select block descriptor; the 26469 * "Read/Write Error Recovery" mode page (0x1) contents are not actually 26470 * required as part of this routine. Therefore the mode sense allocation 26471 * length is specified to be the length of a mode page header and a 26472 * block descriptor.
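 *
 * The 12 bytes are the 4 byte mode header plus the 8 byte block
 * descriptor; a requested size of 2048 (0x000800) packs into the
 * descriptor as blksize_hi/mid/lo = 0x00/0x08/0x00. A userland
 * fragment exercising both requests (hypothetical device path;
 * assumes <sys/cdio.h> and <fcntl.h>):
 */
#if 0	/* userland example; not part of the driver */
	int fd = open("/dev/rdsk/c0t6d0s2", O_RDONLY);
	int bsize;

	(void) ioctl(fd, CDROMGBLKMODE, &bsize);		/* current size */
	(void) ioctl(fd, CDROMSBLKMODE, CDROM_BLK_2048);	/* set 2048 */
#endif
/*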
26473 */ 26474 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26475 26476 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26477 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD)) != 0) { 26478 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26479 "sr_change_blkmode: Mode Sense Failed\n"); 26480 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26481 return (rval); 26482 } 26483 26484 /* Check the block descriptor len to handle only 1 block descriptor */ 26485 sense_mhp = (struct mode_header *)sense; 26486 if ((sense_mhp->bdesc_length == 0) || 26487 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) { 26488 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26489 "sr_change_blkmode: Mode Sense returned invalid block" 26490 " descriptor length\n"); 26491 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26492 return (EIO); 26493 } 26494 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH); 26495 current_bsize = ((sense_desc->blksize_hi << 16) | 26496 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo); 26497 26498 /* Process command */ 26499 switch (cmd) { 26500 case CDROMGBLKMODE: 26501 /* Return the block size obtained during the mode sense */ 26502 if (ddi_copyout(&current_bsize, (void *)data, 26503 sizeof (int), flag) != 0) 26504 rval = EFAULT; 26505 break; 26506 case CDROMSBLKMODE: 26507 /* Validate the requested block size */ 26508 switch (data) { 26509 case CDROM_BLK_512: 26510 case CDROM_BLK_1024: 26511 case CDROM_BLK_2048: 26512 case CDROM_BLK_2056: 26513 case CDROM_BLK_2336: 26514 case CDROM_BLK_2340: 26515 case CDROM_BLK_2352: 26516 case CDROM_BLK_2368: 26517 case CDROM_BLK_2448: 26518 case CDROM_BLK_2646: 26519 case CDROM_BLK_2647: 26520 break; 26521 default: 26522 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26523 "sr_change_blkmode: " 26524 "Block Size '%ld' Not Supported\n", data); 26525 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26526 return (EINVAL); 26527 } 26528 26529 /* 26530 * The current block size matches the requested block size so 26531 * there is no need to send the mode select to change the size 26532 */ 26533 if (current_bsize == data) { 26534 break; 26535 } 26536 26537 /* Build the select data for the requested block size */ 26538 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP); 26539 select_mhp = (struct mode_header *)select; 26540 select_desc = 26541 (struct block_descriptor *)(select + MODE_HEADER_LENGTH); 26542 /* 26543 * The LBA size is changed via the block descriptor, so the 26544 * descriptor is built according to the user data 26545 */ 26546 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH; 26547 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16); 26548 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8); 26549 select_desc->blksize_lo = (char)((data) & 0x000000ff); 26550 26551 /* Send the mode select for the requested block size */ 26552 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 26553 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26554 SD_PATH_STANDARD)) != 0) { 26555 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26556 "sr_change_blkmode: Mode Select Failed\n"); 26557 /* 26558 * The mode select failed for the requested block size, 26559 * so reset the data for the original block size and 26560 * send it to the target. The error is indicated by the 26561 * return value for the failed mode select.
26562 */ 26563 select_desc->blksize_hi = sense_desc->blksize_hi; 26564 select_desc->blksize_mid = sense_desc->blksize_mid; 26565 select_desc->blksize_lo = sense_desc->blksize_lo; 26566 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, 26567 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE, 26568 SD_PATH_STANDARD); 26569 } else { 26570 ASSERT(!mutex_owned(SD_MUTEX(un))); 26571 mutex_enter(SD_MUTEX(un)); 26572 sd_update_block_info(un, (uint32_t)data, 0); 26573 26574 mutex_exit(SD_MUTEX(un)); 26575 } 26576 break; 26577 default: 26578 /* should not reach here, but check anyway */ 26579 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26580 "sr_change_blkmode: Command '%x' Not Supported\n", cmd); 26581 rval = EINVAL; 26582 break; 26583 } 26584 26585 if (select) { 26586 kmem_free(select, BUFLEN_CHG_BLK_MODE); 26587 } 26588 if (sense) { 26589 kmem_free(sense, BUFLEN_CHG_BLK_MODE); 26590 } 26591 return (rval); 26592 } 26593 26594 26595 /* 26596 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines 26597 * implement driver support for getting and setting the CD speed. The command 26598 * set used will be based on the device type. If the device has not been 26599 * identified as MMC the Toshiba vendor specific mode page will be used. If 26600 * the device is MMC but does not support the Real Time Streaming feature 26601 * the SET CD SPEED command will be used to set speed and mode page 0x2A will 26602 * be used to read the speed. 26603 */ 26604 26605 /* 26606 * Function: sr_change_speed() 26607 * 26608 * Description: This routine is the driver entry point for handling CD-ROM 26609 * drive speed ioctl requests for devices supporting the Toshiba 26610 * vendor specific drive speed mode page. Support for returning 26611 * and changing the current drive speed in use by the device is 26612 * implemented. 26613 * 26614 * Arguments: dev - the device 'dev_t' 26615 * cmd - the request type; one of CDROMGDRVSPEED (get) or 26616 * CDROMSDRVSPEED (set) 26617 * data - current drive speed or requested drive speed 26618 * flag - this argument is a pass through to ddi_copyxxx() directly 26619 * from the mode argument of ioctl(). 26620 * 26621 * Return Code: the code returned by sd_send_scsi_cmd() 26622 * EINVAL if invalid arguments are provided 26623 * EFAULT if ddi_copyxxx() fails 26624 * ENXIO if fail ddi_get_soft_state 26625 * EIO if invalid mode sense block descriptor length 26626 */ 26627 26628 static int 26629 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26630 { 26631 struct sd_lun *un = NULL; 26632 struct mode_header *sense_mhp, *select_mhp; 26633 struct mode_speed *sense_page, *select_page; 26634 int current_speed; 26635 int rval = EINVAL; 26636 int bd_len; 26637 uchar_t *sense = NULL; 26638 uchar_t *select = NULL; 26639 26640 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26641 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26642 return (ENXIO); 26643 } 26644 26645 /* 26646 * Note: The drive speed is being modified here according to a Toshiba 26647 * vendor specific mode page (0x31). 
26648 */ 26649 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26650 26651 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 26652 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED, 26653 SD_PATH_STANDARD)) != 0) { 26654 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26655 "sr_change_speed: Mode Sense Failed\n"); 26656 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26657 return (rval); 26658 } 26659 sense_mhp = (struct mode_header *)sense; 26660 26661 /* Check the block descriptor len to handle only 1 block descriptor */ 26662 bd_len = sense_mhp->bdesc_length; 26663 if (bd_len > MODE_BLK_DESC_LENGTH) { 26664 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26665 "sr_change_speed: Mode Sense returned invalid block " 26666 "descriptor length\n"); 26667 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26668 return (EIO); 26669 } 26670 26671 sense_page = (struct mode_speed *) 26672 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length); 26673 current_speed = sense_page->speed; 26674 26675 /* Process command */ 26676 switch (cmd) { 26677 case CDROMGDRVSPEED: 26678 /* Return the drive speed obtained during the mode sense */ 26679 if (current_speed == 0x2) { 26680 current_speed = CDROM_TWELVE_SPEED; 26681 } 26682 if (ddi_copyout(&current_speed, (void *)data, 26683 sizeof (int), flag) != 0) { 26684 rval = EFAULT; 26685 } 26686 break; 26687 case CDROMSDRVSPEED: 26688 /* Validate the requested drive speed */ 26689 switch ((uchar_t)data) { 26690 case CDROM_TWELVE_SPEED: 26691 data = 0x2; 26692 /*FALLTHROUGH*/ 26693 case CDROM_NORMAL_SPEED: 26694 case CDROM_DOUBLE_SPEED: 26695 case CDROM_QUAD_SPEED: 26696 case CDROM_MAXIMUM_SPEED: 26697 break; 26698 default: 26699 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26700 "sr_change_speed: " 26701 "Drive Speed '%d' Not Supported\n", (uchar_t)data); 26702 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26703 return (EINVAL); 26704 } 26705 26706 /* 26707 * The current drive speed matches the requested drive speed so 26708 * there is no need to send the mode select to change the speed 26709 */ 26710 if (current_speed == data) { 26711 break; 26712 } 26713 26714 /* Build the select data for the requested drive speed */ 26715 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP); 26716 select_mhp = (struct mode_header *)select; 26717 select_mhp->bdesc_length = 0; 26718 select_page = 26719 (struct mode_speed *)(select + MODE_HEADER_LENGTH); 26722 select_page->mode_page.code = CDROM_MODE_SPEED; 26723 select_page->mode_page.length = 2; 26724 select_page->speed = (uchar_t)data; 26725 26726 /* Send the mode select for the requested drive speed */ 26727 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26728 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26729 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 26730 /* 26731 * The mode select failed for the requested drive speed, 26732 * so reset the data for the original drive speed and 26733 * send it to the target. The error is indicated by the 26734 * return value for the failed mode select.
26735 */ 26736 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26737 "sr_drive_speed: Mode Select Failed\n"); 26738 select_page->speed = sense_page->speed; 26739 (void) sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 26740 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH, 26741 SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 26742 } 26743 break; 26744 default: 26745 /* should not reach here, but check anyway */ 26746 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26747 "sr_change_speed: Command '%x' Not Supported\n", cmd); 26748 rval = EINVAL; 26749 break; 26750 } 26751 26752 if (select) { 26753 kmem_free(select, BUFLEN_MODE_CDROM_SPEED); 26754 } 26755 if (sense) { 26756 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED); 26757 } 26758 26759 return (rval); 26760 } 26761 26762 26763 /* 26764 * Function: sr_atapi_change_speed() 26765 * 26766 * Description: This routine is the driver entry point for handling CD-ROM 26767 * drive speed ioctl requests for MMC devices that do not support 26768 * the Real Time Streaming feature (0x107). 26769 * 26770 * Note: This routine will use the SET SPEED command which may not 26771 * be supported by all devices. 26772 * 26773 * Arguments: dev- the device 'dev_t' 26774 * cmd- the request type; one of CDROMGDRVSPEED (get) or 26775 * CDROMSDRVSPEED (set) 26776 * data- current drive speed or requested drive speed 26777 * flag- this argument is a pass through to ddi_copyxxx() directly 26778 * from the mode argument of ioctl(). 26779 * 26780 * Return Code: the code returned by sd_send_scsi_cmd() 26781 * EINVAL if invalid arguments are provided 26782 * EFAULT if ddi_copyxxx() fails 26783 * ENXIO if fail ddi_get_soft_state 26784 * EIO if invalid mode sense block descriptor length 26785 */ 26786 26787 static int 26788 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag) 26789 { 26790 struct sd_lun *un; 26791 struct uscsi_cmd *com = NULL; 26792 struct mode_header_grp2 *sense_mhp; 26793 uchar_t *sense_page; 26794 uchar_t *sense = NULL; 26795 char cdb[CDB_GROUP5]; 26796 int bd_len; 26797 int current_speed = 0; 26798 int max_speed = 0; 26799 int rval; 26800 26801 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED)); 26802 26803 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26804 return (ENXIO); 26805 } 26806 26807 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP); 26808 26809 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 26810 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, 26811 SD_PATH_STANDARD)) != 0) { 26812 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26813 "sr_atapi_change_speed: Mode Sense Failed\n"); 26814 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26815 return (rval); 26816 } 26817 26818 /* Check the block descriptor len to handle only 1 block descriptor */ 26819 sense_mhp = (struct mode_header_grp2 *)sense; 26820 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo; 26821 if (bd_len > MODE_BLK_DESC_LENGTH) { 26822 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26823 "sr_atapi_change_speed: Mode Sense returned invalid " 26824 "block descriptor length\n"); 26825 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26826 return (EIO); 26827 } 26828 26829 /* Calculate the current and maximum drive speeds */ 26830 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 26831 current_speed = (sense_page[14] << 8) | sense_page[15]; 26832 max_speed = (sense_page[8] << 8) | sense_page[9]; 26833 26834 /* Process the command */ 26835 switch (cmd) { 26836 case CDROMGDRVSPEED: 26837 current_speed /= SD_SPEED_1X; 26838 if 
(ddi_copyout(&current_speed, (void *)data, 26839 sizeof (int), flag) != 0) 26840 rval = EFAULT; 26841 break; 26842 case CDROMSDRVSPEED: 26843 /* Convert the speed code to KB/sec */ 26844 switch ((uchar_t)data) { 26845 case CDROM_NORMAL_SPEED: 26846 current_speed = SD_SPEED_1X; 26847 break; 26848 case CDROM_DOUBLE_SPEED: 26849 current_speed = 2 * SD_SPEED_1X; 26850 break; 26851 case CDROM_QUAD_SPEED: 26852 current_speed = 4 * SD_SPEED_1X; 26853 break; 26854 case CDROM_TWELVE_SPEED: 26855 current_speed = 12 * SD_SPEED_1X; 26856 break; 26857 case CDROM_MAXIMUM_SPEED: 26858 current_speed = 0xffff; 26859 break; 26860 default: 26861 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26862 "sr_atapi_change_speed: invalid drive speed %d\n", 26863 (uchar_t)data); 26864 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26865 return (EINVAL); 26866 } 26867 26868 /* Check the request against the drive's max speed. */ 26869 if (current_speed != 0xffff) { 26870 if (current_speed > max_speed) { 26871 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26872 return (EINVAL); 26873 } 26874 } 26875 26876 /* 26877 * Build and send the SET SPEED command 26878 * 26879 * Note: The SET SPEED (0xBB) command used in this routine is 26880 * obsolete per the SCSI MMC spec but still supported in the 26881 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI; 26882 * therefore the command is still implemented in this routine. 26883 */ 26884 bzero(cdb, sizeof (cdb)); 26885 cdb[0] = (char)SCMD_SET_CDROM_SPEED; 26886 cdb[2] = (uchar_t)(current_speed >> 8); 26887 cdb[3] = (uchar_t)current_speed; 26888 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26889 com->uscsi_cdb = (caddr_t)cdb; 26890 com->uscsi_cdblen = CDB_GROUP5; 26891 com->uscsi_bufaddr = NULL; 26892 com->uscsi_buflen = 0; 26893 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26894 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, 0, 26895 UIO_SYSSPACE, SD_PATH_STANDARD); 26896 break; 26897 default: 26898 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 26899 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd); 26900 rval = EINVAL; 26901 } 26902 26903 if (sense) { 26904 kmem_free(sense, BUFLEN_MODE_CDROM_CAP); 26905 } 26906 if (com) { 26907 kmem_free(com, sizeof (*com)); 26908 } 26909 return (rval); 26910 } 26911 26912 26913 /* 26914 * Function: sr_pause_resume() 26915 * 26916 * Description: This routine is the driver entry point for handling CD-ROM 26917 * pause/resume ioctl requests. This only affects the audio play 26918 * operation. 26919 * 26920 * Arguments: dev - the device 'dev_t' 26921 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used 26922 * for setting the resume bit of the cdb.
26923 * 26924 * Return Code: the code returned by sd_send_scsi_cmd() 26925 * EINVAL if invalid mode specified 26926 * 26927 */ 26928 26929 static int 26930 sr_pause_resume(dev_t dev, int cmd) 26931 { 26932 struct sd_lun *un; 26933 struct uscsi_cmd *com; 26934 char cdb[CDB_GROUP1]; 26935 int rval; 26936 26937 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 26938 return (ENXIO); 26939 } 26940 26941 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 26942 bzero(cdb, CDB_GROUP1); 26943 cdb[0] = SCMD_PAUSE_RESUME; 26944 switch (cmd) { 26945 case CDROMRESUME: 26946 cdb[8] = 1; 26947 break; 26948 case CDROMPAUSE: 26949 cdb[8] = 0; 26950 break; 26951 default: 26952 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:" 26953 " Command '%x' Not Supported\n", cmd); 26954 rval = EINVAL; 26955 goto done; 26956 } 26957 26958 com->uscsi_cdb = cdb; 26959 com->uscsi_cdblen = CDB_GROUP1; 26960 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 26961 26962 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 26963 UIO_SYSSPACE, SD_PATH_STANDARD); 26964 26965 done: 26966 kmem_free(com, sizeof (*com)); 26967 return (rval); 26968 } 26969 26970 26971 /* 26972 * Function: sr_play_msf() 26973 * 26974 * Description: This routine is the driver entry point for handling CD-ROM 26975 * ioctl requests to output the audio signals at the specified 26976 * starting address and continue the audio play until the specified 26977 * ending address (CDROMPLAYMSF). The address is in Minute Second 26978 * Frame (MSF) format. 26979 * 26980 * Arguments: dev - the device 'dev_t' 26981 * data - pointer to user provided audio msf structure, 26982 * specifying start/end addresses.
26985 * 26986 * Return Code: the code returned by sd_send_scsi_cmd() 26987 * EFAULT if ddi_copyxxx() fails 26988 * ENXIO if fail ddi_get_soft_state 26989 * EINVAL if data pointer is NULL 26990 */ 26991 26992 static int 26993 sr_play_msf(dev_t dev, caddr_t data, int flag) 26994 { 26995 struct sd_lun *un; 26996 struct uscsi_cmd *com; 26997 struct cdrom_msf msf_struct; 26998 struct cdrom_msf *msf = &msf_struct; 26999 char cdb[CDB_GROUP1]; 27000 int rval; 27001 27002 if (data == NULL) { 27003 return (EINVAL); 27004 } 27005 27006 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27007 return (ENXIO); 27008 } 27009 27010 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) { 27011 return (EFAULT); 27012 } 27013 27014 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27015 bzero(cdb, CDB_GROUP1); 27016 cdb[0] = SCMD_PLAYAUDIO_MSF; 27017 if (un->un_f_cfg_playmsf_bcd == TRUE) { 27018 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0); 27019 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0); 27020 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0); 27021 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1); 27022 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1); 27023 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1); 27024 } else { 27025 cdb[3] = msf->cdmsf_min0; 27026 cdb[4] = msf->cdmsf_sec0; 27027 cdb[5] = msf->cdmsf_frame0; 27028 cdb[6] = msf->cdmsf_min1; 27029 cdb[7] = msf->cdmsf_sec1; 27030 cdb[8] = msf->cdmsf_frame1; 27031 } 27032 com->uscsi_cdb = cdb; 27033 com->uscsi_cdblen = CDB_GROUP1; 27034 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27035 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27036 UIO_SYSSPACE, SD_PATH_STANDARD); 27037 kmem_free(com, sizeof (*com)); 27038 return (rval); 27039 } 27040 27041 27042 /* 27043 * Function: sr_play_trkind() 27044 * 27045 * Description: This routine is the driver entry point for handling CD-ROM 27046 * ioctl requests to output the audio signals at the specified 27047 * starting address and continue the audio play until the specified 27048 * ending address (CDROMPLAYTRKIND). The address is in Track Index 27049 * format. 27050 * 27051 * Arguments: dev - the device 'dev_t' 27052 * data - pointer to user provided audio track/index structure, 27053 * specifying start/end addresses. 27054 * flag - this argument is a pass through to ddi_copyxxx() 27055 * directly from the mode argument of ioctl(). 
27056 * 27057 * Return Code: the code returned by sd_send_scsi_cmd() 27058 * EFAULT if ddi_copyxxx() fails 27059 * ENXIO if fail ddi_get_soft_state 27060 * EINVAL if data pointer is NULL 27061 */ 27062 27063 static int 27064 sr_play_trkind(dev_t dev, caddr_t data, int flag) 27065 { 27066 struct cdrom_ti ti_struct; 27067 struct cdrom_ti *ti = &ti_struct; 27068 struct uscsi_cmd *com = NULL; 27069 char cdb[CDB_GROUP1]; 27070 int rval; 27071 27072 if (data == NULL) { 27073 return (EINVAL); 27074 } 27075 27076 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) { 27077 return (EFAULT); 27078 } 27079 27080 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27081 bzero(cdb, CDB_GROUP1); 27082 cdb[0] = SCMD_PLAYAUDIO_TI; 27083 cdb[4] = ti->cdti_trk0; 27084 cdb[5] = ti->cdti_ind0; 27085 cdb[7] = ti->cdti_trk1; 27086 cdb[8] = ti->cdti_ind1; 27087 com->uscsi_cdb = cdb; 27088 com->uscsi_cdblen = CDB_GROUP1; 27089 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT; 27090 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27091 UIO_SYSSPACE, SD_PATH_STANDARD); 27092 kmem_free(com, sizeof (*com)); 27093 return (rval); 27094 } 27095 27096 27097 /* 27098 * Function: sr_read_all_subcodes() 27099 * 27100 * Description: This routine is the driver entry point for handling CD-ROM 27101 * ioctl requests to return raw subcode data while the target is 27102 * playing audio (CDROMSUBCODE). 27103 * 27104 * Arguments: dev - the device 'dev_t' 27105 * data - pointer to user provided cdrom subcode structure, 27106 * specifying the transfer length and address. 27107 * flag - this argument is a pass through to ddi_copyxxx() 27108 * directly from the mode argument of ioctl(). 27109 * 27110 * Return Code: the code returned by sd_send_scsi_cmd() 27111 * EFAULT if ddi_copyxxx() fails 27112 * ENXIO if fail ddi_get_soft_state 27113 * EINVAL if data pointer is NULL 27114 */ 27115 27116 static int 27117 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag) 27118 { 27119 struct sd_lun *un = NULL; 27120 struct uscsi_cmd *com = NULL; 27121 struct cdrom_subcode *subcode = NULL; 27122 int rval; 27123 size_t buflen; 27124 char cdb[CDB_GROUP5]; 27125 27126 #ifdef _MULTI_DATAMODEL 27127 /* To support ILP32 applications in an LP64 world */ 27128 struct cdrom_subcode32 cdrom_subcode32; 27129 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32; 27130 #endif 27131 if (data == NULL) { 27132 return (EINVAL); 27133 } 27134 27135 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 27136 return (ENXIO); 27137 } 27138 27139 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP); 27140 27141 #ifdef _MULTI_DATAMODEL 27142 switch (ddi_model_convert_from(flag & FMODELS)) { 27143 case DDI_MODEL_ILP32: 27144 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) { 27145 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27146 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27147 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27148 return (EFAULT); 27149 } 27150 /* Convert the ILP32 uscsi data from the application to LP64 */ 27151 cdrom_subcode32tocdrom_subcode(cdsc32, subcode); 27152 break; 27153 case DDI_MODEL_NONE: 27154 if (ddi_copyin(data, subcode, 27155 sizeof (struct cdrom_subcode), flag)) { 27156 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27157 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27158 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27159 return (EFAULT); 27160 } 27161 break; 27162 } 27163 #else /* ! 
_MULTI_DATAMODEL */ 27164 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) { 27165 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27166 "sr_read_all_subcodes: ddi_copyin Failed\n"); 27167 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27168 return (EFAULT); 27169 } 27170 #endif /* _MULTI_DATAMODEL */ 27171 27172 /* 27173 * Since MMC-2 expects at most 3 bytes for the length, check whether 27174 * the length input is greater than 3 bytes. 27175 */ 27176 if ((subcode->cdsc_length & 0xFF000000) != 0) { 27177 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27178 "sr_read_all_subcodes: " 27179 "cdrom transfer length too large: %d (limit %d)\n", 27180 subcode->cdsc_length, 0xFFFFFF); 27181 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27182 return (EINVAL); 27183 } 27184 27185 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length; 27186 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27187 bzero(cdb, CDB_GROUP5); 27188 27189 if (un->un_f_mmc_cap == TRUE) { 27190 cdb[0] = (char)SCMD_READ_CD; 27191 cdb[2] = (char)0xff; 27192 cdb[3] = (char)0xff; 27193 cdb[4] = (char)0xff; 27194 cdb[5] = (char)0xff; 27195 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27196 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27197 cdb[8] = ((subcode->cdsc_length) & 0x000000ff); 27198 cdb[10] = 1; 27199 } else { 27200 /* 27201 * Note: A vendor specific command (0xDF) is being used here to 27202 * request a read of all subcodes. 27203 */ 27204 cdb[0] = (char)SCMD_READ_ALL_SUBCODES; 27205 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24); 27206 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16); 27207 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8); 27208 cdb[9] = ((subcode->cdsc_length) & 0x000000ff); 27209 } 27210 com->uscsi_cdb = cdb; 27211 com->uscsi_cdblen = CDB_GROUP5; 27212 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr; 27213 com->uscsi_buflen = buflen; 27214 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27215 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 27216 UIO_SYSSPACE, SD_PATH_STANDARD); 27217 kmem_free(subcode, sizeof (struct cdrom_subcode)); 27218 kmem_free(com, sizeof (*com)); 27219 return (rval); 27220 } 27221 27222 27223 /* 27224 * Function: sr_read_subchannel() 27225 * 27226 * Description: This routine is the driver entry point for handling CD-ROM 27227 * ioctl requests to return the Q sub-channel data of the CD 27228 * current position block (CDROMSUBCHNL). The data includes the 27229 * track number, index number, absolute CD-ROM address (LBA or MSF 27230 * format per the user), track relative CD-ROM address (LBA or MSF 27231 * format per the user), control data and audio status. 27232 * 27233 * Arguments: dev - the device 'dev_t' 27234 * data - pointer to user provided cdrom sub-channel structure 27235 * flag - this argument is a pass through to ddi_copyxxx() 27236 * directly from the mode argument of ioctl().
27237 * 27238 * Return Code: the code returned by sd_send_scsi_cmd() 27239 * EFAULT if ddi_copyxxx() fails 27240 * ENXIO if fail ddi_get_soft_state 27241 * EINVAL if data pointer is NULL 27242 */ 27243 27244 static int 27245 sr_read_subchannel(dev_t dev, caddr_t data, int flag) 27246 { 27247 struct sd_lun *un; 27248 struct uscsi_cmd *com; 27249 struct cdrom_subchnl subchannel; 27250 struct cdrom_subchnl *subchnl = &subchannel; 27251 char cdb[CDB_GROUP1]; 27252 caddr_t buffer; 27253 int rval; 27254 27255 if (data == NULL) { 27256 return (EINVAL); 27257 } 27258 27259 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27260 (un->un_state == SD_STATE_OFFLINE)) { 27261 return (ENXIO); 27262 } 27263 27264 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) { 27265 return (EFAULT); 27266 } 27267 27268 buffer = kmem_zalloc((size_t)16, KM_SLEEP); 27269 bzero(cdb, CDB_GROUP1); 27270 cdb[0] = SCMD_READ_SUBCHANNEL; 27271 /* Set the MSF bit based on the user requested address format */ 27272 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02; 27273 /* 27274 * Set the Q bit in byte 2 to indicate that Q sub-channel data be 27275 * returned 27276 */ 27277 cdb[2] = 0x40; 27278 /* 27279 * Set byte 3 to specify the return data format. A value of 0x01 27280 * indicates that the CD-ROM current position should be returned. 27281 */ 27282 cdb[3] = 0x01; 27283 cdb[8] = 0x10; 27284 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27285 com->uscsi_cdb = cdb; 27286 com->uscsi_cdblen = CDB_GROUP1; 27287 com->uscsi_bufaddr = buffer; 27288 com->uscsi_buflen = 16; 27289 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27290 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27291 UIO_SYSSPACE, SD_PATH_STANDARD); 27292 if (rval != 0) { 27293 kmem_free(buffer, 16); 27294 kmem_free(com, sizeof (*com)); 27295 return (rval); 27296 } 27297 27298 /* Process the returned Q sub-channel data */ 27299 subchnl->cdsc_audiostatus = buffer[1]; 27300 subchnl->cdsc_adr = (buffer[5] & 0xF0); 27301 subchnl->cdsc_ctrl = (buffer[5] & 0x0F); 27302 subchnl->cdsc_trk = buffer[6]; 27303 subchnl->cdsc_ind = buffer[7]; 27304 if (subchnl->cdsc_format & CDROM_LBA) { 27305 subchnl->cdsc_absaddr.lba = 27306 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27307 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27308 subchnl->cdsc_reladdr.lba = 27309 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) + 27310 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]); 27311 } else if (un->un_f_cfg_readsub_bcd == TRUE) { 27312 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]); 27313 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]); 27314 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]); 27315 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]); 27316 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]); 27317 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]); 27318 } else { 27319 subchnl->cdsc_absaddr.msf.minute = buffer[9]; 27320 subchnl->cdsc_absaddr.msf.second = buffer[10]; 27321 subchnl->cdsc_absaddr.msf.frame = buffer[11]; 27322 subchnl->cdsc_reladdr.msf.minute = buffer[13]; 27323 subchnl->cdsc_reladdr.msf.second = buffer[14]; 27324 subchnl->cdsc_reladdr.msf.frame = buffer[15]; 27325 } 27326 kmem_free(buffer, 16); 27327 kmem_free(com, sizeof (*com)); 27328 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag) 27329 != 0) { 27330 return (EFAULT); 27331 } 27332 return (rval); 27333 } 27334 27335 27336 /* 27337 * Function:
sr_read_tocentry() 27338 * 27339 * Description: This routine is the driver entry point for handling CD-ROM 27340 * ioctl requests to read from the Table of Contents (TOC) 27341 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL 27342 * fields, the starting address (LBA or MSF format per the user) 27343 * and the data mode if the user specified track is a data track. 27344 * 27345 * Note: The READ HEADER (0x44) command used in this routine is 27346 * obsolete per the SCSI MMC spec but still supported in the 27347 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI; 27348 * therefore the command is still implemented in this routine. 27349 * 27350 * Arguments: dev - the device 'dev_t' 27351 * data - pointer to user provided toc entry structure, 27352 * specifying the track # and the address format 27353 * (LBA or MSF). 27354 * flag - this argument is a pass through to ddi_copyxxx() 27355 * directly from the mode argument of ioctl(). 27356 * 27357 * Return Code: the code returned by sd_send_scsi_cmd() 27358 * EFAULT if ddi_copyxxx() fails 27359 * ENXIO if fail ddi_get_soft_state 27360 * EINVAL if data pointer is NULL 27361 */ 27362 27363 static int 27364 sr_read_tocentry(dev_t dev, caddr_t data, int flag) 27365 { 27366 struct sd_lun *un = NULL; 27367 struct uscsi_cmd *com; 27368 struct cdrom_tocentry toc_entry; 27369 struct cdrom_tocentry *entry = &toc_entry; 27370 caddr_t buffer; 27371 int rval; 27372 char cdb[CDB_GROUP1]; 27373 27374 if (data == NULL) { 27375 return (EINVAL); 27376 } 27377 27378 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27379 (un->un_state == SD_STATE_OFFLINE)) { 27380 return (ENXIO); 27381 } 27382 27383 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) { 27384 return (EFAULT); 27385 } 27386 27387 /* Validate the requested track and address format */ 27388 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) { 27389 return (EINVAL); 27390 } 27391 27392 if (entry->cdte_track == 0) { 27393 return (EINVAL); 27394 } 27395 27396 buffer = kmem_zalloc((size_t)12, KM_SLEEP); 27397 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27398 bzero(cdb, CDB_GROUP1); 27399 27400 cdb[0] = SCMD_READ_TOC; 27401 /* Set the MSF bit based on the user requested address format */ 27402 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2); 27403 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 27404 cdb[6] = BYTE_TO_BCD(entry->cdte_track); 27405 } else { 27406 cdb[6] = entry->cdte_track; 27407 } 27408 27409 /* 27410 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
27411 * (4 byte TOC response header + 8 byte track descriptor) 27412 */ 27413 cdb[8] = 12; 27414 com->uscsi_cdb = cdb; 27415 com->uscsi_cdblen = CDB_GROUP1; 27416 com->uscsi_bufaddr = buffer; 27417 com->uscsi_buflen = 0x0C; 27418 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ); 27419 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27420 UIO_SYSSPACE, SD_PATH_STANDARD); 27421 if (rval != 0) { 27422 kmem_free(buffer, 12); 27423 kmem_free(com, sizeof (*com)); 27424 return (rval); 27425 } 27426 27427 /* Process the toc entry */ 27428 entry->cdte_adr = (buffer[5] & 0xF0) >> 4; 27429 entry->cdte_ctrl = (buffer[5] & 0x0F); 27430 if (entry->cdte_format & CDROM_LBA) { 27431 entry->cdte_addr.lba = 27432 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 27433 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 27434 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) { 27435 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]); 27436 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]); 27437 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]); 27438 /* 27439 * Send a READ TOC command using the LBA address format to get 27440 * the LBA for the track requested so it can be used in the 27441 * READ HEADER request 27442 * 27443 * Note: The MSF bit of the READ HEADER command specifies the 27444 * output format. The block address specified in that command 27445 * must be in LBA format. 27446 */ 27447 cdb[1] = 0; 27448 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27449 UIO_SYSSPACE, SD_PATH_STANDARD); 27450 if (rval != 0) { 27451 kmem_free(buffer, 12); 27452 kmem_free(com, sizeof (*com)); 27453 return (rval); 27454 } 27455 } else { 27456 entry->cdte_addr.msf.minute = buffer[9]; 27457 entry->cdte_addr.msf.second = buffer[10]; 27458 entry->cdte_addr.msf.frame = buffer[11]; 27459 /* 27460 * Send a READ TOC command using the LBA address format to get 27461 * the LBA for the track requested so it can be used in the 27462 * READ HEADER request 27463 * 27464 * Note: The MSF bit of the READ HEADER command specifies the 27465 * output format. The block address specified in that command 27466 * must be in LBA format. 27467 */ 27468 cdb[1] = 0; 27469 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27470 UIO_SYSSPACE, SD_PATH_STANDARD); 27471 if (rval != 0) { 27472 kmem_free(buffer, 12); 27473 kmem_free(com, sizeof (*com)); 27474 return (rval); 27475 } 27476 } 27477 27478 /* 27479 * Build and send the READ HEADER command to determine the data mode of 27480 * the user specified track. 27481 */ 27482 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) && 27483 (entry->cdte_track != CDROM_LEADOUT)) { 27484 bzero(cdb, CDB_GROUP1); 27485 cdb[0] = SCMD_READ_HEADER; 27486 cdb[2] = buffer[8]; 27487 cdb[3] = buffer[9]; 27488 cdb[4] = buffer[10]; 27489 cdb[5] = buffer[11]; 27490 cdb[8] = 0x08; 27491 com->uscsi_buflen = 0x08; 27492 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27493 UIO_SYSSPACE, SD_PATH_STANDARD); 27494 if (rval == 0) { 27495 entry->cdte_datamode = buffer[0]; 27496 } else { 27497 /* 27498 * The READ HEADER command failed; since it is 27499 * obsoleted in one spec, it's better to return 27500 * -1 for an invalid track so that we can still 27501 * receive the rest of the TOC data.
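 * (CDROM_LEADOUT and non-data tracks are likewise reported as -1 below.)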
27502 */ 27503 entry->cdte_datamode = (uchar_t)-1; 27504 } 27505 } else { 27506 entry->cdte_datamode = (uchar_t)-1; 27507 } 27508 27509 kmem_free(buffer, 12); 27510 kmem_free(com, sizeof (*com)); 27511 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0) 27512 return (EFAULT); 27513 27514 return (rval); 27515 } 27516 27517 27518 /* 27519 * Function: sr_read_tochdr() 27520 * 27521 * Description: This routine is the driver entry point for handling CD-ROM 27522 * ioctl requests to read the Table of Contents (TOC) header 27523 * (CDROMREADTOCHDR). The TOC header consists of the disk starting 27524 * and ending track numbers. 27525 * 27526 * Arguments: dev - the device 'dev_t' 27527 * data - pointer to user provided toc header structure, 27528 * specifying the starting and ending track numbers. 27529 * flag - this argument is a pass through to ddi_copyxxx() 27530 * directly from the mode argument of ioctl(). 27531 * 27532 * Return Code: the code returned by sd_send_scsi_cmd() 27533 * EFAULT if ddi_copyxxx() fails 27534 * ENXIO if fail ddi_get_soft_state 27535 * EINVAL if data pointer is NULL 27536 */ 27537 27538 static int 27539 sr_read_tochdr(dev_t dev, caddr_t data, int flag) 27540 { 27541 struct sd_lun *un; 27542 struct uscsi_cmd *com; 27543 struct cdrom_tochdr toc_header; 27544 struct cdrom_tochdr *hdr = &toc_header; 27545 char cdb[CDB_GROUP1]; 27546 int rval; 27547 caddr_t buffer; 27548 27549 if (data == NULL) { 27550 return (EINVAL); 27551 } 27552 27553 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27554 (un->un_state == SD_STATE_OFFLINE)) { 27555 return (ENXIO); 27556 } 27557 27558 buffer = kmem_zalloc(4, KM_SLEEP); 27559 bzero(cdb, CDB_GROUP1); 27560 cdb[0] = SCMD_READ_TOC; 27561 /* 27562 * Specifying a track number of 0x00 in the READ TOC command indicates 27563 * that the TOC header should be returned 27564 */ 27565 cdb[6] = 0x00; 27566 /* 27567 * Bytes 7 & 8 are the 4 byte allocation length for TOC header. 27568 * (2 byte data len + 1 byte starting track # + 1 byte ending track #) 27569 */ 27570 cdb[8] = 0x04; 27571 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27572 com->uscsi_cdb = cdb; 27573 com->uscsi_cdblen = CDB_GROUP1; 27574 com->uscsi_bufaddr = buffer; 27575 com->uscsi_buflen = 0x04; 27576 com->uscsi_timeout = 300; 27577 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27578 27579 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 27580 UIO_SYSSPACE, SD_PATH_STANDARD); if (rval != 0) { /* do not parse the (unfilled) header on error */ kmem_free(buffer, 4); kmem_free(com, sizeof (*com)); return (rval); } 27581 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) { 27582 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]); 27583 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]); 27584 } else { 27585 hdr->cdth_trk0 = buffer[2]; 27586 hdr->cdth_trk1 = buffer[3]; 27587 } 27588 kmem_free(buffer, 4); 27589 kmem_free(com, sizeof (*com)); 27590 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) { 27591 return (EFAULT); 27592 } 27593 return (rval); 27594 } 27595 27596 27597 /* 27598 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(), 27599 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for 27600 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data, 27601 * digital audio and extended architecture digital audio. These modes are 27602 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3 27603 * MMC specs.
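 *
 * As a rough illustration (a hypothetical userland snippet, not part of
 * this driver), these routines are reached via plain ioctls; e.g. a
 * single mode 1 block would be read with something like:
 *
 *	struct cdrom_read cr;
 *
 *	cr.cdread_lba = lba;		(logical block to read)
 *	cr.cdread_bufaddr = buf;	(user supplied buffer)
 *	cr.cdread_buflen = 2048;	(one mode 1 user data block)
 *	(void) ioctl(fd, CDROMREADMODE1, &cr);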
27604 * 27605 * In addition to support for the various data formats these routines also 27606 * include support for devices that implement only the direct access READ 27607 * commands (0x08, 0x28), devices that implement the READ_CD commands 27608 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and 27609 * READ CDXA commands (0xD8, 0xDB) 27610 */ 27611 27612 /* 27613 * Function: sr_read_mode1() 27614 * 27615 * Description: This routine is the driver entry point for handling CD-ROM 27616 * ioctl read mode1 requests (CDROMREADMODE1). 27617 * 27618 * Arguments: dev - the device 'dev_t' 27619 * data - pointer to user provided cd read structure specifying 27620 * the lba buffer address and length. 27621 * flag - this argument is a pass through to ddi_copyxxx() 27622 * directly from the mode argument of ioctl(). 27623 * 27624 * Return Code: the code returned by sd_send_scsi_cmd() 27625 * EFAULT if ddi_copyxxx() fails 27626 * ENXIO if fail ddi_get_soft_state 27627 * EINVAL if data pointer is NULL 27628 */ 27629 27630 static int 27631 sr_read_mode1(dev_t dev, caddr_t data, int flag) 27632 { 27633 struct sd_lun *un; 27634 struct cdrom_read mode1_struct; 27635 struct cdrom_read *mode1 = &mode1_struct; 27636 int rval; 27637 #ifdef _MULTI_DATAMODEL 27638 /* To support ILP32 applications in an LP64 world */ 27639 struct cdrom_read32 cdrom_read32; 27640 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27641 #endif /* _MULTI_DATAMODEL */ 27642 27643 if (data == NULL) { 27644 return (EINVAL); 27645 } 27646 27647 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27648 (un->un_state == SD_STATE_OFFLINE)) { 27649 return (ENXIO); 27650 } 27651 27652 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27653 "sd_read_mode1: entry: un:0x%p\n", un); 27654 27655 #ifdef _MULTI_DATAMODEL 27656 switch (ddi_model_convert_from(flag & FMODELS)) { 27657 case DDI_MODEL_ILP32: 27658 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27659 return (EFAULT); 27660 } 27661 /* Convert the ILP32 uscsi data from the application to LP64 */ 27662 cdrom_read32tocdrom_read(cdrd32, mode1); 27663 break; 27664 case DDI_MODEL_NONE: 27665 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27666 return (EFAULT); 27667 } 27668 } 27669 #else /* ! _MULTI_DATAMODEL */ 27670 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) { 27671 return (EFAULT); 27672 } 27673 #endif /* _MULTI_DATAMODEL */ 27674 27675 rval = sd_send_scsi_READ(un, mode1->cdread_bufaddr, 27676 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD); 27677 27678 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27679 "sd_read_mode1: exit: un:0x%p\n", un); 27680 27681 return (rval); 27682 } 27683 27684 27685 /* 27686 * Function: sr_read_cd_mode2() 27687 * 27688 * Description: This routine is the driver entry point for handling CD-ROM 27689 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27690 * support the READ CD (0xBE) command or the 1st generation 27691 * READ CD (0xD4) command. 27692 * 27693 * Arguments: dev - the device 'dev_t' 27694 * data - pointer to user provided cd read structure specifying 27695 * the lba buffer address and length. 27696 * flag - this argument is a pass through to ddi_copyxxx() 27697 * directly from the mode argument of ioctl(). 
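 *
 *		Note: Mode 2 data is carried in 2336 byte sectors (a 2352
 *		byte raw sector less the 12 byte sync and 4 byte header
 *		fields), which is why the transfer length below is computed
 *		as cdread_buflen / 2336.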
27698 * 27699 * Return Code: the code returned by sd_send_scsi_cmd() 27700 * EFAULT if ddi_copyxxx() fails 27701 * ENXIO if fail ddi_get_soft_state 27702 * EINVAL if data pointer is NULL 27703 */ 27704 27705 static int 27706 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag) 27707 { 27708 struct sd_lun *un; 27709 struct uscsi_cmd *com; 27710 struct cdrom_read mode2_struct; 27711 struct cdrom_read *mode2 = &mode2_struct; 27712 uchar_t cdb[CDB_GROUP5]; 27713 int nblocks; 27714 int rval; 27715 #ifdef _MULTI_DATAMODEL 27716 /* To support ILP32 applications in an LP64 world */ 27717 struct cdrom_read32 cdrom_read32; 27718 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27719 #endif /* _MULTI_DATAMODEL */ 27720 27721 if (data == NULL) { 27722 return (EINVAL); 27723 } 27724 27725 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27726 (un->un_state == SD_STATE_OFFLINE)) { 27727 return (ENXIO); 27728 } 27729 27730 #ifdef _MULTI_DATAMODEL 27731 switch (ddi_model_convert_from(flag & FMODELS)) { 27732 case DDI_MODEL_ILP32: 27733 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27734 return (EFAULT); 27735 } 27736 /* Convert the ILP32 uscsi data from the application to LP64 */ 27737 cdrom_read32tocdrom_read(cdrd32, mode2); 27738 break; 27739 case DDI_MODEL_NONE: 27740 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27741 return (EFAULT); 27742 } 27743 break; 27744 } 27745 27746 #else /* ! _MULTI_DATAMODEL */ 27747 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27748 return (EFAULT); 27749 } 27750 #endif /* _MULTI_DATAMODEL */ 27751 27752 bzero(cdb, sizeof (cdb)); 27753 if (un->un_f_cfg_read_cd_xd4 == TRUE) { 27754 /* Read command supported by 1st generation atapi drives */ 27755 cdb[0] = SCMD_READ_CDD4; 27756 } else { 27757 /* Universal CD Access Command */ 27758 cdb[0] = SCMD_READ_CD; 27759 } 27760 27761 /* 27762 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book 27763 */ 27764 cdb[1] = CDROM_SECTOR_TYPE_MODE2; 27765 27766 /* set the start address */ 27767 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0xFF); 27768 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0xFF); 27769 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 27770 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF); 27771 27772 /* set the transfer length */ 27773 nblocks = mode2->cdread_buflen / 2336; 27774 cdb[6] = (uchar_t)(nblocks >> 16); 27775 cdb[7] = (uchar_t)(nblocks >> 8); 27776 cdb[8] = (uchar_t)nblocks; 27777 27778 /* set the filter bits */ 27779 cdb[9] = CDROM_READ_CD_USERDATA; 27780 27781 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27782 com->uscsi_cdb = (caddr_t)cdb; 27783 com->uscsi_cdblen = sizeof (cdb); 27784 com->uscsi_bufaddr = mode2->cdread_bufaddr; 27785 com->uscsi_buflen = mode2->cdread_buflen; 27786 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27787 27788 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 27789 UIO_SYSSPACE, SD_PATH_STANDARD); 27790 kmem_free(com, sizeof (*com)); 27791 return (rval); 27792 } 27793 27794 27795 /* 27796 * Function: sr_read_mode2() 27797 * 27798 * Description: This routine is the driver entry point for handling CD-ROM 27799 * ioctl read mode2 requests (CDROMREADMODE2) for devices that 27800 * do not support the READ CD (0xBE) command. 27801 * 27802 * Arguments: dev - the device 'dev_t' 27803 * data - pointer to user provided cd read structure specifying 27804 * the lba buffer address and length.
27805 * flag - this argument is a pass through to ddi_copyxxx() 27806 * directly from the mode argument of ioctl(). 27807 * 27808 * Return Code: the code returned by sd_send_scsi_cmd() 27809 * EFAULT if ddi_copyxxx() fails 27810 * ENXIO if fail ddi_get_soft_state 27811 * EINVAL if data pointer is NULL 27812 * EIO if fail to reset block size 27813 * EAGAIN if commands are in progress in the driver 27814 */ 27815 27816 static int 27817 sr_read_mode2(dev_t dev, caddr_t data, int flag) 27818 { 27819 struct sd_lun *un; 27820 struct cdrom_read mode2_struct; 27821 struct cdrom_read *mode2 = &mode2_struct; 27822 int rval; 27823 uint32_t restore_blksize; 27824 struct uscsi_cmd *com; 27825 uchar_t cdb[CDB_GROUP0]; 27826 int nblocks; 27827 27828 #ifdef _MULTI_DATAMODEL 27829 /* To support ILP32 applications in an LP64 world */ 27830 struct cdrom_read32 cdrom_read32; 27831 struct cdrom_read32 *cdrd32 = &cdrom_read32; 27832 #endif /* _MULTI_DATAMODEL */ 27833 27834 if (data == NULL) { 27835 return (EINVAL); 27836 } 27837 27838 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27839 (un->un_state == SD_STATE_OFFLINE)) { 27840 return (ENXIO); 27841 } 27842 27843 /* 27844 * Because this routine will update the device and driver block size 27845 * being used we want to make sure there are no commands in progress. 27846 * If commands are in progress the user will have to try again. 27847 * 27848 * We check for 1 instead of 0 because we increment un_ncmds_in_driver 27849 * in sdioctl to protect commands from sdioctl through to the top of 27850 * sd_uscsi_strategy. See sdioctl for details. 27851 */ 27852 mutex_enter(SD_MUTEX(un)); 27853 if (un->un_ncmds_in_driver != 1) { 27854 mutex_exit(SD_MUTEX(un)); 27855 return (EAGAIN); 27856 } 27857 mutex_exit(SD_MUTEX(un)); 27858 27859 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27860 "sd_read_mode2: entry: un:0x%p\n", un); 27861 27862 #ifdef _MULTI_DATAMODEL 27863 switch (ddi_model_convert_from(flag & FMODELS)) { 27864 case DDI_MODEL_ILP32: 27865 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) { 27866 return (EFAULT); 27867 } 27868 /* Convert the ILP32 uscsi data from the application to LP64 */ 27869 cdrom_read32tocdrom_read(cdrd32, mode2); 27870 break; 27871 case DDI_MODEL_NONE: 27872 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) { 27873 return (EFAULT); 27874 } 27875 break; 27876 } 27877 #else /* ! 
_MULTI_DATAMODEL */ 27878 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) { 27879 return (EFAULT); 27880 } 27881 #endif /* _MULTI_DATAMODEL */ 27882 27883 /* Store the current target block size for restoration later */ 27884 restore_blksize = un->un_tgt_blocksize; 27885 27886 /* Change the device and soft state target block size to 2336 */ 27887 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) { 27888 rval = EIO; 27889 goto done; 27890 } 27891 27892 27893 bzero(cdb, sizeof (cdb)); 27894 27895 /* set READ operation */ 27896 cdb[0] = SCMD_READ; 27897 27898 /* adjust lba for 2kbyte blocks from 512 byte blocks */ 27899 mode2->cdread_lba >>= 2; 27900 27901 /* set the start address */ 27902 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0x1F); 27903 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF); 27904 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF); 27905 27906 /* set the transfer length */ 27907 nblocks = mode2->cdread_buflen / 2336; 27908 cdb[4] = (uchar_t)nblocks & 0xFF; 27909 27910 /* build command */ 27911 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 27912 com->uscsi_cdb = (caddr_t)cdb; 27913 com->uscsi_cdblen = sizeof (cdb); 27914 com->uscsi_bufaddr = mode2->cdread_bufaddr; 27915 com->uscsi_buflen = mode2->cdread_buflen; 27916 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 27917 27918 /* 27919 * Issue SCSI command with user space address for read buffer. 27920 * 27921 * This sends the command through the main channel in the driver. 27922 * 27923 * Since this is accessed via an IOCTL call, we go through the 27924 * standard path, so that if the device was powered down, then 27925 * it would be 'awakened' to handle the command. 27926 */ 27927 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 27928 UIO_SYSSPACE, SD_PATH_STANDARD); 27929 27930 kmem_free(com, sizeof (*com)); 27931 27932 /* Restore the device and soft state target block size */ 27933 if (sr_sector_mode(dev, restore_blksize) != 0) { 27934 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 27935 "cannot switch back to mode 1\n"); 27936 /* 27937 * If sd_send_scsi_READ succeeded we still need to report 27938 * an error because we failed to reset the block size 27939 */ 27940 if (rval == 0) { 27941 rval = EIO; 27942 } 27943 } 27944 27945 done: 27946 SD_TRACE(SD_LOG_ATTACH_DETACH, un, 27947 "sd_read_mode2: exit: un:0x%p\n", un); 27948 27949 return (rval); 27950 } 27951 27952 27953 /* 27954 * Function: sr_sector_mode() 27955 * 27956 * Description: This utility function is used by sr_read_mode2 to set the target 27957 * block size based on the user specified size. This is a legacy 27958 * implementation based upon a vendor specific mode page. 27959 * 27960 * Arguments: dev - the device 'dev_t' 27961 * blksize - flag indicating whether the block size is being set 27962 * to 2336 or 512.
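 *
 *		As a sketch (assuming the vendor page 0x81 layout this
 *		routine relies on), the 20 byte MODE SELECT image built
 *		below is:
 *			byte 3		block descriptor length (0x08)
 *			bytes 10-11	block length (2336 or 512)
 *			byte 12		page code (0x81 with the PS bit
 *					cleared)
 *			byte 13		page length (0x06)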
27963 * 27964 * Return Code: the code returned by sd_send_scsi_MODE_SENSE() or 27965 * sd_send_scsi_MODE_SELECT() 27966 * ENXIO if fail ddi_get_soft_state 27967 */ 27968 27969 27970 static int 27971 sr_sector_mode(dev_t dev, uint32_t blksize) 27972 { 27973 struct sd_lun *un; 27974 uchar_t *sense; 27975 uchar_t *select; 27976 int rval; 27977 27978 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 27979 (un->un_state == SD_STATE_OFFLINE)) { 27980 return (ENXIO); 27981 } 27982 27983 sense = kmem_zalloc(20, KM_SLEEP); 27984 27985 /* Note: This is a vendor specific mode page (0x81) */ 27986 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 20, 0x81, 27987 SD_PATH_STANDARD)) != 0) { 27988 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 27989 "sr_sector_mode: Mode Sense failed\n"); 27990 kmem_free(sense, 20); 27991 return (rval); 27992 } 27993 select = kmem_zalloc(20, KM_SLEEP); 27994 select[3] = 0x08; 27995 select[10] = ((blksize >> 8) & 0xff); 27996 select[11] = (blksize & 0xff); 27997 select[12] = 0x01; 27998 select[13] = 0x06; 27999 select[14] = sense[14]; 28000 select[15] = sense[15]; 28001 if (blksize == SD_MODE2_BLKSIZE) { 28002 select[14] |= 0x01; 28003 } 28004 28005 if ((rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 20, 28006 SD_DONTSAVE_PAGE, SD_PATH_STANDARD)) != 0) { 28007 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28008 "sr_sector_mode: Mode Select failed\n"); 28009 } else { 28010 /* 28011 * Only update the softstate block size if we successfully 28012 * changed the device block mode. 28013 */ 28014 mutex_enter(SD_MUTEX(un)); 28015 sd_update_block_info(un, blksize, 0); 28016 mutex_exit(SD_MUTEX(un)); 28017 } 28018 kmem_free(sense, 20); 28019 kmem_free(select, 20); 28020 return (rval); 28021 } 28022 28023 28024 /* 28025 * Function: sr_read_cdda() 28026 * 28027 * Description: This routine is the driver entry point for handling CD-ROM 28028 * ioctl requests to return CD-DA or subcode data (CDROMCDDA). If 28029 * the target supports CDDA, these requests are handled via a vendor 28030 * specific command (0xD8). If the target does not support CDDA, 28031 * these requests are handled via the READ CD command (0xBE). 28032 * 28033 * Arguments: dev - the device 'dev_t' 28034 * data - pointer to user provided CD-DA structure specifying 28035 * the track starting address, transfer length, and 28036 * subcode options. 28037 * flag - this argument is a pass through to ddi_copyxxx() 28038 * directly from the mode argument of ioctl().
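 *
 *		Note: cdda_data is expected to point to a buffer of at
 *		least cdda_length blocks of the size implied by
 *		cdda_subcode: 2352 bytes of raw audio, 2368 with the Q
 *		subcode appended, 2448 with all subcodes, or 96 bytes of
 *		subcode only (see the buflen computation below).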
28039 * 28040 * Return Code: the code returned by sd_send_scsi_cmd() 28041 * EFAULT if ddi_copyxxx() fails 28042 * ENXIO if fail ddi_get_soft_state 28043 * EINVAL if invalid arguments are provided 28044 * ENOTTY 28045 */ 28046 28047 static int 28048 sr_read_cdda(dev_t dev, caddr_t data, int flag) 28049 { 28050 struct sd_lun *un; 28051 struct uscsi_cmd *com; 28052 struct cdrom_cdda *cdda; 28053 int rval; 28054 size_t buflen; 28055 char cdb[CDB_GROUP5]; 28056 28057 #ifdef _MULTI_DATAMODEL 28058 /* To support ILP32 applications in an LP64 world */ 28059 struct cdrom_cdda32 cdrom_cdda32; 28060 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32; 28061 #endif /* _MULTI_DATAMODEL */ 28062 28063 if (data == NULL) { 28064 return (EINVAL); 28065 } 28066 28067 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28068 return (ENXIO); 28069 } 28070 28071 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP); 28072 28073 #ifdef _MULTI_DATAMODEL 28074 switch (ddi_model_convert_from(flag & FMODELS)) { 28075 case DDI_MODEL_ILP32: 28076 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) { 28077 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28078 "sr_read_cdda: ddi_copyin Failed\n"); 28079 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28080 return (EFAULT); 28081 } 28082 /* Convert the ILP32 uscsi data from the application to LP64 */ 28083 cdrom_cdda32tocdrom_cdda(cdda32, cdda); 28084 break; 28085 case DDI_MODEL_NONE: 28086 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28087 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28088 "sr_read_cdda: ddi_copyin Failed\n"); 28089 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28090 return (EFAULT); 28091 } 28092 break; 28093 } 28094 #else /* ! _MULTI_DATAMODEL */ 28095 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) { 28096 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28097 "sr_read_cdda: ddi_copyin Failed\n"); 28098 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28099 return (EFAULT); 28100 } 28101 #endif /* _MULTI_DATAMODEL */ 28102 28103 /* 28104 * Since MMC-2 expects max 3 bytes for length, check if the 28105 * length input is greater than 3 bytes 28106 */ 28107 if ((cdda->cdda_length & 0xFF000000) != 0) { 28108 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: " 28109 "cdrom transfer length too large: %d (limit %d)\n", 28110 cdda->cdda_length, 0xFFFFFF); 28111 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28112 return (EINVAL); 28113 } 28114 28115 switch (cdda->cdda_subcode) { 28116 case CDROM_DA_NO_SUBCODE: 28117 buflen = CDROM_BLK_2352 * cdda->cdda_length; 28118 break; 28119 case CDROM_DA_SUBQ: 28120 buflen = CDROM_BLK_2368 * cdda->cdda_length; 28121 break; 28122 case CDROM_DA_ALL_SUBCODE: 28123 buflen = CDROM_BLK_2448 * cdda->cdda_length; 28124 break; 28125 case CDROM_DA_SUBCODE_ONLY: 28126 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length; 28127 break; 28128 default: 28129 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28130 "sr_read_cdda: Subcode '0x%x' Not Supported\n", 28131 cdda->cdda_subcode); 28132 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28133 return (EINVAL); 28134 } 28135 28136 /* Build and send the command */ 28137 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28138 bzero(cdb, CDB_GROUP5); 28139 28140 if (un->un_f_cfg_cdda == TRUE) { 28141 cdb[0] = (char)SCMD_READ_CD; 28142 cdb[1] = 0x04; 28143 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28144 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28145 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28146 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28147 
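/* bytes 6-8: transfer length in blocks; READ CD has only a 3 byte length field, hence the 24-bit limit checked above */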
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28148 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28149 cdb[8] = ((cdda->cdda_length) & 0x000000ff); 28150 cdb[9] = 0x10; 28151 switch (cdda->cdda_subcode) { 28152 case CDROM_DA_NO_SUBCODE : 28153 cdb[10] = 0x0; 28154 break; 28155 case CDROM_DA_SUBQ : 28156 cdb[10] = 0x2; 28157 break; 28158 case CDROM_DA_ALL_SUBCODE : 28159 cdb[10] = 0x1; 28160 break; 28161 case CDROM_DA_SUBCODE_ONLY : 28162 /* FALLTHROUGH */ 28163 default : 28164 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28165 kmem_free(com, sizeof (*com)); 28166 return (ENOTTY); 28167 } 28168 } else { 28169 cdb[0] = (char)SCMD_READ_CDDA; 28170 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24); 28171 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16); 28172 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8); 28173 cdb[5] = ((cdda->cdda_addr) & 0x000000ff); 28174 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24); 28175 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16); 28176 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8); 28177 cdb[9] = ((cdda->cdda_length) & 0x000000ff); 28178 cdb[10] = cdda->cdda_subcode; 28179 } 28180 28181 com->uscsi_cdb = cdb; 28182 com->uscsi_cdblen = CDB_GROUP5; 28183 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data; 28184 com->uscsi_buflen = buflen; 28185 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28186 28187 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 28188 UIO_SYSSPACE, SD_PATH_STANDARD); 28189 28190 kmem_free(cdda, sizeof (struct cdrom_cdda)); 28191 kmem_free(com, sizeof (*com)); 28192 return (rval); 28193 } 28194 28195 28196 /* 28197 * Function: sr_read_cdxa() 28198 * 28199 * Description: This routine is the driver entry point for handling CD-ROM 28200 * ioctl requests to return CD-XA (Extended Architecture) data. 28201 * (CDROMCDXA). 28202 * 28203 * Arguments: dev - the device 'dev_t' 28204 * data - pointer to user provided CD-XA structure specifying 28205 * the data starting address, transfer length, and format 28206 * flag - this argument is a pass through to ddi_copyxxx() 28207 * directly from the mode argument of ioctl(). 28208 * 28209 * Return Code: the code returned by sd_send_scsi_cmd() 28210 * EFAULT if ddi_copyxxx() fails 28211 * ENXIO if fail ddi_get_soft_state 28212 * EINVAL if data pointer is NULL 28213 */ 28214 28215 static int 28216 sr_read_cdxa(dev_t dev, caddr_t data, int flag) 28217 { 28218 struct sd_lun *un; 28219 struct uscsi_cmd *com; 28220 struct cdrom_cdxa *cdxa; 28221 int rval; 28222 size_t buflen; 28223 char cdb[CDB_GROUP5]; 28224 uchar_t read_flags; 28225 28226 #ifdef _MULTI_DATAMODEL 28227 /* To support ILP32 applications in an LP64 world */ 28228 struct cdrom_cdxa32 cdrom_cdxa32; 28229 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32; 28230 #endif /* _MULTI_DATAMODEL */ 28231 28232 if (data == NULL) { 28233 return (EINVAL); 28234 } 28235 28236 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28237 return (ENXIO); 28238 } 28239 28240 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP); 28241 28242 #ifdef _MULTI_DATAMODEL 28243 switch (ddi_model_convert_from(flag & FMODELS)) { 28244 case DDI_MODEL_ILP32: 28245 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) { 28246 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28247 return (EFAULT); 28248 } 28249 /* 28250 * Convert the ILP32 uscsi data from the 28251 * application to LP64 for internal use. 
28252 */ 28253 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa); 28254 break; 28255 case DDI_MODEL_NONE: 28256 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 28257 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28258 return (EFAULT); 28259 } 28260 break; 28261 } 28262 #else /* ! _MULTI_DATAMODEL */ 28263 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) { 28264 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28265 return (EFAULT); 28266 } 28267 #endif /* _MULTI_DATAMODEL */ 28268 28269 /* 28270 * Since MMC-2 expects max 3 bytes for length, check if the 28271 * length input is greater than 3 bytes 28272 */ 28273 if ((cdxa->cdxa_length & 0xFF000000) != 0) { 28274 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: " 28275 "cdrom transfer length too large: %d (limit %d)\n", 28276 cdxa->cdxa_length, 0xFFFFFF); 28277 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28278 return (EINVAL); 28279 } 28280 28281 switch (cdxa->cdxa_format) { 28282 case CDROM_XA_DATA: 28283 buflen = CDROM_BLK_2048 * cdxa->cdxa_length; 28284 read_flags = 0x10; 28285 break; 28286 case CDROM_XA_SECTOR_DATA: 28287 buflen = CDROM_BLK_2352 * cdxa->cdxa_length; 28288 read_flags = 0xf8; 28289 break; 28290 case CDROM_XA_DATA_W_ERROR: 28291 buflen = CDROM_BLK_2646 * cdxa->cdxa_length; 28292 read_flags = 0xfc; 28293 break; 28294 default: 28295 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28296 "sr_read_cdxa: Format '0x%x' Not Supported\n", 28297 cdxa->cdxa_format); 28298 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28299 return (EINVAL); 28300 } 28301 28302 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28303 bzero(cdb, CDB_GROUP5); 28304 if (un->un_f_mmc_cap == TRUE) { 28305 cdb[0] = (char)SCMD_READ_CD; 28306 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28307 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28308 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28309 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28310 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28311 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28312 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff); 28313 cdb[9] = (char)read_flags; 28314 } else { 28315 /* 28316 * Note: A vendor specific command (0xDB) is being used here to 28317 * request a read of all subcodes.
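 * (Unlike the MMC READ CD above, this vendor command carries a
 * 4 byte transfer length in CDB bytes 6-9.)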
28318 */ 28319 cdb[0] = (char)SCMD_READ_CDXA; 28320 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24); 28321 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16); 28322 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8); 28323 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff); 28324 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24); 28325 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16); 28326 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8); 28327 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff); 28328 cdb[10] = cdxa->cdxa_format; 28329 } 28330 com->uscsi_cdb = cdb; 28331 com->uscsi_cdblen = CDB_GROUP5; 28332 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data; 28333 com->uscsi_buflen = buflen; 28334 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28335 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_USERSPACE, 28336 UIO_SYSSPACE, SD_PATH_STANDARD); 28337 kmem_free(cdxa, sizeof (struct cdrom_cdxa)); 28338 kmem_free(com, sizeof (*com)); 28339 return (rval); 28340 } 28341 28342 28343 /* 28344 * Function: sr_eject() 28345 * 28346 * Description: This routine is the driver entry point for handling CD-ROM 28347 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT) 28348 * 28349 * Arguments: dev - the device 'dev_t' 28350 * 28351 * Return Code: the code returned by sd_send_scsi_cmd() 28352 */ 28353 28354 static int 28355 sr_eject(dev_t dev) 28356 { 28357 struct sd_lun *un; 28358 int rval; 28359 28360 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28361 (un->un_state == SD_STATE_OFFLINE)) { 28362 return (ENXIO); 28363 } 28364 if ((rval = sd_send_scsi_DOORLOCK(un, SD_REMOVAL_ALLOW, 28365 SD_PATH_STANDARD)) != 0) { 28366 return (rval); 28367 } 28368 28369 rval = sd_send_scsi_START_STOP_UNIT(un, SD_TARGET_EJECT, 28370 SD_PATH_STANDARD); 28371 28372 if (rval == 0) { 28373 mutex_enter(SD_MUTEX(un)); 28374 sr_ejected(un); 28375 un->un_mediastate = DKIO_EJECTED; 28376 cv_broadcast(&un->un_state_cv); 28377 mutex_exit(SD_MUTEX(un)); 28378 } 28379 return (rval); 28380 } 28381 28382 28383 /* 28384 * Function: sr_ejected() 28385 * 28386 * Description: This routine updates the soft state structure to invalidate the 28387 * geometry information after the media has been ejected or a 28388 * media eject has been detected. 28389 * 28390 * Arguments: un - driver soft state (unit) structure 28391 */ 28392 28393 static void 28394 sr_ejected(struct sd_lun *un) 28395 { 28396 struct sd_errstats *stp; 28397 28398 ASSERT(un != NULL); 28399 ASSERT(mutex_owned(SD_MUTEX(un))); 28400 28401 un->un_f_blockcount_is_valid = FALSE; 28402 un->un_f_tgt_blocksize_is_valid = FALSE; 28403 un->un_f_geometry_is_valid = FALSE; 28404 28405 if (un->un_errstats != NULL) { 28406 stp = (struct sd_errstats *)un->un_errstats->ks_data; 28407 stp->sd_capacity.value.ui64 = 0; 28408 } 28409 } 28410 28411 28412 /* 28413 * Function: sr_check_wp() 28414 * 28415 * Description: This routine checks the write protection of a removable media 28416 * disk via the write protect bit of the Mode Page Header device 28417 * specific field. This routine has been implemented to use the 28418 * error recovery mode page for all device types. 28419 * Note: In the future use a sd_send_scsi_MODE_SENSE() routine 28420 * 28421 * Arguments: dev - the device 'dev_t' 28422 * 28423 * Return Code: int indicating if the device is write protected (1) or not (0) 28424 * 28425 * Context: Kernel thread. 
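 *
 *		Note: WRITE_PROTECT below is taken to be the write protect
 *		bit of the device specific byte in the mode parameter
 *		header returned by MODE SENSE.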
28426 * 28427 */ 28428 28429 static int 28430 sr_check_wp(dev_t dev) 28431 { 28432 struct sd_lun *un; 28433 uchar_t device_specific; 28434 uchar_t *sense; 28435 int hdrlen; 28436 int rval; 28437 int retry_flag = FALSE; 28438 28439 /* 28440 * Note: The return codes for this routine should be reworked to 28441 * properly handle the case of a NULL softstate. 28442 */ 28443 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) { 28444 return (FALSE); 28445 } 28446 28447 if (un->un_f_cfg_is_atapi == TRUE) { 28448 retry_flag = TRUE; 28449 } 28450 28451 retry: 28452 if (un->un_f_cfg_is_atapi == TRUE) { 28453 /* 28454 * The mode page contents are not required; set the allocation 28455 * length for the mode page header only 28456 */ 28457 hdrlen = MODE_HEADER_LENGTH_GRP2; 28458 sense = kmem_zalloc(hdrlen, KM_SLEEP); 28459 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, hdrlen, 28460 MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 28461 device_specific = 28462 ((struct mode_header_grp2 *)sense)->device_specific; 28463 } else { 28464 hdrlen = MODE_HEADER_LENGTH; 28465 sense = kmem_zalloc(hdrlen, KM_SLEEP); 28466 rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, hdrlen, 28467 MODEPAGE_ERR_RECOV, SD_PATH_STANDARD); 28468 device_specific = 28469 ((struct mode_header *)sense)->device_specific; 28470 } 28471 28472 if (rval != 0) { 28473 if ((un->un_f_cfg_is_atapi == TRUE) && (retry_flag)) { 28474 /* 28475 * An ATAPI Zip drive has been observed to report a 28476 * check condition on the first attempt, with sense 28477 * data indicating a power on or bus device reset. 28478 * Hence, for ATAPI devices, retry at least once in 28479 * case of failure. 28480 */ 28481 retry_flag = FALSE; 28482 kmem_free(sense, hdrlen); 28483 goto retry; 28484 } else { 28485 /* 28486 * Write protect mode sense failed; not all disks 28487 * understand this query. Return FALSE, assuming 28488 * the device is not write protected. 28489 */ 28490 rval = FALSE; 28491 } 28492 } else { 28493 if (device_specific & WRITE_PROTECT) { 28494 rval = TRUE; 28495 } else { 28496 rval = FALSE; 28497 } 28498 } 28499 kmem_free(sense, hdrlen); 28500 return (rval); 28501 } 28502 28503 28504 /* 28505 * Function: sr_volume_ctrl() 28506 * 28507 * Description: This routine is the driver entry point for handling CD-ROM 28508 * audio output volume ioctl requests (CDROMVOLCTRL). 28509 * 28510 * Arguments: dev - the device 'dev_t' 28511 * data - pointer to user audio volume control structure 28512 * flag - this argument is a pass through to ddi_copyxxx() 28513 * directly from the mode argument of ioctl().
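 *
 *		A hypothetical userland sketch (not part of this driver):
 *
 *			struct cdrom_volctrl v;
 *
 *			v.channel0 = 0xFF;	(port 0 at full volume)
 *			v.channel1 = 0xFF;	(port 1 at full volume)
 *			(void) ioctl(fd, CDROMVOLCTRL, &v);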
28514 * 28515 * Return Code: the code returned by sd_send_scsi_cmd() 28516 * EFAULT if ddi_copyxxx() fails 28517 * ENXIO if fail ddi_get_soft_state 28518 * EINVAL if data pointer is NULL 28519 * 28520 */ 28521 28522 static int 28523 sr_volume_ctrl(dev_t dev, caddr_t data, int flag) 28524 { 28525 struct sd_lun *un; 28526 struct cdrom_volctrl volume; 28527 struct cdrom_volctrl *vol = &volume; 28528 uchar_t *sense_page; 28529 uchar_t *select_page; 28530 uchar_t *sense; 28531 uchar_t *select; 28532 int sense_buflen; 28533 int select_buflen; 28534 int rval; 28535 28536 if (data == NULL) { 28537 return (EINVAL); 28538 } 28539 28540 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28541 (un->un_state == SD_STATE_OFFLINE)) { 28542 return (ENXIO); 28543 } 28544 28545 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) { 28546 return (EFAULT); 28547 } 28548 28549 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28550 struct mode_header_grp2 *sense_mhp; 28551 struct mode_header_grp2 *select_mhp; 28552 int bd_len; 28553 28554 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN; 28555 select_buflen = MODE_HEADER_LENGTH_GRP2 + 28556 MODEPAGE_AUDIO_CTRL_LEN; 28557 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28558 select = kmem_zalloc(select_buflen, KM_SLEEP); 28559 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP1, sense, 28560 sense_buflen, MODEPAGE_AUDIO_CTRL, 28561 SD_PATH_STANDARD)) != 0) { 28562 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, 28563 "sr_volume_ctrl: Mode Sense Failed\n"); 28564 kmem_free(sense, sense_buflen); 28565 kmem_free(select, select_buflen); 28566 return (rval); 28567 } 28568 sense_mhp = (struct mode_header_grp2 *)sense; 28569 select_mhp = (struct mode_header_grp2 *)select; 28570 bd_len = (sense_mhp->bdesc_length_hi << 8) | 28571 sense_mhp->bdesc_length_lo; 28572 if (bd_len > MODE_BLK_DESC_LENGTH) { 28573 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28574 "sr_volume_ctrl: Mode Sense returned invalid " 28575 "block descriptor length\n"); 28576 kmem_free(sense, sense_buflen); 28577 kmem_free(select, select_buflen); 28578 return (EIO); 28579 } 28580 sense_page = (uchar_t *) 28581 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len); 28582 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2); 28583 select_mhp->length_msb = 0; 28584 select_mhp->length_lsb = 0; 28585 select_mhp->bdesc_length_hi = 0; 28586 select_mhp->bdesc_length_lo = 0; 28587 } else { 28588 struct mode_header *sense_mhp, *select_mhp; 28589 28590 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28591 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN; 28592 sense = kmem_zalloc(sense_buflen, KM_SLEEP); 28593 select = kmem_zalloc(select_buflen, KM_SLEEP); 28594 if ((rval = sd_send_scsi_MODE_SENSE(un, CDB_GROUP0, sense, 28595 sense_buflen, MODEPAGE_AUDIO_CTRL, 28596 SD_PATH_STANDARD)) != 0) { 28597 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28598 "sr_volume_ctrl: Mode Sense Failed\n"); 28599 kmem_free(sense, sense_buflen); 28600 kmem_free(select, select_buflen); 28601 return (rval); 28602 } 28603 sense_mhp = (struct mode_header *)sense; 28604 select_mhp = (struct mode_header *)select; 28605 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) { 28606 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, 28607 "sr_volume_ctrl: Mode Sense returned invalid " 28608 "block descriptor length\n"); 28609 kmem_free(sense, sense_buflen); 28610 kmem_free(select, select_buflen); 28611 return (EIO); 28612 } 28613 sense_page = (uchar_t *) 28614 (sense + MODE_HEADER_LENGTH + 
sense_mhp->bdesc_length); 28615 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH); 28616 select_mhp->length = 0; 28617 select_mhp->bdesc_length = 0; 28618 } 28619 /* 28620 * Note: An audio control data structure could be created and overlaid 28621 * on the following in place of the array indexing method implemented. 28622 */ 28623 28624 /* Build the select data for the user volume data */ 28625 select_page[0] = MODEPAGE_AUDIO_CTRL; 28626 select_page[1] = 0xE; 28627 /* Set the immediate bit */ 28628 select_page[2] = 0x04; 28629 /* Zero out reserved fields */ 28630 select_page[3] = 0x00; 28631 select_page[4] = 0x00; 28632 /* Return sense data for fields not to be modified */ 28633 select_page[5] = sense_page[5]; 28634 select_page[6] = sense_page[6]; 28635 select_page[7] = sense_page[7]; 28636 /* Set the user specified volume levels for channels 0 and 1 */ 28637 select_page[8] = 0x01; 28638 select_page[9] = vol->channel0; 28639 select_page[10] = 0x02; 28640 select_page[11] = vol->channel1; 28641 /* Channels 2 and 3 are currently unsupported, so return the sense data */ 28642 select_page[12] = sense_page[12]; 28643 select_page[13] = sense_page[13]; 28644 select_page[14] = sense_page[14]; 28645 select_page[15] = sense_page[15]; 28646 28647 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) { 28648 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP1, select, 28649 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28650 } else { 28651 rval = sd_send_scsi_MODE_SELECT(un, CDB_GROUP0, select, 28652 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD); 28653 } 28654 28655 kmem_free(sense, sense_buflen); 28656 kmem_free(select, select_buflen); 28657 return (rval); 28658 } 28659 28660 28661 /* 28662 * Function: sr_read_sony_session_offset() 28663 * 28664 * Description: This routine is the driver entry point for handling CD-ROM 28665 * ioctl requests for session offset information (CDROMREADOFFSET). 28666 * The address of the first track in the last session of a 28667 * multi-session CD-ROM is returned. 28668 * 28669 * Note: This routine uses a vendor specific key value in the 28670 * command control field without implementing any vendor check here 28671 * or in the ioctl routine. 28672 * 28673 * Arguments: dev - the device 'dev_t' 28674 * data - pointer to an int to hold the requested address 28675 * flag - this argument is a pass through to ddi_copyxxx() 28676 * directly from the mode argument of ioctl(). 28677 * 28678 * Return Code: the code returned by sd_send_scsi_cmd() 28679 * EFAULT if ddi_copyxxx() fails 28680 * ENXIO if fail ddi_get_soft_state 28681 * EINVAL if data pointer is NULL 28682 */ 28683 28684 static int 28685 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag) 28686 { 28687 struct sd_lun *un; 28688 struct uscsi_cmd *com; 28689 caddr_t buffer; 28690 char cdb[CDB_GROUP1]; 28691 int session_offset = 0; 28692 int rval; 28693 28694 if (data == NULL) { 28695 return (EINVAL); 28696 } 28697 28698 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL || 28699 (un->un_state == SD_STATE_OFFLINE)) { 28700 return (ENXIO); 28701 } 28702 28703 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP); 28704 bzero(cdb, CDB_GROUP1); 28705 cdb[0] = SCMD_READ_TOC; 28706 /* 28707 * Bytes 7 & 8 are the 12 byte allocation length for a single entry. 28708 * (4 byte TOC response header + 8 byte response data) 28709 */ 28710 cdb[8] = SONY_SESSION_OFFSET_LEN; 28711 /* Byte 9 is the control byte.
A vendor specific value is used */ 28712 cdb[9] = SONY_SESSION_OFFSET_KEY; 28713 com = kmem_zalloc(sizeof (*com), KM_SLEEP); 28714 com->uscsi_cdb = cdb; 28715 com->uscsi_cdblen = CDB_GROUP1; 28716 com->uscsi_bufaddr = buffer; 28717 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN; 28718 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ; 28719 28720 rval = sd_send_scsi_cmd(dev, com, UIO_SYSSPACE, UIO_SYSSPACE, 28721 UIO_SYSSPACE, SD_PATH_STANDARD); 28722 if (rval != 0) { 28723 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 28724 kmem_free(com, sizeof (*com)); 28725 return (rval); 28726 } 28727 if (buffer[1] == SONY_SESSION_OFFSET_VALID) { 28728 session_offset = 28729 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) + 28730 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]); 28731 /* 28732 * The drive returns the offset in units of the current lbasize. 28733 * Convert it to 2K blocks before returning it to the user. 28734 */ 28735 if (un->un_tgt_blocksize == CDROM_BLK_512) { 28736 session_offset >>= 2; 28737 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) { 28738 session_offset >>= 1; 28739 } 28740 } 28741 28742 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) { 28743 rval = EFAULT; 28744 } 28745 28746 kmem_free(buffer, SONY_SESSION_OFFSET_LEN); 28747 kmem_free(com, sizeof (*com)); 28748 return (rval); 28749 } 28750 28751 28752 /* 28753 * Function: sd_wm_cache_constructor() 28754 * 28755 * Description: Cache Constructor for the wmap cache for the read/modify/write 28756 * devices. 28757 * 28758 * Arguments: wm - A pointer to the sd_w_map to be initialized. 28759 * un - sd_lun structure for the device. 28760 * flag - the km flags passed to constructor 28761 * 28762 * Return Code: 0 on success. 28763 * -1 on failure. 28764 */ 28765 28766 /*ARGSUSED*/ 28767 static int 28768 sd_wm_cache_constructor(void *wm, void *un, int flags) 28769 { 28770 bzero(wm, sizeof (struct sd_w_map)); 28771 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL); 28772 return (0); 28773 } 28774 28775 28776 /* 28777 * Function: sd_wm_cache_destructor() 28778 * 28779 * Description: Cache destructor for the wmap cache for the read/modify/write 28780 * devices. 28781 * 28782 * Arguments: wm - A pointer to the sd_w_map to be destroyed. 28783 * un - sd_lun structure for the device. 28784 */ 28785 /*ARGSUSED*/ 28786 static void 28787 sd_wm_cache_destructor(void *wm, void *un) 28788 { 28789 cv_destroy(&((struct sd_w_map *)wm)->wm_avail); 28790 } 28791 28792 28793 /* 28794 * Function: sd_range_lock() 28795 * 28796 * Description: Lock the range of blocks specified as parameter to ensure 28797 * that a read-modify-write is atomic and no other I/O writes 28798 * to the same location. The range is specified in terms 28799 * of start and end blocks. Block numbers are the actual 28800 * media block numbers and not system block numbers. 28801 * 28802 * Arguments: un - sd_lun structure for the device. 28803 * startb - The starting block number 28804 * endb - The end block number 28805 * typ - type of i/o - simple/read_modify_write 28806 * 28807 * Return Code: wm - pointer to the wmap structure. 28808 * 28809 * Context: This routine can sleep.
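 *
 *		Note: Implemented below as a small state machine
 *		(SD_WM_CHK_LIST -> SD_WM_LOCK_RANGE or SD_WM_WAIT_MAP ->
 *		SD_WM_DONE); each trip back to SD_WM_CHK_LIST re-examines
 *		the wmap list because SD_MUTEX may have been dropped.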
28810 */ 28811 28812 static struct sd_w_map * 28813 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ) 28814 { 28815 struct sd_w_map *wmp = NULL; 28816 struct sd_w_map *sl_wmp = NULL; 28817 struct sd_w_map *tmp_wmp; 28818 wm_state state = SD_WM_CHK_LIST; 28819 28820 28821 ASSERT(un != NULL); 28822 ASSERT(!mutex_owned(SD_MUTEX(un))); 28823 28824 mutex_enter(SD_MUTEX(un)); 28825 28826 while (state != SD_WM_DONE) { 28827 28828 switch (state) { 28829 case SD_WM_CHK_LIST: 28830 /* 28831 * This is the starting state. Check the wmap list 28832 * to see if the range is currently available. 28833 */ 28834 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) { 28835 /* 28836 * If this is a simple write and no rmw 28837 * i/o is pending then try to lock the 28838 * range as the range should be available. 28839 */ 28840 state = SD_WM_LOCK_RANGE; 28841 } else { 28842 tmp_wmp = sd_get_range(un, startb, endb); 28843 if (tmp_wmp != NULL) { 28844 if ((wmp != NULL) && ONLIST(un, wmp)) { 28845 /* 28846 * Should not keep an onlist wmp 28847 * while waiting; this macro 28848 * also sets wmp = NULL. 28849 */ 28850 FREE_ONLIST_WMAP(un, wmp); 28851 } 28852 /* 28853 * sl_wmp is the wmap on which the wait 28854 * is done. Since tmp_wmp points 28855 * to the in-use wmap, set sl_wmp to 28856 * tmp_wmp and change the state to sleep. 28857 */ 28858 sl_wmp = tmp_wmp; 28859 state = SD_WM_WAIT_MAP; 28860 } else { 28861 state = SD_WM_LOCK_RANGE; 28862 } 28863 28864 } 28865 break; 28866 28867 case SD_WM_LOCK_RANGE: 28868 ASSERT(un->un_wm_cache); 28869 /* 28870 * The range needs to be locked; try to get a wmap. 28871 * First attempt it with KM_NOSLEEP, as we want to avoid 28872 * sleeping if possible, since we would have to release 28873 * the sd mutex in order to sleep. 28874 */ 28875 if (wmp == NULL) 28876 wmp = kmem_cache_alloc(un->un_wm_cache, 28877 KM_NOSLEEP); 28878 if (wmp == NULL) { 28879 mutex_exit(SD_MUTEX(un)); 28880 _NOTE(DATA_READABLE_WITHOUT_LOCK 28881 (sd_lun::un_wm_cache)) 28882 wmp = kmem_cache_alloc(un->un_wm_cache, 28883 KM_SLEEP); 28884 mutex_enter(SD_MUTEX(un)); 28885 /* 28886 * We released the mutex, so recheck and go to 28887 * the check list state. 28888 */ 28889 state = SD_WM_CHK_LIST; 28890 } else { 28891 /* 28892 * We exit the state machine since we 28893 * have the wmap. Do the housekeeping first: 28894 * place the wmap on the wmap list if it is not 28895 * on it already, and then set the state to done. 28896 */ 28897 wmp->wm_start = startb; 28898 wmp->wm_end = endb; 28899 wmp->wm_flags = typ | SD_WM_BUSY; 28900 if (typ & SD_WTYPE_RMW) { 28901 un->un_rmw_count++; 28902 } 28903 /* 28904 * If not already on the list, then link it. 28905 */ 28906 if (!ONLIST(un, wmp)) { 28907 wmp->wm_next = un->un_wm; 28908 wmp->wm_prev = NULL; 28909 if (wmp->wm_next) 28910 wmp->wm_next->wm_prev = wmp; 28911 un->un_wm = wmp; 28912 } 28913 state = SD_WM_DONE; 28914 } 28915 break; 28916 28917 case SD_WM_WAIT_MAP: 28918 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY); 28919 /* 28920 * Wait is done on sl_wmp, which is set in the 28921 * check_list state. 28922 */ 28923 sl_wmp->wm_wanted_count++; 28924 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un)); 28925 sl_wmp->wm_wanted_count--; 28926 if (!(sl_wmp->wm_flags & SD_WM_BUSY)) { 28927 if (wmp != NULL) 28928 CHK_N_FREEWMP(un, wmp); 28929 wmp = sl_wmp; 28930 } 28931 sl_wmp = NULL; 28932 /* 28933 * After waking up, we need to recheck the availability of the 28934 * range.
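 * (another thread may have locked an overlapping range while we slept)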
28935 */ 28936 state = SD_WM_CHK_LIST; 28937 break; 28938 28939 default: 28940 panic("sd_range_lock: " 28941 "Unknown state %d in sd_range_lock", state); 28942 /*NOTREACHED*/ 28943 } /* switch(state) */ 28944 28945 } /* while(state != SD_WM_DONE) */ 28946 28947 mutex_exit(SD_MUTEX(un)); 28948 28949 ASSERT(wmp != NULL); 28950 28951 return (wmp); 28952 } 28953 28954 28955 /* 28956 * Function: sd_get_range() 28957 * 28958 * Description: Find if there is any I/O overlapping this one. 28959 * Returns the write-map of the 1st such I/O, NULL otherwise. 28960 * 28961 * Arguments: un - sd_lun structure for the device. 28962 * startb - The starting block number 28963 * endb - The end block number 28964 * 28965 * Return Code: wm - pointer to the wmap structure. 28966 */ 28967 28968 static struct sd_w_map * 28969 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb) 28970 { 28971 struct sd_w_map *wmp; 28972 28973 ASSERT(un != NULL); 28974 28975 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) { 28976 if (!(wmp->wm_flags & SD_WM_BUSY)) { 28977 continue; 28978 } 28979 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) { 28980 break; 28981 } 28982 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) { 28983 break; 28984 } 28985 } 28986 28987 return (wmp); 28988 } 28989 28990 28991 /* 28992 * Function: sd_free_inlist_wmap() 28993 * 28994 * Description: Unlink and free a write map struct. 28995 * 28996 * Arguments: un - sd_lun structure for the device. 28997 * wmp - sd_w_map which needs to be unlinked. 28998 */ 28999 29000 static void 29001 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp) 29002 { 29003 ASSERT(un != NULL); 29004 29005 if (un->un_wm == wmp) { 29006 un->un_wm = wmp->wm_next; 29007 } else { 29008 wmp->wm_prev->wm_next = wmp->wm_next; 29009 } 29010 29011 if (wmp->wm_next) { 29012 wmp->wm_next->wm_prev = wmp->wm_prev; 29013 } 29014 29015 wmp->wm_next = wmp->wm_prev = NULL; 29016 29017 kmem_cache_free(un->un_wm_cache, wmp); 29018 } 29019 29020 29021 /* 29022 * Function: sd_range_unlock() 29023 * 29024 * Description: Unlock the range locked by wm. 29025 * Free write map if nobody else is waiting on it. 29026 * 29027 * Arguments: un - sd_lun structure for the device. 29028 * wm - sd_w_map which needs to be unlocked. 29029 */ 29030 29031 static void 29032 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm) 29033 { 29034 ASSERT(un != NULL); 29035 ASSERT(wm != NULL); 29036 ASSERT(!mutex_owned(SD_MUTEX(un))); 29037 29038 mutex_enter(SD_MUTEX(un)); 29039 29040 if (wm->wm_flags & SD_WTYPE_RMW) { 29041 un->un_rmw_count--; 29042 } 29043 29044 if (wm->wm_wanted_count) { 29045 wm->wm_flags = 0; 29046 /* 29047 * Broadcast that the wmap is available now. 29048 */ 29049 cv_broadcast(&wm->wm_avail); 29050 } else { 29051 /* 29052 * If no one is waiting on the map, it should be freed. 29053 */ 29054 sd_free_inlist_wmap(un, wm); 29055 } 29056 29057 mutex_exit(SD_MUTEX(un)); 29058 } 29059 29060 29061 /* 29062 * Function: sd_read_modify_write_task 29063 * 29064 * Description: Called from a taskq thread to initiate the write phase of 29065 * a read-modify-write request. This is used for targets where 29066 * un->un_sys_blocksize != un->un_tgt_blocksize. 29067 * 29068 * Arguments: arg - a pointer to the buf(9S) struct for the write command. 29069 * 29070 * Context: Called under taskq thread context.
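 *
 *		Note: The read phase of the request completes in interrupt
 *		context; the write phase is re-issued from a taskq thread
 *		so that it re-enters the iostart chain from a context that
 *		is allowed to block.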
29071 */ 29072 29073 static void 29074 sd_read_modify_write_task(void *arg) 29075 { 29076 struct sd_mapblocksize_info *bsp; 29077 struct buf *bp; 29078 struct sd_xbuf *xp; 29079 struct sd_lun *un; 29080 29081 bp = arg; /* The bp is given in arg */ 29082 ASSERT(bp != NULL); 29083 29084 /* Get the pointer to the layer-private data struct */ 29085 xp = SD_GET_XBUF(bp); 29086 ASSERT(xp != NULL); 29087 bsp = xp->xb_private; 29088 ASSERT(bsp != NULL); 29089 29090 un = SD_GET_UN(bp); 29091 ASSERT(un != NULL); 29092 ASSERT(!mutex_owned(SD_MUTEX(un))); 29093 29094 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 29095 "sd_read_modify_write_task: entry: buf:0x%p\n", bp); 29096 29097 /* 29098 * This is the write phase of a read-modify-write request, called 29099 * under the context of a taskq thread in response to the read 29100 * portion of the rmw request completing under interrupt 29101 * context. The write request must be sent from here down the iostart 29102 * chain as if it were being sent from sd_mapblocksize_iostart(), so 29103 * we use the layer index saved in the layer-private data area. 29104 */ 29105 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp); 29106 29107 SD_TRACE(SD_LOG_IO_RMMEDIA, un, 29108 "sd_read_modify_write_task: exit: buf:0x%p\n", bp); 29109 } 29110 29111 29112 /* 29113 * Function: sddump_do_read_of_rmw() 29114 * 29115 * Description: This routine will be called from sddump. If sddump is called 29116 * with an I/O which is not aligned on a device blocksize boundary, 29117 * then the write has to be converted to a read-modify-write. 29118 * Do the read part here in order to keep sddump simple. 29119 * Note that the sd_mutex is held across the call to this 29120 * routine. 29121 * 29122 * Arguments: un - sd_lun 29123 * blkno - block number in terms of media block size. 29124 * nblk - number of blocks. 29125 * bpp - pointer to pointer to the buf structure. On return 29126 * from this function, *bpp points to the valid buffer 29127 * to which the write has to be done. 29128 * 29129 * Return Code: 0 for success or errno-type return code 29130 */ 29131 29132 static int 29133 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk, 29134 struct buf **bpp) 29135 { 29136 int err; 29137 int i; 29138 int rval; 29139 struct buf *bp; 29140 struct scsi_pkt *pkt = NULL; 29141 uint32_t target_blocksize; 29142 29143 ASSERT(un != NULL); 29144 ASSERT(mutex_owned(SD_MUTEX(un))); 29145 29146 target_blocksize = un->un_tgt_blocksize; 29147 29148 mutex_exit(SD_MUTEX(un)); 29149 29150 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL, 29151 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL); 29152 if (bp == NULL) { 29153 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 29154 "no resources for dumping; giving up"); 29155 err = ENOMEM; 29156 goto done; 29157 } 29158 29159 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL, 29160 blkno, nblk); 29161 if (rval != 0) { 29162 scsi_free_consistent_buf(bp); 29163 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 29164 "no resources for dumping; giving up"); 29165 err = ENOMEM; 29166 goto done; 29167 } 29168 29169 pkt->pkt_flags |= FLAG_NOINTR; 29170 29171 err = EIO; 29172 for (i = 0; i < SD_NDUMP_RETRIES; i++) { 29173 29174 /* 29175 * Scsi_poll returns 0 (success) if the command completes and 29176 * the status block is STATUS_GOOD. We should only check 29177 * errors if this condition is not true.
Even then we should 29178 * send our own request sense packet only if we have a check 29179 * condition and auto request sense has not been performed by 29180 * the hba. 29181 */ 29182 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n"); 29183 29184 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) { 29185 err = 0; 29186 break; 29187 } 29188 29189 /* 29190 * Check CMD_DEV_GONE 1st, give up if device is gone, 29191 * no need to read RQS data. 29192 */ 29193 if (pkt->pkt_reason == CMD_DEV_GONE) { 29194 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, 29195 "Device is gone\n"); 29196 break; 29197 } 29198 29199 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) { 29200 SD_INFO(SD_LOG_DUMP, un, 29201 "sddump: read failed with CHECK, try # %d\n", i); 29202 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) { 29203 (void) sd_send_polled_RQS(un); 29204 } 29205 29206 continue; 29207 } 29208 29209 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) { 29210 int reset_retval = 0; 29211 29212 SD_INFO(SD_LOG_DUMP, un, 29213 "sddump: read failed with BUSY, try # %d\n", i); 29214 29215 if (un->un_f_lun_reset_enabled == TRUE) { 29216 reset_retval = scsi_reset(SD_ADDRESS(un), 29217 RESET_LUN); 29218 } 29219 if (reset_retval == 0) { 29220 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET); 29221 } 29222 (void) sd_send_polled_RQS(un); 29223 29224 } else { 29225 SD_INFO(SD_LOG_DUMP, un, 29226 "sddump: read failed with 0x%x, try # %d\n", 29227 SD_GET_PKT_STATUS(pkt), i); 29228 mutex_enter(SD_MUTEX(un)); 29229 sd_reset_target(un, pkt); 29230 mutex_exit(SD_MUTEX(un)); 29231 } 29232 29233 /* 29234 * If we are not getting anywhere with lun/target resets, 29235 * let's reset the bus. 29236 */ 29237 if (i > SD_NDUMP_RETRIES/2) { 29238 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL); 29239 (void) sd_send_polled_RQS(un); 29240 } 29241 29242 } 29243 scsi_destroy_pkt(pkt); 29244 29245 if (err != 0) { 29246 scsi_free_consistent_buf(bp); 29247 *bpp = NULL; 29248 } else { 29249 *bpp = bp; 29250 } 29251 29252 done: 29253 mutex_enter(SD_MUTEX(un)); 29254 return (err); 29255 } 29256 29257 29258 /* 29259 * Function: sd_failfast_flushq 29260 * 29261 * Description: Take all bp's on the wait queue that have B_FAILFAST set 29262 * in b_flags and move them onto the failfast queue, then kick 29263 * off a thread to return all bp's on the failfast queue to 29264 * their owners with an error set. 29265 * 29266 * Arguments: un - pointer to the soft state struct for the instance. 29267 * 29268 * Context: may execute in interrupt context. 29269 */ 29270 29271 static void 29272 sd_failfast_flushq(struct sd_lun *un) 29273 { 29274 struct buf *bp; 29275 struct buf *next_waitq_bp; 29276 struct buf *prev_waitq_bp = NULL; 29277 29278 ASSERT(un != NULL); 29279 ASSERT(mutex_owned(SD_MUTEX(un))); 29280 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE); 29281 ASSERT(un->un_failfast_bp == NULL); 29282 29283 SD_TRACE(SD_LOG_IO_FAILFAST, un, 29284 "sd_failfast_flushq: entry: un:0x%p\n", un); 29285 29286 /* 29287 * Check if we should flush all bufs when entering failfast state, or 29288 * just those with B_FAILFAST set. 29289 */ 29290 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) { 29291 /* 29292 * Move *all* bp's on the wait queue to the failfast flush 29293 * queue, including those that do NOT have B_FAILFAST set. 
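 * Since both queues are singly linked through av_forw, the whole
 * wait queue can be spliced onto the tail of the failfast queue
 * in constant time, as done below.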


/*
 * Function: sd_failfast_flushq
 *
 * Description: Take all bp's on the wait queue that have B_FAILFAST set
 *		in b_flags and move them onto the failfast queue, then kick
 *		off a thread to return all bp's on the failfast queue to
 *		their owners with an error set.
 *
 * Arguments: un - pointer to the soft state struct for the instance.
 *
 * Context: may execute in interrupt context.
 */

static void
sd_failfast_flushq(struct sd_lun *un)
{
	struct buf *bp;
	struct buf *next_waitq_bp;
	struct buf *prev_waitq_bp = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
	ASSERT(un->un_failfast_bp == NULL);

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: entry: un:0x%p\n", un);

	/*
	 * Check if we should flush all bufs when entering failfast state,
	 * or just those with B_FAILFAST set.
	 */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
		/*
		 * Move *all* bp's on the wait queue to the failfast flush
		 * queue, including those that do NOT have B_FAILFAST set.
		 */
		if (un->un_failfast_headp == NULL) {
			ASSERT(un->un_failfast_tailp == NULL);
			un->un_failfast_headp = un->un_waitq_headp;
		} else {
			ASSERT(un->un_failfast_tailp != NULL);
			un->un_failfast_tailp->av_forw = un->un_waitq_headp;
		}

		un->un_failfast_tailp = un->un_waitq_tailp;
		un->un_waitq_headp = un->un_waitq_tailp = NULL;

	} else {
		/*
		 * Go through the wait queue, pick off all entries with
		 * B_FAILFAST set, and move these onto the failfast queue.
		 */
		for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
			/*
			 * Save the pointer to the next bp on the wait queue,
			 * so we get to it on the next iteration of this loop.
			 */
			next_waitq_bp = bp->av_forw;

			/*
			 * If this bp from the wait queue does NOT have
			 * B_FAILFAST set, just move on to the next element
			 * in the wait queue. Note, this is the only place
			 * where it is correct to set prev_waitq_bp.
			 */
			if ((bp->b_flags & B_FAILFAST) == 0) {
				prev_waitq_bp = bp;
				continue;
			}

			/*
			 * Remove the bp from the wait queue.
			 */
			if (bp == un->un_waitq_headp) {
				/* The bp is the first element of the waitq. */
				un->un_waitq_headp = next_waitq_bp;
				if (un->un_waitq_headp == NULL) {
					/* The wait queue is now empty */
					un->un_waitq_tailp = NULL;
				}
			} else {
				/*
				 * The bp is either somewhere in the middle
				 * or at the end of the wait queue.
				 */
				ASSERT(un->un_waitq_headp != NULL);
				ASSERT(prev_waitq_bp != NULL);
				ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
				    == 0);
				if (bp == un->un_waitq_tailp) {
					/* bp is the last entry on the waitq */
					ASSERT(next_waitq_bp == NULL);
					un->un_waitq_tailp = prev_waitq_bp;
				}
				prev_waitq_bp->av_forw = next_waitq_bp;
			}
			bp->av_forw = NULL;

			/*
			 * Now put the bp onto the failfast queue.
			 */
			if (un->un_failfast_headp == NULL) {
				/* failfast queue is currently empty */
				ASSERT(un->un_failfast_tailp == NULL);
				un->un_failfast_headp =
				    un->un_failfast_tailp = bp;
			} else {
				/* Add the bp to the end of the failfast q */
				ASSERT(un->un_failfast_tailp != NULL);
				ASSERT(un->un_failfast_tailp->b_flags &
				    B_FAILFAST);
				un->un_failfast_tailp->av_forw = bp;
				un->un_failfast_tailp = bp;
			}
		}
	}

	/*
	 * Now return all bp's on the failfast queue to their owners.
	 */
	while ((bp = un->un_failfast_headp) != NULL) {

		un->un_failfast_headp = bp->av_forw;
		if (un->un_failfast_headp == NULL) {
			un->un_failfast_tailp = NULL;
		}

		/*
		 * We want to return the bp with a failure error code, but
		 * we do not want a call to sd_start_cmds() to occur here,
		 * so use sd_return_failed_command_no_restart() instead of
		 * sd_return_failed_command().
		 */
		sd_return_failed_command_no_restart(un, bp, EIO);
	}

	/* Flush the xbuf queues if required. */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}


/*
 * Function: sd_failfast_flushq_callback
 *
 * Description: Return TRUE if the given bp meets the criteria for failfast
 *		flushing. Used with ddi_xbuf_flushq(9F).
 *
 * Arguments: bp - ptr to buf struct to be examined.
 *
 * Context: Any
 */

static int
sd_failfast_flushq_callback(struct buf *bp)
{
	/*
	 * Return TRUE if (1) we want to flush ALL bufs when the failfast
	 * state is entered; OR (2) the given bp has B_FAILFAST set.
	 */
	return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
	    (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
}


#if defined(__i386) || defined(__amd64)
/*
 * Function: sd_setup_next_xfer
 *
 * Description: Prepare the next I/O operation using DMA_PARTIAL
 */

static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t	num_blks_not_xfered;
	daddr_t	strt_blk_num;
	ssize_t	bytes_not_xfered;
	int	rval;

	ASSERT(pkt->pkt_resid == 0);

	/*
	 * Calculate the next block number and amount to be transferred.
	 *
	 * How much data has NOT been transferred to the HBA yet.
	 */
	bytes_not_xfered = xp->xb_dma_resid;

	/*
	 * Figure out how many blocks have NOT been transferred to the
	 * HBA yet.
	 */
	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/*
	 * Set the starting block number to the end of what WAS transferred.
	 */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	/*
	 * Move pkt to the next portion of the xfer.  sd_setup_next_rw_pkt
	 * will call scsi_init_pkt with NULL_FUNC so we do not have to
	 * release the disk mutex here.
	 */
	rval = sd_setup_next_rw_pkt(un, pkt, bp,
	    strt_blk_num, num_blks_not_xfered);

	if (rval == 0) {

		/*
		 * Success.
		 *
		 * Adjust things if there are still more blocks to be
		 * transferred.
		 */
		xp->xb_dma_resid = pkt->pkt_resid;
		pkt->pkt_resid = 0;

		return (1);
	}

	/*
	 * There's really only one possible return value from
	 * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt
	 * returns NULL.
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	bp->b_resid = bp->b_bcount;
	bp->b_flags |= B_ERROR;

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "Error setting up next portion of DMA transfer\n");

	return (0);
}
#endif
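
/*
 * Illustration (not driver code): the residual arithmetic used above for
 * DMA_PARTIAL transfers.  A minimal userland sketch, assuming a 512-byte
 * target block and hypothetical variable names; the real driver derives
 * these values with SD_BYTES2TGTBLOCKS() and the xbuf fields:
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	#define	BLKSIZE	512
 *
 *	int
 *	main(void)
 *	{
 *		uint64_t start_blk = 100;	// like xb_blkno
 *		uint64_t bcount = 64 * 1024;	// total request bytes
 *		uint64_t dma_resid = 24 * 1024;	// bytes the HBA did not take
 *
 *		// The next window begins where the completed portion ended.
 *		uint64_t next_blk = start_blk +
 *		    (bcount - dma_resid) / BLKSIZE;
 *		uint64_t next_nblks = dma_resid / BLKSIZE;
 *
 *		(void) printf("next xfer: blk %llu, %llu blocks\n",
 *		    (unsigned long long)next_blk,
 *		    (unsigned long long)next_nblks);
 *		return (0);
 *	}
 *
 * Here the first window moved 40 KB (80 blocks), so the next transfer
 * starts at block 180 and covers the remaining 48 blocks.
 */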

/*
 * Note: The following sd_faultinjection_ioctl() routines implement
 * driver support for handling fault injection for error analysis,
 * causing faults in multiple layers of the driver.
 */

#ifdef SD_FAULT_INJECTION
static uint_t   sd_fault_injection_on = 0;

/*
 * Function: sd_faultinjection_ioctl()
 *
 * Description: This routine is the driver entry point for handling
 *		fault-injection ioctls to inject errors into the
 *		layer model.
 *
 * Arguments: cmd	- the ioctl cmd received
 *	      arg	- the arguments from the user, also used for returns
 */

static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
{
	uint_t i;
	uint_t rval;

	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

	mutex_enter(SD_MUTEX(un));

	switch (cmd) {
	case SDIOCRUN:
		/* Allow pushed faults to be injected */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Run\n");

		sd_fault_injection_on = 1;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: run finished\n");
		break;

	case SDIOCSTART:
		/* Start Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Start\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0xFFFFFFFF;
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		mutex_enter(&(un->un_fi_mutex));
		un->sd_fi_log[0] = '\0';
		un->sd_fi_buf_len = 0;
		mutex_exit(&(un->un_fi_mutex));

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: start finished\n");
		break;

	case SDIOCSTOP:
		/* Stop Injection Session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");
		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0x0;

		/* Free any stray or unused structs from the fifo */
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			if (un->sd_fi_fifo_pkt[i] != NULL) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
			}
			if (un->sd_fi_fifo_xb[i] != NULL) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
			}
			if (un->sd_fi_fifo_un[i] != NULL) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
			}
			if (un->sd_fi_fifo_arq[i] != NULL) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
			}
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: stop finished\n");
		break;

	case SDIOCINSERTPKT:
		/* Store a packet struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		/* No more than SD_FI_MAX_ERROR entries allowed in the queue */
		if (un->sd_fi_fifo_pkt[i] != NULL) {
			kmem_free(un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt));
		}
		if (arg != NULL) {
			un->sd_fi_fifo_pkt[i] =
			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
			if (un->sd_fi_fifo_pkt[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
				un->sd_fi_fifo_pkt[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: pkt null\n");
		}
		break;

	case SDIOCINSERTXB:
		/* Store an xb struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_xb[i] != NULL) {
			kmem_free(un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb));
			un->sd_fi_fifo_xb[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_xb[i] =
			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
			if (un->sd_fi_fifo_xb[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb), 0);

			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
				un->sd_fi_fifo_xb[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: xb null\n");
		}
		break;

	case SDIOCINSERTUN:
		/* Store a un struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_un[i] != NULL) {
			kmem_free(un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un));
			un->sd_fi_fifo_un[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_un[i] =
			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
			if (un->sd_fi_fifo_un[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
				un->sd_fi_fifo_un[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: un null\n");
		}

		break;

	case SDIOCINSERTARQ:
		/* Store an arq struct to be pushed onto the fifo */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_arq[i] != NULL) {
			kmem_free(un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq));
			un->sd_fi_fifo_arq[i] = NULL;
		}
		if (arg != NULL) {
			un->sd_fi_fifo_arq[i] =
			    kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				/* Alloc failed; don't store anything */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}

		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}

		break;

	case SDIOCPUSH:
		/* Push stored xb, pkt, un, and arq onto the fifo */
		sd_fault_injection_on = 0;

		if (arg != NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			if (un->sd_fi_fifo_end + 1 < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Return the log buffer from the injection session */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len + 1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is possibly invalid; set it to NULL
			 * for the return.
			 */
			arg = NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n");
}


/*
 * Function: sd_injection_log()
 *
 * Description: This routine appends buf to the existing injection log
 *		for retrieval via sd_faultinjection_ioctl() for use in
 *		fault detection and recovery.
 *
 * Arguments: buf - the string to add to the log
 */

static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	/* Add the logged value to the injection log to be returned later */
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		uint_t	offset = strlen((char *)un->sd_fi_log);
		char	*destp = (char *)un->sd_fi_log + offset;
		int	i;
		for (i = 0; i < len; i++) {
			*destp++ = *buf++;
		}
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}
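
/*
 * Illustration (not driver code): the monotonic start/end counters with
 * modulo slot indexing that the fault-injection fifo above relies on.  A
 * minimal userland sketch with hypothetical names, using a fifo of strings
 * in place of the sd_fi_* structs:
 *
 *	#include <stdio.h>
 *
 *	#define	FI_MAX_ERROR	10
 *
 *	static const char *fifo[FI_MAX_ERROR];
 *	static unsigned fifo_start, fifo_end;
 *
 *	// Insert stages an entry in the slot the next push will cover;
 *	// push makes staged entries visible by advancing the end counter.
 *	static void insert(const char *s) { fifo[fifo_end % FI_MAX_ERROR] = s; }
 *	static void push(void) { fifo_end++; }
 *
 *	// Consume takes the next staged entry off the fifo, as
 *	// sd_faultinjection() does for each completing command.
 *	static const char *
 *	consume(void)
 *	{
 *		if (fifo_start == fifo_end)
 *			return (NULL);		// fifo empty
 *		return (fifo[fifo_start++ % FI_MAX_ERROR]);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		const char *s;
 *
 *		insert("CHECK condition"); push();
 *		insert("BUSY status"); push();
 *		while ((s = consume()) != NULL)
 *			(void) printf("inject: %s\n", s);
 *		return (0);
 *	}
 */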


/*
 * Function: sd_faultinjection()
 *
 * Description: This routine takes the pkt and changes its
 *		content based on the error injection scenario.
 *
 * Arguments: pktp - packet to be changed
 */

static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* Pull the bp, xb, and un from the pktp */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* If injection is off, return */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}


	/* Take the next set off the fifo */
	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];


	/* Set the variables accordingly */
	/* Set the pkt if it was on the fifo */
	if (fi_pkt != NULL) {
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");

	}

	/* Set the xb if it was on the fifo */
	if (fi_xb != NULL) {
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		/* Copy in the block data from the sense */
		if (fi_xb->xb_sense_data[0] != -1) {
			bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
			    SENSE_LENGTH);
		}

		/* Copy in the extended sense codes */
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_code,
		    "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_key,
		    "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb, es_add_code,
		    "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb), xb,
		    es_qual_code, "es_qual_code");
	}

	/* Set the un if it was on the fifo */
	if (fi_un != NULL) {
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_geometry_is_valid,
		    "un_f_geometry_is_valid");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");

	}

	/* Copy in the auto request sense if it was on the fifo */
	if (fi_arq != NULL) {
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* Free the structs */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	/*
	 * kmem_free() does not guarantee that the pointer is set to NULL.
	 * Since we use these pointers to determine whether a value was
	 * set, confirm that they are always NULL after the free.
	 */
	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}

#endif /* SD_FAULT_INJECTION */
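
/*
 * Illustration (not driver code): the "conditional set" pattern behind the
 * SD_CONDSET() uses above, where a sentinel value in the staged struct
 * means "leave this field alone."  A minimal userland sketch with
 * hypothetical names; the real macro lives in the sd headers and also
 * logs each override through the injection log:
 *
 *	#include <stdio.h>
 *
 *	#define	COND_UNSET	0xFFFFFFFFU
 *
 *	#define	COND_SET(dst, src, field, name)				\
 *		if ((src).field != COND_UNSET) {			\
 *			(dst).field = (src).field;			\
 *			(void) printf("injecting %s\n", (name));	\
 *		}
 *
 *	struct pkt { unsigned reason; unsigned state; };
 *
 *	int
 *	main(void)
 *	{
 *		struct pkt live = { 0, 0 };
 *		struct pkt staged = { 5, COND_UNSET };	// only reason staged
 *
 *		COND_SET(live, staged, reason, "pkt_reason");
 *		COND_SET(live, staged, state, "pkt_state");
 *		(void) printf("reason=%u state=%u\n", live.reason, live.state);
 *		return (0);
 *	}
 *
 * Only pkt_reason is overridden; pkt_state keeps its live value because
 * the staged copy left it at the sentinel.
 */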