/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */


/*
 * Direct Attached disk driver for SPARC machines.
 */

/*
 * Includes, Declarations and Local Data
 */
#include <sys/dada/dada.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/cdio.h>
#include <sys/vtoc.h>
#include <sys/dada/targets/daddef.h>
#include <sys/dada/targets/dadpriv.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/aio_req.h>
#include <sys/note.h>
#include <sys/cmlb.h>

/*
 * Global Error Levels for Error Reporting
 */
int dcd_error_level = DCD_ERR_RETRYABLE;

/*
 * Local Static Data
 */

static int dcd_io_time = DCD_IO_TIME;
static int dcd_retry_count = DCD_RETRY_COUNT;
#ifndef lint
static int dcd_report_pfa = 1;
#endif
static int dcd_rot_delay = 4;
static int dcd_poll_busycnt = DCD_POLL_TIMEOUT;

/*
 * Local Function Prototypes
 */

static int dcdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int dcdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int dcdstrategy(struct buf *bp);
static int dcddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int dcdioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int dcdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int dcdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int dcd_prop_op(dev_t, dev_info_t *, ddi_prop_op_t, int,
    char *, caddr_t, int *);
static int dcdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int dcdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);


static void dcd_free_softstate(struct dcd_disk *un, dev_info_t *devi);
static int dcd_doattach(dev_info_t *devi, int (*f)());
static int dcd_validate_geometry(struct dcd_disk *un);
static ddi_devid_t dcd_get_devid(struct dcd_disk *un);
static ddi_devid_t dcd_create_devid(struct dcd_disk *un);
static int dcd_make_devid_from_serial(struct dcd_disk *un);
static void dcd_validate_model_serial(char *str, int *retlen, int totallen);
static int dcd_read_deviceid(struct dcd_disk *un);
static int dcd_write_deviceid(struct dcd_disk *un);
static int dcd_poll(struct dcd_pkt *pkt);
static char *dcd_rname(int reason);
static void dcd_flush_cache(struct dcd_disk *un);

static int dcd_compute_dk_capacity(struct dcd_device *devp,
    diskaddr_t *capacity);
static int dcd_send_lb_rw_cmd(dev_info_t *devinfo, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, uchar_t cmd);

static void dcdmin(struct buf *bp);

static int dcdioctl_cmd(dev_t, struct udcd_cmd *,
    enum uio_seg, enum uio_seg);

static void dcdstart(struct dcd_disk *un);
static void dcddone_and_mutex_exit(struct dcd_disk *un, struct buf *bp);
static void make_dcd_cmd(struct dcd_disk *un, struct buf *bp, int (*f)());
static void dcdudcdmin(struct buf *bp);

static int dcdrunout(caddr_t);
static int dcd_check_wp(dev_t dev);
static int dcd_unit_ready(dev_t dev);
static void dcd_handle_tran_busy(struct buf *bp, struct diskhd *dp,
    struct dcd_disk *un);
static void dcdintr(struct dcd_pkt *pkt);
static int dcd_handle_incomplete(struct dcd_disk *un, struct buf *bp);
static void dcd_offline(struct dcd_disk *un, int bechatty);
static int dcd_ready_and_valid(dev_t dev, struct dcd_disk *un);
static void dcd_reset_disk(struct dcd_disk *un, struct dcd_pkt *pkt);
static void dcd_translate(struct dadkio_status32 *statp, struct udcd_cmd *cmdp);
static int dcdflushdone(struct buf *bp);

/* Function prototypes for cmlb */

static int dcd_lb_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie);

static int dcd_lb_getphygeom(dev_info_t *devi, cmlb_geom_t *phygeomp);
static int dcd_lb_getinfo(dev_info_t *devi, int cmd, void *arg,
    void *tg_cookie);


static cmlb_tg_ops_t dcd_lb_ops = {
	TG_DK_OPS_VERSION_1,
	dcd_lb_rdwr,
	dcd_lb_getinfo
};

/*
 * Error and Logging Functions
 */
#ifndef lint
static void clean_print(dev_info_t *dev, char *label, uint_t level,
    char *title, char *data, int len);
static void dcdrestart(void *arg);
#endif /* lint */

static int dcd_check_error(struct dcd_disk *un, struct buf *bp);

/*
 * Error statistics create/update functions
 */
static int dcd_create_errstats(struct dcd_disk *, int);



/*PRINTFLIKE4*/
extern void dcd_log(dev_info_t *, char *, uint_t, const char *, ...)
    __KPRINTFLIKE(4);
extern void makecommand(struct dcd_pkt *, int, uchar_t, uint32_t,
    uchar_t, uint32_t, uchar_t, uchar_t);


/*
 * Configuration Routines
 */
static int dcdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int dcdprobe(dev_info_t *devi);
static int dcdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int dcddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int dcdreset(dev_info_t *dip, ddi_reset_cmd_t cmd);
static int dcd_dr_detach(dev_info_t *devi);
static int dcdpower(dev_info_t *devi, int component, int level);

static void *dcd_state;
static int dcd_max_instance;
static char *dcd_label = "dad";

static char *diskokay = "disk okay\n";

#if DEBUG || lint
#define	DCDDEBUG
#endif

int dcd_test_flag = 0;

/*
 * Debugging macros
 */
#ifdef DCDDEBUG
static int dcddebug = 0;
#define	DEBUGGING	(dcddebug > 1)
#define	DAD_DEBUG	if (dcddebug == 1) dcd_log
#define	DAD_DEBUG2	if (dcddebug > 1) dcd_log
#else	/* DCDDEBUG */
#define	dcddebug	(0)
#define	DEBUGGING	(0)
#define	DAD_DEBUG	if (0) dcd_log
#define	DAD_DEBUG2	if (0) dcd_log
#endif
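
/*
 * Illustrative note (not from the original source): with DCDDEBUG
 * compiled in, setting dcddebug to 2 (e.g. from the debugger) makes a
 * call such as
 *
 *	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "ncmds %x\n", n);
 *
 * expand to "if (dcddebug > 1) dcd_log(...)" and so get logged, while in
 * a non-DCDDEBUG build the same call expands to "if (0) dcd_log(...)"
 * and compiles away.
 */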

/*
 * We use the pkt_private area for storing the bp and the retry counts.
 * XXX: Is this really useful?
 */
struct dcd_pkt_private {
	struct buf	*dcdpp_bp;
	short		dcdpp_retry_count;
	short		dcdpp_victim_retry_count;
};


_NOTE(SCHEME_PROTECTS_DATA("Unique per pkt", dcd_pkt_private buf))

#define	PP_LEN	(sizeof (struct dcd_pkt_private))

#define	PKT_SET_BP(pkt, bp) \
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_bp = bp
#define	PKT_GET_BP(pkt) \
	(((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_bp)


#define	PKT_SET_RETRY_CNT(pkt, n) \
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count = n

#define	PKT_GET_RETRY_CNT(pkt) \
	(((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count)

#define	PKT_INCR_RETRY_CNT(pkt, n) \
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count += n

#define	PKT_SET_VICTIM_RETRY_CNT(pkt, n) \
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count \
	    = n

#define	PKT_GET_VICTIM_RETRY_CNT(pkt) \
	(((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count)
#define	PKT_INCR_VICTIM_RETRY_CNT(pkt, n) \
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count \
	    += n

#define	DISK_NOT_READY_RETRY_COUNT	(dcd_retry_count / 2)


/*
 * Urk!
 */
#define	SET_BP_ERROR(bp, err)	\
	bioerror(bp, err);

#define	IOSP			KSTAT_IO_PTR(un->un_stats)
#define	IO_PARTITION_STATS	un->un_pstats[DCDPART(bp->b_edev)]
#define	IOSP_PARTITION		KSTAT_IO_PTR(IO_PARTITION_STATS)

#define	DCD_DO_KSTATS(un, kstat_function, bp) \
	ASSERT(mutex_owned(DCD_MUTEX)); \
	if (bp != un->un_sbufp) { \
		if (un->un_stats) { \
			kstat_function(IOSP); \
		} \
		if (IO_PARTITION_STATS) { \
			kstat_function(IOSP_PARTITION); \
		} \
	}

#define	DCD_DO_ERRSTATS(un, x) \
	if (un->un_errstats) { \
		struct dcd_errstats *dtp; \
		dtp = (struct dcd_errstats *)un->un_errstats->ks_data; \
		dtp->x.value.ui32++; \
	}

#define	GET_SOFT_STATE(dev) \
	struct dcd_disk *un; \
	int instance, part; \
	minor_t minor = getminor(dev); \
\
	part = minor & DCDPART_MASK; \
	instance = minor >> DCDUNIT_SHIFT; \
	if ((un = ddi_get_soft_state(dcd_state, instance)) == NULL) \
		return (ENXIO);

#define	LOGICAL_BLOCK_ALIGN(blkno, blknoshift) \
	(((blkno) & ((1 << (blknoshift)) - 1)) == 0)

/*
 * After the following number of sectors, the cylinder number spills over
 * 0xFFFF if sectors = 63 and heads = 16.
 */
#define	NUM_SECTORS_32G	0x3EFFC10
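
/*
 * (That is, 0xFFFF cylinders * 16 heads * 63 sectors = 65535 * 16 * 63
 * = 66059280 = 0x3EFFC10 sectors; at 512 bytes per sector this is about
 * 31.5 GB, hence the "32G" in the name.)
 */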

/*
 * Configuration Data
 */

/*
 * Device driver ops vector
 */

static struct cb_ops dcd_cb_ops = {
	dcdopen,		/* open */
	dcdclose,		/* close */
	dcdstrategy,		/* strategy */
	nodev,			/* print */
	dcddump,		/* dump */
	dcdread,		/* read */
	dcdwrite,		/* write */
	dcdioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	dcd_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_64BIT | D_MP | D_NEW,	/* Driver compatibility flag */
	CB_REV,			/* cb_rev */
	dcdaread,		/* async I/O read entry point */
	dcdawrite		/* async I/O write entry point */
};

static struct dev_ops dcd_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	dcdinfo,		/* info */
	nulldev,		/* identify */
	dcdprobe,		/* probe */
	dcdattach,		/* attach */
	dcddetach,		/* detach */
	dcdreset,		/* reset */
	&dcd_cb_ops,		/* driver operations */
	(struct bus_ops *)0,	/* bus operations */
	dcdpower,		/* power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};


/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	"DAD Disk Driver",	/* Name of the module. */
	&dcd_ops,		/* driver ops */
};



static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};

/*
 * the dcd_attach_mutex only protects dcd_max_instance in multi-threaded
 * attach situations
 */
static kmutex_t dcd_attach_mutex;

int
_init(void)
{
	int e;

	if ((e = ddi_soft_state_init(&dcd_state, sizeof (struct dcd_disk),
	    DCD_MAXUNIT)) != 0)
		return (e);

	mutex_init(&dcd_attach_mutex, NULL, MUTEX_DRIVER, NULL);
	e = mod_install(&modlinkage);
	if (e != 0) {
		mutex_destroy(&dcd_attach_mutex);
		ddi_soft_state_fini(&dcd_state);
		return (e);
	}

	return (e);
}

int
_fini(void)
{
	int e;

	if ((e = mod_remove(&modlinkage)) != 0)
		return (e);

	ddi_soft_state_fini(&dcd_state);
	mutex_destroy(&dcd_attach_mutex);

	return (e);
}

int
_info(struct modinfo *modinfop)
{

	return (mod_info(&modlinkage, modinfop));
}
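
/*
 * (Ordering note: _init() initializes the soft-state anchor before
 * mod_install(), since attach can run as soon as the module is
 * installed; _fini() tears both down in the reverse order, and only
 * after mod_remove() has succeeded.)
 */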

static int
dcdprobe(dev_info_t *devi)
{
	struct dcd_device *devp;
	int rval = DDI_PROBE_PARTIAL;
	int instance;

	devp = ddi_get_driver_private(devi);
	instance = ddi_get_instance(devi);

	/*
	 * Keep a count of how many disks (i.e. highest instance no) we have
	 * XXX currently not used but maybe useful later again
	 */
	mutex_enter(&dcd_attach_mutex);
	if (instance > dcd_max_instance)
		dcd_max_instance = instance;
	mutex_exit(&dcd_attach_mutex);

	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG, "dcdprobe:\n");

	if (ddi_get_soft_state(dcd_state, instance) != NULL)
		return (DDI_PROBE_PARTIAL);

	/*
	 * Turn around and call the utility probe routine
	 * to see whether we actually have a disk at this address.
	 */

	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
	    "dcdprobe: %x\n", dcd_probe(devp, NULL_FUNC));

	switch (dcd_probe(devp, NULL_FUNC)) {
	default:
	case DCDPROBE_NORESP:
	case DCDPROBE_NONCCS:
	case DCDPROBE_NOMEM:
	case DCDPROBE_FAILURE:
	case DCDPROBE_BUSY:
		break;

	case DCDPROBE_EXISTS:
		/*
		 * Check whether it is an ATA device and then
		 * return SUCCESS.
		 */
		DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
		    "config %x\n", devp->dcd_ident->dcd_config);
		if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
			if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
				rval = DDI_PROBE_SUCCESS;
			} else
				rval = DDI_PROBE_FAILURE;
		} else {
			rval = DDI_PROBE_FAILURE;
		}
		break;
	}
	dcd_unprobe(devp);

	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
	    "dcdprobe returns %x\n", rval);

	return (rval);
}
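
/*
 * (dcdprobe() always calls dcd_unprobe() before returning, so no probe
 * state is carried over to attach; dcd_doattach() below probes the
 * device again from scratch.)
 */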


/*ARGSUSED*/
static int
dcdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int instance, rval;
	struct dcd_device *devp;
	struct dcd_disk *un;
	struct diskhd *dp;
	char *pm_comp[] =
	    { "NAME=ide-disk", "0=standby", "1=idle", "2=active" };

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);


	devp = ddi_get_driver_private(devi);
	instance = ddi_get_instance(devi);
	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG, "Attach Started\n");

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		if (!(un = ddi_get_soft_state(dcd_state, instance)))
			return (DDI_FAILURE);
		mutex_enter(DCD_MUTEX);
		Restore_state(un);
		/*
		 * Restore the state which was saved to give the
		 * right state in un_last_state
		 */
		un->un_last_state = un->un_save_state;
		un->un_throttle = 2;
		cv_broadcast(&un->un_suspend_cv);
		/*
		 * Raise the power level of the device to active.
		 */
		mutex_exit(DCD_MUTEX);
		(void) pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE);
		mutex_enter(DCD_MUTEX);

		/*
		 * start unit - if this is a low-activity device
		 * commands in queue will have to wait until new
		 * commands come in, which may take awhile.
		 * Also, we specifically don't check un_ncmds
		 * because we know that there really are no
		 * commands in progress after the unit was suspended
		 * and we could have reached the throttle level, been
		 * suspended, and have no new commands coming in for
		 * awhile. Highly unlikely, but so is the low-
		 * activity disk scenario.
		 */
		dp = &un->un_utab;
		if (dp->b_actf && (dp->b_forw == NULL)) {
			dcdstart(un);
		}

		mutex_exit(DCD_MUTEX);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	if (dcd_doattach(devi, SLEEP_FUNC) == DDI_FAILURE) {
		return (DDI_FAILURE);
	}

	if (!(un = (struct dcd_disk *)
	    ddi_get_soft_state(dcd_state, instance))) {
		return (DDI_FAILURE);
	}
	devp->dcd_private = (ataopaque_t)un;

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers)
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);

	/*
	 * Since the dad device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * does need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
	    "pm-hardware-state", (caddr_t)"needs-suspend-resume");

	/*
	 * Initialize power management bookkeeping;
	 * Create components - in the IDE case there are 3 levels and one
	 * component. The levels being - active, idle, standby.
	 */

	rval = ddi_prop_update_string_array(DDI_DEV_T_NONE,
	    devi, "pm-components", pm_comp, 4);
	if (rval == DDI_PROP_SUCCESS) {
		/*
		 * Ignore the return value of pm_raise_power.
		 * Even if we check the return values and
		 * remove the property created above, the PM
		 * framework will not honour the change after
		 * the first call to pm_raise_power. Hence, the
		 * removal of that property does not help if
		 * pm_raise_power fails.
		 */
		(void) pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE);
	}

	ddi_report_dev(devi);

	cmlb_alloc_handle(&un->un_dklbhandle);

	if (cmlb_attach(devi,
	    &dcd_lb_ops,
	    0,
	    0,
	    0,
	    DDI_NT_BLOCK_CHAN,
	    CMLB_FAKE_GEOM_LABEL_IOCTLS_VTOC8,
	    un->un_dklbhandle,
	    0) != 0) {
		cmlb_free_handle(&un->un_dklbhandle);
		dcd_free_softstate(un, devi);
		return (DDI_FAILURE);
	}

	mutex_enter(DCD_MUTEX);
	(void) dcd_validate_geometry(un);

	/* Get devid; create a devid ONLY IF we could not get the ID */
	if (dcd_get_devid(un) == NULL) {
		/* Create the fab'd devid */
		(void) dcd_create_devid(un);
	}
	mutex_exit(DCD_MUTEX);

	return (DDI_SUCCESS);
}

static void
dcd_free_softstate(struct dcd_disk *un, dev_info_t *devi)
{
	struct dcd_device *devp;
	int instance = ddi_get_instance(devi);

	devp = ddi_get_driver_private(devi);

	if (un) {
		sema_destroy(&un->un_semoclose);
		cv_destroy(&un->un_sbuf_cv);
		cv_destroy(&un->un_state_cv);
		cv_destroy(&un->un_disk_busy_cv);
		cv_destroy(&un->un_suspend_cv);

		/*
		 * Deallocate command packet resources.
		 */
		if (un->un_sbufp)
			freerbuf(un->un_sbufp);
		if (un->un_dp) {
			kmem_free((caddr_t)un->un_dp, sizeof (*un->un_dp));
		}
		/*
		 * Unregister the devid and free devid resources allocated
		 */
		ddi_devid_unregister(DCD_DEVINFO);
		if (un->un_devid) {
			ddi_devid_free(un->un_devid);
			un->un_devid = NULL;
		}

		/*
		 * Delete kstats. Kstats for non CD devices are deleted
		 * in dcdclose.
		 */
		if (un->un_stats) {
			kstat_delete(un->un_stats);
		}

	}

	/*
	 * Clean up dcd_device resources.
	 */
	ddi_soft_state_free(dcd_state, instance);
	devp->dcd_private = (ataopaque_t)0;
	/* unprobe the device */
	dcd_unprobe(devp);

	/* Remove properties created during attach */
	ddi_prop_remove_all(devi);
}

static int
dcddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int instance;
	struct dcd_disk *un;
	clock_t wait_cmds_complete;
	instance = ddi_get_instance(devi);

	if (!(un = ddi_get_soft_state(dcd_state, instance)))
		return (DDI_FAILURE);

	switch (cmd) {
	case DDI_DETACH:
		return (dcd_dr_detach(devi));

	case DDI_SUSPEND:
		mutex_enter(DCD_MUTEX);
		if (un->un_state == DCD_STATE_SUSPENDED) {
			mutex_exit(DCD_MUTEX);
			return (DDI_SUCCESS);
		}
		un->un_throttle = 0;
		/*
		 * Save the last state first
		 */
		un->un_save_state = un->un_last_state;

		New_state(un, DCD_STATE_SUSPENDED);

		/*
		 * Wait till the current operation completes. If we are
		 * in the resource wait state (with an intr outstanding)
		 * then we need to wait till the intr completes and
		 * starts the next cmd. We wait for
		 * DCD_WAIT_CMDS_COMPLETE seconds before failing the
		 * DDI_SUSPEND.
		 */
		wait_cmds_complete = ddi_get_lbolt();
		wait_cmds_complete +=
		    DCD_WAIT_CMDS_COMPLETE * drv_usectohz(1000000);
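		/*
		 * (drv_usectohz(1000000) is one second's worth of clock
		 * ticks, so the deadline computed above lies
		 * DCD_WAIT_CMDS_COMPLETE seconds past the current lbolt.)
		 */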

		while (un->un_ncmds) {
			if (cv_timedwait(&un->un_disk_busy_cv,
			    DCD_MUTEX, wait_cmds_complete) == -1) {
				/*
				 * Commands didn't finish in the
				 * specified time, fail the DDI_SUSPEND.
				 */
				DAD_DEBUG2(DCD_DEVINFO, dcd_label,
				    DCD_DEBUG, "dcddetach: SUSPEND "
				    "failed due to outstanding cmds\n");
				Restore_state(un);
				mutex_exit(DCD_MUTEX);
				return (DDI_FAILURE);
			}
		}
		mutex_exit(DCD_MUTEX);
		return (DDI_SUCCESS);
	}
	return (DDI_FAILURE);
}

/*
 * The reset entry point gets invoked at system shutdown time or through
 * CPR code at system suspend.
 * We flush the cache and expect this to be the last I/O operation to the
 * disk before system reset/power off.
 */
/*ARGSUSED*/
static int
dcdreset(dev_info_t *dip, ddi_reset_cmd_t cmd)
{
	struct dcd_disk *un;
	int instance;

	instance = ddi_get_instance(dip);

	if (!(un = ddi_get_soft_state(dcd_state, instance)))
		return (DDI_FAILURE);

	dcd_flush_cache(un);

	return (DDI_SUCCESS);
}


static int
dcd_dr_detach(dev_info_t *devi)
{
	struct dcd_device *devp;
	struct dcd_disk *un;

	/*
	 * Get the dcd_device structure for this instance.
	 */
	if ((devp = ddi_get_driver_private(devi)) == NULL)
		return (DDI_FAILURE);

	/*
	 * Get the dcd_disk structure containing target 'private' information
	 */
	un = (struct dcd_disk *)devp->dcd_private;

	/*
	 * Verify there are NO outstanding commands issued to this device.
	 * ie, un_ncmds == 0.
	 * It's possible to have outstanding commands through the physio
	 * code path, even though everything's closed.
	 */
#ifndef lint
	_NOTE(COMPETING_THREADS_NOW);
#endif
	mutex_enter(DCD_MUTEX);
	if (un->un_ncmds) {
		mutex_exit(DCD_MUTEX);
		_NOTE(NO_COMPETING_THREADS_NOW);
		return (DDI_FAILURE);
	}

	mutex_exit(DCD_MUTEX);

	cmlb_detach(un->un_dklbhandle, 0);
	cmlb_free_handle(&un->un_dklbhandle);


	/*
	 * Lower the power state of the device,
	 * i.e. to the minimum power consumption state - sleep.
	 */
	(void) pm_lower_power(DCD_DEVINFO, 0, DCD_DEVICE_STANDBY);

	_NOTE(NO_COMPETING_THREADS_NOW);

	/*
	 * At this point there are no competing threads anymore;
	 * release active MT locks and all device resources.
	 */
	dcd_free_softstate(un, devi);

	return (DDI_SUCCESS);
}

static int
dcdpower(dev_info_t *devi, int component, int level)
{
	struct dcd_pkt *pkt;
	struct dcd_disk *un;
	int instance;
	uchar_t cmd;


	instance = ddi_get_instance(devi);

	if (!(un = ddi_get_soft_state(dcd_state, instance)) ||
	    (DCD_DEVICE_STANDBY > level) || (level > DCD_DEVICE_ACTIVE) ||
	    component != 0) {
		return (DDI_FAILURE);
	}

	mutex_enter(DCD_MUTEX);
	/*
	 * If there are active commands for the device, or the device
	 * will be active soon, and the request is to lower the power
	 * level, return failure.
	 */
	if ((un->un_ncmds) && (level != DCD_DEVICE_ACTIVE)) {
		mutex_exit(DCD_MUTEX);
		return (DDI_FAILURE);
	}

	if ((un->un_state == DCD_STATE_OFFLINE) ||
	    (un->un_state == DCD_STATE_FATAL)) {
		mutex_exit(DCD_MUTEX);
		return (DDI_FAILURE);
	}

	if (level == DCD_DEVICE_ACTIVE) {
		/*
		 * No need to fire any command, just set the state structure
		 * to indicate previous state and set the level to active
		 */
		un->un_power_level = DCD_DEVICE_ACTIVE;
		if (un->un_state == DCD_STATE_PM_SUSPENDED)
			Restore_state(un);
		mutex_exit(DCD_MUTEX);
	} else {
		pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
		    NULL, (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
		    PKT_CONSISTENT, NULL_FUNC, NULL);

		if (pkt == (struct dcd_pkt *)NULL) {
			mutex_exit(DCD_MUTEX);
			return (DDI_FAILURE);
		}

		switch (level) {
		case DCD_DEVICE_IDLE:
			cmd = ATA_IDLE_IMMEDIATE;
			break;

		case DCD_DEVICE_STANDBY:
			cmd = ATA_STANDBY_IMMEDIATE;
			break;
		}

		makecommand(pkt, 0, cmd, 0, 0, 0, NO_DATA_XFER, 0);
		mutex_exit(DCD_MUTEX);
		/*
		 * Issue the appropriate command
		 */
		if ((dcd_poll(pkt)) || (SCBP_C(pkt) != STATUS_GOOD)) {
			dcd_destroy_pkt(pkt);
			return (DDI_FAILURE);
		}
		dcd_destroy_pkt(pkt);
		mutex_enter(DCD_MUTEX);
		if (un->un_state != DCD_STATE_PM_SUSPENDED)
			New_state(un, DCD_STATE_PM_SUSPENDED);
		un->un_power_level = level;
		mutex_exit(DCD_MUTEX);
	}

	return (DDI_SUCCESS);
}
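
/*
 * (The component/level pairs accepted by dcdpower() mirror the
 * "pm-components" property created in dcdattach(): a single component
 * whose levels are 0 = standby, 1 = idle, 2 = active, with
 * DCD_DEVICE_STANDBY the lowest level accepted and DCD_DEVICE_ACTIVE
 * the highest.)
 */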

static int
dcd_doattach(dev_info_t *devi, int (*canwait)())
{
	struct dcd_device *devp;
	struct dcd_disk *un = (struct dcd_disk *)0;
	int instance;
	int km_flags = (canwait != NULL_FUNC)? KM_SLEEP : KM_NOSLEEP;
	int rval;
	char *prop_template = "target%x-dcd-options";
	int options;
	char prop_str[32];
	int target;
	diskaddr_t capacity;

	devp = ddi_get_driver_private(devi);

	/*
	 * Call the routine dcd_probe to do some of the dirty work.
	 * If the IDENTIFY command succeeds, the dcd_ident field in the
	 * device structure will be filled in. The dcd_sense structure
	 * will also be allocated.
	 */

	switch (dcd_probe(devp, canwait)) {
	default:
		return (DDI_FAILURE);

	case DCDPROBE_EXISTS:
		if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
			if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
				rval = DDI_SUCCESS;
			} else {
				rval = DDI_FAILURE;
				goto error;
			}
		} else {
			rval = DDI_FAILURE;
			goto error;
		}
	}


	instance = ddi_get_instance(devp->dcd_dev);

	if (ddi_soft_state_zalloc(dcd_state, instance) != DDI_SUCCESS) {
		rval = DDI_FAILURE;
		goto error;
	}

	un = ddi_get_soft_state(dcd_state, instance);

	un->un_sbufp = getrbuf(km_flags);
	if (un->un_sbufp == (struct buf *)NULL) {
		rval = DDI_FAILURE;
		goto error;
	}


	un->un_dcd = devp;
	un->un_power_level = -1;
	un->un_tgattribute.media_is_writable = 1;

	sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);
	cv_init(&un->un_sbuf_cv, NULL, CV_DRIVER, NULL);
	cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
	/* Initialize the power management condition variable */
	cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);
	cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);

	if (un->un_dp == 0) {
		/*
		 * Assume CCS drive, assume parity, but call
		 * it a CDROM if it is a RODIRECT device.
		 */
		un->un_dp = (struct dcd_drivetype *)
		    kmem_zalloc(sizeof (struct dcd_drivetype), km_flags);
		if (!un->un_dp) {
			rval = DDI_FAILURE;
			goto error;
		}
		if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
			if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
				un->un_dp->ctype = CTYPE_DISK;
			}
		} else {
			rval = DDI_FAILURE;
			goto error;
		}
		un->un_dp->name = "CCS";
		un->un_dp->options = 0;
	}

	/*
	 * Allow I/O requests at un_secsize offsets in multiples of
	 * un_secsize.
	 */
	un->un_secsize = DEV_BSIZE;

	/*
	 * If the device were not a removable-media device, we would make
	 * sure that it is ready by issuing another IDENTIFY, but that is
	 * not needed. Get the capacity from the IDENTIFY data and store
	 * it here.
	 */
	if (dcd_compute_dk_capacity(devp, &capacity) == 0) {
		un->un_diskcapacity = capacity;
		un->un_lbasize = DEV_BSIZE;
	}

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "Geometry Data\n");
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "cyls %x, heads %x",
	    devp->dcd_ident->dcd_fixcyls,
	    devp->dcd_ident->dcd_heads);
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "sectors %x,",
	    devp->dcd_ident->dcd_sectors);
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "capacity %llx\n",
	    capacity);

	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "dcdprobe: drive selected\n");

	/*
	 * Check for the property target<n>-dcd-options to find the option
	 * set by the HBA driver for this target, so that we can set the
	 * unit structure variables and send commands accordingly.
	 */
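	/*
	 * (For example, for target 0 the property looked up below is
	 * "target0-dcd-options".  Judging from the decoding that follows:
	 * when DCD_DMA_MODE is set, bits 3-4 carry the DMA mode;
	 * otherwise bits 0-2 carry the PIO mode and DCD_BLOCK_MODE
	 * selects block-mode transfers.)
	 */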
	target = devp->dcd_address->da_target;
	(void) sprintf(prop_str, prop_template, target);
	options = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_NOTPROM,
	    prop_str, -1);
	if (options < 0) {
		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "No per target properties");
	} else {
		if ((options & DCD_DMA_MODE) == DCD_DMA_MODE) {
			un->un_dp->options |= DMA_SUPPORTTED;
			un->un_dp->dma_mode = (options >> 3) & 0x03;
			DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
			    "mode %x\n", un->un_dp->dma_mode);
		} else {
			un->un_dp->options &= ~DMA_SUPPORTTED;
			un->un_dp->pio_mode = options & 0x7;
			if (options & DCD_BLOCK_MODE)
				un->un_dp->options |= BLOCK_MODE;
			DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
			    "mode %x\n", un->un_dp->pio_mode);
		}
		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "options %x,", un->un_dp->options);
	}

	un->un_throttle = 2;
	/*
	 * Set the default max_xfer_size - this should depend on whether
	 * block mode is supported by the device or not.
	 */
	un->un_max_xfer_size = MAX_ATA_XFER_SIZE;

	/*
	 * Set write cache enable softstate
	 *
	 * WCE is only supported in ATAPI-4 or higher; for
	 * lower rev devices, must assume write cache is
	 * enabled.
	 */
	mutex_enter(DCD_MUTEX);
	un->un_write_cache_enabled = (devp->dcd_ident->dcd_majvers == 0xffff) ||
	    ((devp->dcd_ident->dcd_majvers & IDENTIFY_80_ATAPI_4) == 0) ||
	    (devp->dcd_ident->dcd_features85 & IDENTIFY_85_WCE) != 0;
	mutex_exit(DCD_MUTEX);

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "dcd_doattach returns good\n");

	return (rval);

error:
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcd_doattach failed\n");
	dcd_free_softstate(un, devi);
	return (rval);
}

#ifdef NOTNEEDED
/*
 * This routine is used to set the block mode of operation by issuing the
 * Set Block mode ata command with the maximum block mode possible
 */
dcd_set_multiple(struct dcd_disk *un)
{
	int status;
	struct udcd_cmd ucmd;
	struct dcd_cmd cdb;
	dev_t dev;


	/* Zero all the required structures */
	(void) bzero((caddr_t)&ucmd, sizeof (ucmd));

	(void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));

	cdb.cmd = ATA_SET_MULTIPLE;
	/*
	 * Here we should pass what needs to go into the sector count
	 * register. Even though this field indicates the number of bytes
	 * to read, we need to specify the block factor in terms of bytes,
	 * so that it will be programmed by the HBA driver into the sector
	 * count register.
	 */
	cdb.size = un->un_lbasize * un->un_dp->block_factor;

	cdb.sector_num.lba_num = 0;
	cdb.address_mode = ADD_LBA_MODE;
	cdb.direction = NO_DATA_XFER;

	ucmd.udcd_flags = 0;
	ucmd.udcd_cmd = &cdb;
	ucmd.udcd_bufaddr = NULL;
	ucmd.udcd_buflen = 0;
	ucmd.udcd_flags |= UDCD_SILENT;

	dev = makedevice(ddi_driver_major(DCD_DEVINFO),
	    ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);


	status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);

	return (status);
}
/*
 * The following routine is used only for setting the transfer mode;
 * it is not designed for transferring any other features subcommand.
 */
dcd_set_features(struct dcd_disk *un, uchar_t mode)
{
	int status;
	struct udcd_cmd ucmd;
	struct dcd_cmd cdb;
	dev_t dev;


	/* Zero all the required structures */
	(void) bzero((caddr_t)&ucmd, sizeof (ucmd));

	(void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));

	cdb.cmd = ATA_SET_FEATURES;
	/*
	 * Here we need to pass what needs to go into the sector count
	 * register. But in the case of the SET FEATURES command the value
	 * taken by the sector count register depends on what type of
	 * subcommand is passed in the features register. Although we have
	 * defined the size to be the size in bytes, in this context it
	 * does not indicate bytes; instead it indicates the mode to be
	 * programmed.
	 */
	cdb.size = un->un_lbasize * mode;

	cdb.sector_num.lba_num = 0;
	cdb.address_mode = ADD_LBA_MODE;
	cdb.direction = NO_DATA_XFER;
	cdb.features = ATA_FEATURE_SET_MODE;
	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "size %x, features %x, cmd %x\n",
	    cdb.size, cdb.features, cdb.cmd);

	ucmd.udcd_flags = 0;
	ucmd.udcd_cmd = &cdb;
	ucmd.udcd_bufaddr = NULL;
	ucmd.udcd_buflen = 0;
	ucmd.udcd_flags |= UDCD_SILENT;

	dev = makedevice(ddi_driver_major(DCD_DEVINFO),
	    ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);

	status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);

	return (status);
}
#endif

/*
 * Validate the geometry for this disk, e.g.,
 * see whether it has a valid label.
 */
static int
dcd_validate_geometry(struct dcd_disk *un)
{
	int secsize = 0;
	struct dcd_device *devp;
	int secdiv;
	int rval;

	ASSERT(mutex_owned(DCD_MUTEX));
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "dcd_validate_geometry: started \n");

	if (un->un_lbasize < 0) {
		return (DCD_BAD_LABEL);
	}

	if (un->un_state == DCD_STATE_PM_SUSPENDED) {
		mutex_exit(DCD_MUTEX);
		if (pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE) !=
		    DDI_SUCCESS) {
			mutex_enter(DCD_MUTEX);
			return (DCD_BAD_LABEL);
		}
		mutex_enter(DCD_MUTEX);
	}

	secsize = un->un_secsize;

	/*
	 * take a log base 2 of sector size (sorry)
	 */
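	/*
	 * (For the usual 512-byte sector size the loop below shifts 512
	 * right nine times before reaching zero, leaving un_secdiv = 9,
	 * i.e. 1 << 9 == 512.)
	 */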
	for (secdiv = 0; secsize = secsize >> 1; secdiv++)
		;
	un->un_secdiv = secdiv;

	/*
	 * Only DIRECT ACCESS devices will have Sun labels.
	 * CD's supposedly have a Sun label, too
	 */

	devp = un->un_dcd;

	if (((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) &&
	    (devp->dcd_ident->dcd_config & ATANON_REMOVABLE)) {
		mutex_exit(DCD_MUTEX);
		rval = cmlb_validate(un->un_dklbhandle, 0, 0);
		mutex_enter(DCD_MUTEX);
		if (rval == ENOMEM)
			return (DCD_NO_MEM_FOR_LABEL);
		else if (rval != 0)
			return (DCD_BAD_LABEL);
	} else {
		/* it should never get here. */
		return (DCD_BAD_LABEL);
	}

	/*
	 * take a log base 2 of logical block size
	 */
	secsize = un->un_lbasize;
	for (secdiv = 0; secsize = secsize >> 1; secdiv++)
		;
	un->un_lbadiv = secdiv;

	/*
	 * take a log base 2 of the multiple of DEV_BSIZE blocks that
	 * make up one logical block
	 */
	secsize = un->un_lbasize >> DEV_BSHIFT;
	for (secdiv = 0; secsize = secsize >> 1; secdiv++)
		;
	un->un_blknoshift = secdiv;
	return (0);
}

/*
 * Unix Entry Points
 */

/* ARGSUSED3 */
static int
dcdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
{
	dev_t dev = *dev_p;
	int rval = EIO;
	int partmask;
	int nodelay = (flag & (FNDELAY | FNONBLOCK));
	int i;
	char kstatname[KSTAT_STRLEN];
	diskaddr_t lblocks;
	char *partname;

	GET_SOFT_STATE(dev);

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "Inside Open flag %x, otyp %x\n", flag, otyp);

	if (otyp >= OTYPCNT) {
		return (EINVAL);
	}

	partmask = 1 << part;

	/*
	 * We use a semaphore here in order to serialize
	 * open and close requests on the device.
	 */
	sema_p(&un->un_semoclose);

	mutex_enter(DCD_MUTEX);

	if ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL) {
		rval = ENXIO;
		goto done;
	}

	while (un->un_state == DCD_STATE_SUSPENDED) {
		cv_wait(&un->un_suspend_cv, DCD_MUTEX);
	}

	if ((un->un_state == DCD_STATE_PM_SUSPENDED) && (!nodelay)) {
		mutex_exit(DCD_MUTEX);
		if (pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE)
		    != DDI_SUCCESS) {
			mutex_enter(DCD_MUTEX);
			rval = EIO;
			goto done;
		}
		mutex_enter(DCD_MUTEX);
	}

	/*
	 * set make_dcd_cmd() flags and stat_size here since these
	 * are unlikely to change
	 */
	un->un_cmd_flags = 0;

	un->un_cmd_stat_size = 2;

	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdopen un=0x%p\n",
	    (void *)un);
	/*
	 * check for previous exclusive open
	 */
	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "exclopen=%x, flag=%x, regopen=%x\n",
	    un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "Exclusive open flag %x, partmask %x\n",
	    un->un_exclopen, partmask);

	if (un->un_exclopen & (partmask)) {
failed_exclusive:
		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "exclusive open fails\n");
		rval = EBUSY;
		goto done;
	}

	if (flag & FEXCL) {
		int i;
		if (un->un_ocmap.lyropen[part]) {
			goto failed_exclusive;
		}
		for (i = 0; i < (OTYPCNT - 1); i++) {
			if (un->un_ocmap.regopen[i] & (partmask)) {
				goto failed_exclusive;
			}
		}
	}
	if (flag & FWRITE) {
		mutex_exit(DCD_MUTEX);
		if (dcd_check_wp(dev)) {
			sema_v(&un->un_semoclose);
			return (EROFS);
		}
		mutex_enter(DCD_MUTEX);
	}

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "Check Write Protect handled\n");

	if (!nodelay) {
		mutex_exit(DCD_MUTEX);
		if ((rval = dcd_ready_and_valid(dev, un)) != 0) {
			rval = EIO;
		}
		(void) pm_idle_component(DCD_DEVINFO, 0);
		/*
		 * Fail if device is not ready or if the number of disk
		 * blocks is zero or negative for non CD devices.
		 */
		if (rval || cmlb_partinfo(un->un_dklbhandle,
		    part, &lblocks, NULL, &partname, NULL, 0) ||
		    lblocks <= 0) {
			rval = EIO;
			mutex_enter(DCD_MUTEX);
			goto done;
		}
		mutex_enter(DCD_MUTEX);
	}

	if (otyp == OTYP_LYR) {
		un->un_ocmap.lyropen[part]++;
	} else {
		un->un_ocmap.regopen[otyp] |= partmask;
	}

	/*
	 * set up open and exclusive open flags
	 */
	if (flag & FEXCL) {
		un->un_exclopen |= (partmask);
	}


	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "open of part %d type %d\n",
	    part, otyp);

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "Kstats getting updated\n");
	/*
	 * only create kstats for disks, CD kstats created in dcdattach
	 */
	_NOTE(NO_COMPETING_THREADS_NOW);
	mutex_exit(DCD_MUTEX);
	if (un->un_stats == (kstat_t *)0) {
		un->un_stats = kstat_create("dad", instance,
		    NULL, "disk", KSTAT_TYPE_IO, 1,
		    KSTAT_FLAG_PERSISTENT);
		if (un->un_stats) {
			un->un_stats->ks_lock = DCD_MUTEX;
			kstat_install(un->un_stats);
		}

		/*
		 * set up partition statistics for each partition
		 * with number of blocks > 0
		 */
		if (!nodelay) {
			for (i = 0; i < NDKMAP; i++) {
				if ((un->un_pstats[i] == (kstat_t *)0) &&
				    (cmlb_partinfo(un->un_dklbhandle,
				    i, &lblocks, NULL, &partname,
				    NULL, 0) == 0) && lblocks > 0) {
					(void) sprintf(kstatname, "dad%d,%s",
					    instance, partname);
					un->un_pstats[i] = kstat_create("dad",
					    instance,
					    kstatname,
					    "partition",
					    KSTAT_TYPE_IO,
					    1,
					    KSTAT_FLAG_PERSISTENT);
					if (un->un_pstats[i]) {
						un->un_pstats[i]->ks_lock =
						    DCD_MUTEX;
						kstat_install(un->un_pstats[i]);
					}
				}
			}
		}
		/*
		 * set up error kstats
		 */
		(void) dcd_create_errstats(un, instance);
	}
#ifndef lint
	_NOTE(COMPETING_THREADS_NOW);
#endif

	sema_v(&un->un_semoclose);
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "Open success\n");
	return (0);

done:
	mutex_exit(DCD_MUTEX);
	sema_v(&un->un_semoclose);
	return (rval);

}
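
/*
 * (dcd_ready_and_valid() returns 0 when the disk is ready and carries a
 * usable label, and non-zero otherwise; dcdopen() above and
 * dcdstrategy() below both rely on that convention.)
 */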

/*
 * Test if disk is ready and has a valid geometry.
 */
static int
dcd_ready_and_valid(dev_t dev, struct dcd_disk *un)
{
	int rval = 1;
	int g_error = 0;

	mutex_enter(DCD_MUTEX);
	/*
	 * cmds outstanding
	 */
	if (un->un_ncmds == 0) {
		(void) dcd_unit_ready(dev);
	}

	/*
	 * If the device is not yet ready here, mark it offline
	 */
	if (un->un_state == DCD_STATE_NORMAL) {
		rval = dcd_unit_ready(dev);
		if (rval != 0 && rval != EACCES) {
			dcd_offline(un, 1);
			goto done;
		}
	}

	if (un->un_format_in_progress == 0) {
		g_error = dcd_validate_geometry(un);
	}

	/*
	 * check if geometry was valid. We don't check the validity of
	 * geometry for CDROMS.
	 */

	if (g_error == DCD_BAD_LABEL) {
		rval = 1;
		goto done;
	}


	/*
	 * the state has changed; inform the media watch routines
	 */
	un->un_mediastate = DKIO_INSERTED;
	cv_broadcast(&un->un_state_cv);
	rval = 0;

done:
	mutex_exit(DCD_MUTEX);
	return (rval);
}


/*ARGSUSED*/
static int
dcdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
{
	uchar_t *cp;
	int i;

	GET_SOFT_STATE(dev);


	if (otyp >= OTYPCNT)
		return (ENXIO);

	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "close of part %d type %d\n",
	    part, otyp);
	sema_p(&un->un_semoclose);

	mutex_enter(DCD_MUTEX);

	if (un->un_exclopen & (1<<part)) {
		un->un_exclopen &= ~(1<<part);
	}

	if (otyp == OTYP_LYR) {
		un->un_ocmap.lyropen[part] -= 1;
	} else {
		un->un_ocmap.regopen[otyp] &= ~(1<<part);
	}

	cp = &un->un_ocmap.chkd[0];
	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
		if (*cp != (uchar_t)0) {
			break;
		}
		cp++;
	}

	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "last close\n");
		if (un->un_state == DCD_STATE_OFFLINE) {
			dcd_offline(un, 1);
		}

		mutex_exit(DCD_MUTEX);
		(void) cmlb_close(un->un_dklbhandle, 0);

		_NOTE(NO_COMPETING_THREADS_NOW);
		if (un->un_stats) {
			kstat_delete(un->un_stats);
			un->un_stats = 0;
		}
		for (i = 0; i < NDKMAP; i++) {
			if (un->un_pstats[i]) {
				kstat_delete(un->un_pstats[i]);
				un->un_pstats[i] = (kstat_t *)0;
			}
		}

		if (un->un_errstats) {
			kstat_delete(un->un_errstats);
			un->un_errstats = (kstat_t *)0;
		}
		mutex_enter(DCD_MUTEX);

#ifndef lint
		_NOTE(COMPETING_THREADS_NOW);
#endif
	}

	mutex_exit(DCD_MUTEX);
	sema_v(&un->un_semoclose);
	return (0);
}

static void
dcd_offline(struct dcd_disk *un, int bechatty)
{
	if (bechatty)
		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, "offline\n");

	mutex_exit(DCD_MUTEX);
	cmlb_invalidate(un->un_dklbhandle, 0);
	mutex_enter(DCD_MUTEX);
}

/*
 * Given the device number, return the devinfo pointer
 * from the dcd_device structure.
 */
/*ARGSUSED*/
static int
dcdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t dev;
	struct dcd_disk *un;
	int instance, error;


	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		dev = (dev_t)arg;
		instance = DCDUNIT(dev);
		if ((un = ddi_get_soft_state(dcd_state, instance)) == NULL)
			return (DDI_FAILURE);
		*result = (void *) DCD_DEVINFO;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		dev = (dev_t)arg;
		instance = DCDUNIT(dev);
		*result = (void *)(uintptr_t)instance;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}

/*
 * property operation routine. return the number of blocks for the partition
 * in question or forward the request to the property facilities.
 */
static int
dcd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	struct dcd_disk *un;

	if ((un = ddi_get_soft_state(dcd_state, ddi_get_instance(dip))) == NULL)
		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));

	return (cmlb_prop_op(un->un_dklbhandle,
	    dev, dip, prop_op, mod_flags, name, valuep, lengthp,
	    DCDPART(dev), NULL));
}

/*
 * These routines perform raw i/o operations.
 */
/*ARGSUSED*/
void
dcduscsimin(struct buf *bp)
{

}


static void
dcdmin(struct buf *bp)
{
	struct dcd_disk *un;
	int instance;
	minor_t minor = getminor(bp->b_edev);
	instance = minor >> DCDUNIT_SHIFT;
	un = ddi_get_soft_state(dcd_state, instance);

	if (bp->b_bcount > un->un_max_xfer_size)
		bp->b_bcount = un->un_max_xfer_size;
}


/* ARGSUSED2 */
static int
dcdread(dev_t dev, struct uio *uio, cred_t *cred_p)
{
	int secmask;
	GET_SOFT_STATE(dev);
#ifdef lint
	part = part;
#endif /* lint */
	secmask = un->un_secsize - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "file offset not modulo %d\n",
		    un->un_secsize);
		return (EINVAL);
	} else if (uio->uio_iov->iov_len & (secmask)) {
		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "transfer length not modulo %d\n", un->un_secsize);
		return (EINVAL);
	}
	return (physio(dcdstrategy, (struct buf *)0, dev, B_READ, dcdmin, uio));
}
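
/*
 * (Worked example of the check above: with 512-byte sectors secmask is
 * 0x1FF, so any offset or transfer length with one of the low nine bits
 * set is not sector-aligned and is rejected with EINVAL.  dcdaread(),
 * dcdwrite() and dcdawrite() below apply the same screen.)
 */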
1764 "file offset not modulo %d\n", 1765 un->un_secsize); 1766 return (EINVAL); 1767 } else if (uio->uio_iov->iov_len & (secmask)) { 1768 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, 1769 "transfer length not modulo %d\n", un->un_secsize); 1770 return (EINVAL); 1771 } 1772 return (aphysio(dcdstrategy, anocancel, dev, B_WRITE, dcdmin, aio)); 1773 } 1774 1775 /* 1776 * strategy routine 1777 */ 1778 static int 1779 dcdstrategy(struct buf *bp) 1780 { 1781 struct dcd_disk *un; 1782 struct diskhd *dp; 1783 int i; 1784 minor_t minor = getminor(bp->b_edev); 1785 diskaddr_t p_lblksrt; 1786 diskaddr_t lblocks; 1787 diskaddr_t bn; 1788 1789 if ((un = ddi_get_soft_state(dcd_state, 1790 minor >> DCDUNIT_SHIFT)) == NULL || 1791 un->un_state == DCD_STATE_DUMPING || 1792 ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL)) { 1793 SET_BP_ERROR(bp, ((un) ? ENXIO : EIO)); 1794 error: 1795 bp->b_resid = bp->b_bcount; 1796 biodone(bp); 1797 return (0); 1798 } 1799 1800 /* 1801 * If the request size (buf->b_bcount)is greater than the size 1802 * (un->un_max_xfer_size) supported by the target driver fail 1803 * the request with EINVAL error code. 1804 * 1805 * We are not supposed to receive requests exceeding 1806 * un->un_max_xfer_size size because the caller is expected to 1807 * check what is the maximum size that is supported by this 1808 * driver either through ioctl or dcdmin routine(which is private 1809 * to this driver). 1810 * But we have seen cases (like meta driver(md))where dcdstrategy 1811 * called with more than supported size and cause data corruption. 1812 */ 1813 1814 if (bp->b_bcount > un->un_max_xfer_size) { 1815 SET_BP_ERROR(bp, EINVAL); 1816 goto error; 1817 } 1818 1819 TRACE_2(TR_FAC_DADA, TR_DCDSTRATEGY_START, 1820 "dcdstrategy_start: bp 0x%p un 0x%p", bp, un); 1821 1822 /* 1823 * Commands may sneak in while we released the mutex in 1824 * DDI_SUSPEND, we should block new commands. 1825 */ 1826 mutex_enter(DCD_MUTEX); 1827 while (un->un_state == DCD_STATE_SUSPENDED) { 1828 cv_wait(&un->un_suspend_cv, DCD_MUTEX); 1829 } 1830 1831 if (un->un_state == DCD_STATE_PM_SUSPENDED) { 1832 mutex_exit(DCD_MUTEX); 1833 (void) pm_idle_component(DCD_DEVINFO, 0); 1834 if (pm_raise_power(DCD_DEVINFO, 0, 1835 DCD_DEVICE_ACTIVE) != DDI_SUCCESS) { 1836 SET_BP_ERROR(bp, EIO); 1837 goto error; 1838 } 1839 mutex_enter(DCD_MUTEX); 1840 } 1841 mutex_exit(DCD_MUTEX); 1842 1843 /* 1844 * Map-in the buffer in case starting address is not word aligned. 1845 */ 1846 1847 if (((uintptr_t)bp->b_un.b_addr) & 0x1) 1848 bp_mapin(bp); 1849 1850 bp->b_flags &= ~(B_DONE|B_ERROR); 1851 bp->b_resid = 0; 1852 bp->av_forw = 0; 1853 1854 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, 1855 "bp->b_bcount %lx\n", bp->b_bcount); 1856 1857 if (bp != un->un_sbufp) { 1858 validated: if (cmlb_partinfo(un->un_dklbhandle, 1859 minor & DCDPART_MASK, 1860 &lblocks, 1861 &p_lblksrt, 1862 NULL, 1863 NULL, 1864 0) == 0) { 1865 1866 bn = dkblock(bp); 1867 1868 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, 1869 "dkblock(bp) is %llu\n", bn); 1870 1871 i = 0; 1872 if (bn < 0) { 1873 i = -1; 1874 } else if (bn >= lblocks) { 1875 /* 1876 * For proper comparison, file system block 1877 * number has to be scaled to actual CD 1878 * transfer size. 1879 * Since all the CDROM operations 1880 * that have Sun Labels are in the correct 1881 * block size this will work for CD's. This 1882 * will have to change when we have different 1883 * sector sizes. 

/*
 * strategy routine
 */
static int
dcdstrategy(struct buf *bp)
{
	struct dcd_disk *un;
	struct diskhd *dp;
	int i;
	minor_t minor = getminor(bp->b_edev);
	diskaddr_t p_lblksrt;
	diskaddr_t lblocks;
	diskaddr_t bn;

	if ((un = ddi_get_soft_state(dcd_state,
	    minor >> DCDUNIT_SHIFT)) == NULL ||
	    un->un_state == DCD_STATE_DUMPING ||
	    ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL)) {
		SET_BP_ERROR(bp, ((un) ? ENXIO : EIO));
error:
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	/*
	 * If the request size (buf->b_bcount) is greater than the size
	 * (un->un_max_xfer_size) supported by the target driver, fail
	 * the request with the EINVAL error code.
	 *
	 * We are not supposed to receive requests exceeding
	 * un->un_max_xfer_size, because the caller is expected to
	 * check what the maximum size supported by this driver is,
	 * either through an ioctl or the dcdmin routine (which is
	 * private to this driver).
	 * But we have seen cases (like the meta driver (md)) where
	 * dcdstrategy is called with a larger-than-supported size,
	 * causing data corruption.
	 */

	if (bp->b_bcount > un->un_max_xfer_size) {
		SET_BP_ERROR(bp, EINVAL);
		goto error;
	}

	TRACE_2(TR_FAC_DADA, TR_DCDSTRATEGY_START,
	    "dcdstrategy_start: bp 0x%p un 0x%p", bp, un);

	/*
	 * Commands may sneak in while we released the mutex in
	 * DDI_SUSPEND, we should block new commands.
	 */
	mutex_enter(DCD_MUTEX);
	while (un->un_state == DCD_STATE_SUSPENDED) {
		cv_wait(&un->un_suspend_cv, DCD_MUTEX);
	}

	if (un->un_state == DCD_STATE_PM_SUSPENDED) {
		mutex_exit(DCD_MUTEX);
		(void) pm_idle_component(DCD_DEVINFO, 0);
		if (pm_raise_power(DCD_DEVINFO, 0,
		    DCD_DEVICE_ACTIVE) != DDI_SUCCESS) {
			SET_BP_ERROR(bp, EIO);
			goto error;
		}
		mutex_enter(DCD_MUTEX);
	}
	mutex_exit(DCD_MUTEX);

	/*
	 * Map-in the buffer in case the starting address is not word aligned.
	 */

	if (((uintptr_t)bp->b_un.b_addr) & 0x1)
		bp_mapin(bp);

	bp->b_flags &= ~(B_DONE|B_ERROR);
	bp->b_resid = 0;
	bp->av_forw = 0;

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "bp->b_bcount %lx\n", bp->b_bcount);

	if (bp != un->un_sbufp) {
validated:	if (cmlb_partinfo(un->un_dklbhandle,
		    minor & DCDPART_MASK,
		    &lblocks,
		    &p_lblksrt,
		    NULL,
		    NULL,
		    0) == 0) {

			bn = dkblock(bp);

			DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
			    "dkblock(bp) is %llu\n", bn);

			i = 0;
			if (bn < 0) {
				i = -1;
			} else if (bn >= lblocks) {
				/*
				 * For proper comparison, the file system
				 * block number has to be scaled to the
				 * actual CD transfer size.
				 * Since all the CDROM operations
				 * that have Sun Labels are in the correct
				 * block size this will work for CD's. This
				 * will have to change when we have different
				 * sector sizes.
				 *
				 * if bn == lblocks,
				 * Not an error, resid == count
				 */
				if (bn > lblocks) {
					i = -1;
				} else {
					i = 1;
				}
			} else if (bp->b_bcount & (un->un_secsize-1)) {
				/*
				 * This should really be:
				 *
				 * ... if (bp->b_bcount & (un->un_lbasize-1))
				 *
				 */
				i = -1;
			} else {
				if (!bp->b_bcount) {
					printf("Warning : Zero read or Write\n");
					goto error;
				}
				/*
				 * sort by absolute block number.
				 */
				bp->b_resid = bn;
				bp->b_resid += p_lblksrt;
				/*
				 * zero out av_back - this will be a signal
				 * to dcdstart to go and fetch the resources
				 */
				bp->av_back = NO_PKT_ALLOCATED;
			}

			/*
			 * Check to see whether or not we are done
			 * (with or without errors).
			 */

			if (i != 0) {
				if (i < 0) {
					bp->b_flags |= B_ERROR;
				}
				goto error;
			}
		} else {
			/*
			 * opened in NDELAY/NONBLOCK mode?
			 * Check if disk is ready and has a valid geometry
			 */
			if (dcd_ready_and_valid(bp->b_edev, un) == 0) {
				goto validated;
			} else {
				dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
				    "i/o to invalid geometry\n");
				SET_BP_ERROR(bp, EIO);
				goto error;
			}
		}
	} else if (BP_HAS_NO_PKT(bp)) {
		struct udcd_cmd *tscmdp;
		struct dcd_cmd *tcmdp;
		/*
		 * This indicates that it is a special buffer.
		 * This could be a udcd-cmd, so call bp_mapin just
		 * in case it is a PIO command that was issued.
		 */
		tscmdp = (struct udcd_cmd *)bp->b_forw;
		tcmdp = tscmdp->udcd_cmd;
		if ((tcmdp->cmd != ATA_READ_DMA) && (tcmdp->cmd != 0xc9) &&
		    (tcmdp->cmd != ATA_WRITE_DMA) && (tcmdp->cmd != 0xcb) &&
		    (tcmdp->cmd != IDENTIFY_DMA) &&
		    (tcmdp->cmd != ATA_FLUSH_CACHE)) {
			bp_mapin(bp);
		}
	}

	/*
	 * We are doing it a bit non-standard. That is, the
	 * head of the b_actf chain is *not* the active command -
	 * it is just the head of the wait queue. The reason
	 * we do this is that the head of the b_actf chain is
	 * guaranteed to not be moved by disksort(), so that
	 * our restart command (pointed to by
	 * b_forw) and the head of the wait queue (b_actf) can
	 * have resources granted without it getting lost in
	 * the queue at some later point (where we would have
	 * to go and look for it).
	 */
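	/*
	 * (A sketch of the arrangement described above:
	 *
	 *	dp->b_actf -> bp -> bp -> ...	sorted wait queue; its head
	 *					is never moved by disksort()
	 *	dp->b_forw			active/restart command
	 *
	 * so resources granted to the head of b_actf cannot get lost in
	 * the queue.)
	 */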
	mutex_enter(DCD_MUTEX);

	DCD_DO_KSTATS(un, kstat_waitq_enter, bp);

	dp = &un->un_utab;

	if (dp->b_actf == NULL) {
		dp->b_actf = bp;
		dp->b_actl = bp;
	} else if ((un->un_state == DCD_STATE_SUSPENDED) &&
	    bp == un->un_sbufp) {
		bp->b_actf = dp->b_actf;
		dp->b_actf = bp;
	} else {
		TRACE_3(TR_FAC_DADA, TR_DCDSTRATEGY_DISKSORT_START,
		    "dcdstrategy_disksort_start: dp 0x%p bp 0x%p un 0x%p",
		    dp, bp, un);
		disksort(dp, bp);
		TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_DISKSORT_END,
		    "dcdstrategy_disksort_end");
	}

	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "ncmd %x , throttle %x, forw 0x%p\n",
	    un->un_ncmds, un->un_throttle, (void *)dp->b_forw);
	ASSERT(un->un_ncmds >= 0);
	ASSERT(un->un_throttle >= 0);
	if ((un->un_ncmds < un->un_throttle) && (dp->b_forw == NULL)) {
		dcdstart(un);
	} else if (BP_HAS_NO_PKT(dp->b_actf)) {
		struct buf *cmd_bp;

		cmd_bp = dp->b_actf;
		cmd_bp->av_back = ALLOCATING_PKT;
		mutex_exit(DCD_MUTEX);
		/*
		 * try and map this one
		 */
		TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_SMALL_WINDOW_START,
		    "dcdstrategy_small_window_call (begin)");

		make_dcd_cmd(un, cmd_bp, NULL_FUNC);

		TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_SMALL_WINDOW_END,
		    "dcdstrategy_small_window_call (end)");

		/*
		 * there is a small window where the active cmd
		 * completes before make_dcd_cmd returns.
		 * consequently, this cmd never gets started so
		 * we start it from here
		 */
		mutex_enter(DCD_MUTEX);
		if ((un->un_ncmds < un->un_throttle) &&
		    (dp->b_forw == NULL)) {
			dcdstart(un);
		}
	}
	mutex_exit(DCD_MUTEX);

done:
	TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_END, "dcdstrategy_end");
	return (0);
}
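
/*
 * (un_throttle, set to 2 in dcdattach() and dcd_doattach(), bounds
 * un_ncmds: dcdstart() is only entered while un_ncmds < un_throttle and
 * no restart command is pending on dp->b_forw.)
 */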
2083 * Normally the bp already has a packet at this point 2084 */ 2085 DCD_DO_KSTATS(un, kstat_waitq_to_runq, bp); 2086 2087 mutex_exit(DCD_MUTEX); 2088 2089 if (BP_HAS_NO_PKT(bp)) { 2090 make_dcd_cmd(un, bp, dcdrunout); 2091 if (BP_HAS_NO_PKT(bp) && !(bp->b_flags & B_ERROR)) { 2092 mutex_enter(DCD_MUTEX); 2093 DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp); 2094 2095 bp->b_actf = dp->b_actf; 2096 dp->b_actf = bp; 2097 New_state(un, DCD_STATE_RWAIT); 2098 un->un_ncmds--; 2099 TRACE_0(TR_FAC_DADA, TR_DCDSTART_NO_RESOURCES_END, 2100 "dcdstart_end (No Resources)"); 2101 goto done; 2102 2103 } else if (bp->b_flags & B_ERROR) { 2104 mutex_enter(DCD_MUTEX); 2105 DCD_DO_KSTATS(un, kstat_runq_exit, bp); 2106 2107 un->un_ncmds--; 2108 bp->b_resid = bp->b_bcount; 2109 if (bp->b_error == 0) { 2110 SET_BP_ERROR(bp, EIO); 2111 } 2112 2113 /* 2114 * restore old state 2115 */ 2116 un->un_state = un->un_last_state; 2117 un->un_last_state = state; 2118 2119 mutex_exit(DCD_MUTEX); 2120 2121 biodone(bp); 2122 mutex_enter(DCD_MUTEX); 2123 if (un->un_state == DCD_STATE_SUSPENDED) { 2124 cv_broadcast(&un->un_disk_busy_cv); 2125 } 2126 2127 if ((un->un_ncmds < un->un_throttle) && 2128 (dp->b_forw == NULL)) { 2129 goto retry; 2130 } else { 2131 goto done; 2132 } 2133 } 2134 } 2135 2136 /* 2137 * Restore resid from the packet, b_resid had been the 2138 * disksort key. 2139 */ 2140 sort_key = bp->b_resid; 2141 bp->b_resid = BP_PKT(bp)->pkt_resid; 2142 BP_PKT(bp)->pkt_resid = 0; 2143 2144 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, 2145 "bp->b_resid %lx, pkt_resid %lx\n", 2146 bp->b_resid, BP_PKT(bp)->pkt_resid); 2147 2148 /* 2149 * We used to check whether or not to try and link commands here. 2150 * Since we have found that there is no performance improvement 2151 * for linked commands, this has not made much sense. 2152 */ 2153 if ((status = dcd_transport((struct dcd_pkt *)BP_PKT(bp))) 2154 != TRAN_ACCEPT) { 2155 mutex_enter(DCD_MUTEX); 2156 un->un_ncmds--; 2157 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, 2158 "transport returned %x\n", status); 2159 if (status == TRAN_BUSY) { 2160 DCD_DO_ERRSTATS(un, dcd_transerrs); 2161 DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp); 2162 dcd_handle_tran_busy(bp, dp, un); 2163 if (un->un_ncmds > 0) { 2164 bp->b_resid = sort_key; 2165 } 2166 } else { 2167 DCD_DO_KSTATS(un, kstat_runq_exit, bp); 2168 mutex_exit(DCD_MUTEX); 2169 2170 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2171 "transport rejected (%d)\n", 2172 status); 2173 SET_BP_ERROR(bp, EIO); 2174 bp->b_resid = bp->b_bcount; 2175 if (bp != un->un_sbufp) { 2176 dcd_destroy_pkt(BP_PKT(bp)); 2177 } 2178 biodone(bp); 2179 2180 mutex_enter(DCD_MUTEX); 2181 if (un->un_state == DCD_STATE_SUSPENDED) { 2182 cv_broadcast(&un->un_disk_busy_cv); 2183 } 2184 if ((un->un_ncmds < un->un_throttle) && 2185 (dp->b_forw == NULL)) { 2186 goto retry; 2187 } 2188 } 2189 } else { 2190 mutex_enter(DCD_MUTEX); 2191 2192 if (dp->b_actf && BP_HAS_NO_PKT(dp->b_actf)) { 2193 struct buf *cmd_bp; 2194 2195 cmd_bp = dp->b_actf; 2196 cmd_bp->av_back = ALLOCATING_PKT; 2197 mutex_exit(DCD_MUTEX); 2198 /* 2199 * try and map this one 2200 */ 2201 TRACE_0(TR_FAC_DADA, TR_DCASTART_SMALL_WINDOW_START, 2202 "dcdstart_small_window_start"); 2203 2204 make_dcd_cmd(un, cmd_bp, NULL_FUNC); 2205 2206 TRACE_0(TR_FAC_DADA, TR_DCDSTART_SMALL_WINDOW_END, 2207 "dcdstart_small_window_end"); 2208 /* 2209 * there is a small window where the active cmd 2210 * completes before make_dcd_cmd returns. 
2211 * consequently, this cmd never gets started so 2212 * we start it from here 2213 */ 2214 mutex_enter(DCD_MUTEX); 2215 if ((un->un_ncmds < un->un_throttle) && 2216 (dp->b_forw == NULL)) { 2217 goto retry; 2218 } 2219 } 2220 } 2221 2222 done: 2223 ASSERT(mutex_owned(DCD_MUTEX)); 2224 TRACE_0(TR_FAC_DADA, TR_DCDSTART_END, "dcdstart_end"); 2225 } 2226 2227 /* 2228 * make_dcd_cmd: create a pkt 2229 */ 2230 static void 2231 make_dcd_cmd(struct dcd_disk *un, struct buf *bp, int (*func)()) 2232 { 2233 auto int count, com, direction; 2234 struct dcd_pkt *pkt; 2235 int flags, tval; 2236 2237 _NOTE(DATA_READABLE_WITHOUT_LOCK(dcd_disk::un_dp)) 2238 TRACE_3(TR_FAC_DADA, TR_MAKE_DCD_CMD_START, 2239 "make_dcd_cmd_start: un 0x%p bp 0x%p un 0x%p", un, bp, un); 2240 2241 2242 flags = un->un_cmd_flags; 2243 2244 if (bp != un->un_sbufp) { 2245 int partition = DCDPART(bp->b_edev); 2246 diskaddr_t p_lblksrt; 2247 diskaddr_t lblocks; 2248 long secnt; 2249 uint32_t blkno; 2250 int dkl_nblk, delta; 2251 long resid; 2252 2253 if (cmlb_partinfo(un->un_dklbhandle, 2254 partition, 2255 &lblocks, 2256 &p_lblksrt, 2257 NULL, 2258 NULL, 2259 0) != NULL) { 2260 lblocks = 0; 2261 p_lblksrt = 0; 2262 } 2263 2264 dkl_nblk = (int)lblocks; 2265 2266 /* 2267 * Make sure we don't run off the end of a partition. 2268 * 2269 * Put this test here so that we can adjust b_bcount 2270 * to accurately reflect the actual amount we are 2271 * going to transfer. 2272 */ 2273 2274 /* 2275 * First, compute partition-relative block number 2276 */ 2277 blkno = dkblock(bp); 2278 secnt = (bp->b_bcount + (un->un_secsize - 1)) >> un->un_secdiv; 2279 count = MIN(secnt, dkl_nblk - blkno); 2280 if (count != secnt) { 2281 /* 2282 * We have an overrun 2283 */ 2284 resid = (secnt - count) << un->un_secdiv; 2285 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, 2286 "overrun by %ld sectors\n", 2287 secnt - count); 2288 bp->b_bcount -= resid; 2289 } else { 2290 resid = 0; 2291 } 2292 2293 /* 2294 * Adjust block number to absolute 2295 */ 2296 delta = (int)p_lblksrt; 2297 blkno += delta; 2298 2299 mutex_enter(DCD_MUTEX); 2300 /* 2301 * This is for devices having block size different 2302 * from DEV_BSIZE (e.g. 2K CDROMs).
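 * blkno and count are in DEV_BSIZE units here; when the device's
 * logical block size differs, the un_blknoshift shifts below convert
 * them to units of the device's logical blocks.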
2303 */ 2304 if (un->un_lbasize != un->un_secsize) { 2305 blkno >>= un->un_blknoshift; 2306 count >>= un->un_blknoshift; 2307 } 2308 mutex_exit(DCD_MUTEX); 2309 2310 TRACE_0(TR_FAC_DADA, TR_MAKE_DCD_CMD_INIT_PKT_START, 2311 "make_dcd_cmd_init_pkt_call (begin)"); 2312 pkt = dcd_init_pkt(ROUTE, NULL, bp, 2313 (uint32_t)sizeof (struct dcd_cmd), 2314 un->un_cmd_stat_size, PP_LEN, PKT_CONSISTENT, 2315 func, (caddr_t)un); 2316 TRACE_1(TR_FAC_DADA, TR_MAKE_DCD_CMD_INIT_PKT_END, 2317 "make_dcd_cmd_init_pkt_call (end): pkt 0x%p", pkt); 2318 if (!pkt) { 2319 bp->b_bcount += resid; 2320 bp->av_back = NO_PKT_ALLOCATED; 2321 TRACE_0(TR_FAC_DADA, 2322 TR_MAKE_DCD_CMD_NO_PKT_ALLOCATED1_END, 2323 "make_dcd_cmd_end (NO_PKT_ALLOCATED1)"); 2324 return; 2325 } 2326 if (bp->b_flags & B_READ) { 2327 if ((un->un_dp->options & DMA_SUPPORTTED) == 2328 DMA_SUPPORTTED) { 2329 com = ATA_READ_DMA; 2330 } else { 2331 if (un->un_dp->options & BLOCK_MODE) 2332 com = ATA_READ_MULTIPLE; 2333 else 2334 com = ATA_READ; 2335 } 2336 direction = DATA_READ; 2337 } else { 2338 if ((un->un_dp->options & DMA_SUPPORTTED) == 2339 DMA_SUPPORTTED) { 2340 com = ATA_WRITE_DMA; 2341 } else { 2342 if (un->un_dp->options & BLOCK_MODE) 2343 com = ATA_WRITE_MULTIPLE; 2344 else 2345 com = ATA_WRITE; 2346 } 2347 direction = DATA_WRITE; 2348 } 2349 2350 /* 2351 * Save the resid in the packet, temporarily until 2352 * we transport the command. 2353 */ 2354 pkt->pkt_resid = resid; 2355 2356 makecommand(pkt, flags, com, blkno, ADD_LBA_MODE, 2357 bp->b_bcount, direction, 0); 2358 tval = dcd_io_time; 2359 } else { 2360 2361 struct udcd_cmd *scmd = (struct udcd_cmd *)bp->b_forw; 2362 2363 /* 2364 * set options 2365 */ 2366 if ((scmd->udcd_flags & UDCD_SILENT) && !(DEBUGGING)) { 2367 flags |= FLAG_SILENT; 2368 } 2369 if (scmd->udcd_flags & UDCD_DIAGNOSE) 2370 flags |= FLAG_DIAGNOSE; 2371 2372 if (scmd->udcd_flags & UDCD_NOINTR) 2373 flags |= FLAG_NOINTR; 2374 2375 pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL, 2376 (bp->b_bcount)? bp: NULL, 2377 (uint32_t)sizeof (struct dcd_cmd), 2378 2, PP_LEN, PKT_CONSISTENT, func, (caddr_t)un); 2379 2380 if (!pkt) { 2381 bp->av_back = NO_PKT_ALLOCATED; 2382 return; 2383 } 2384 2385 makecommand(pkt, 0, scmd->udcd_cmd->cmd, 2386 scmd->udcd_cmd->sector_num.lba_num, 2387 scmd->udcd_cmd->address_mode, 2388 scmd->udcd_cmd->size, 2389 scmd->udcd_cmd->direction, scmd->udcd_cmd->features); 2390 2391 pkt->pkt_flags = flags; 2392 if (scmd->udcd_timeout == 0) 2393 tval = dcd_io_time; 2394 else 2395 tval = scmd->udcd_timeout; 2396 /* UDAD interface should be decided. 
*/ 2397 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, 2398 "udcd interface\n"); 2399 } 2400 2401 pkt->pkt_comp = dcdintr; 2402 pkt->pkt_time = tval; 2403 PKT_SET_BP(pkt, bp); 2404 bp->av_back = (struct buf *)pkt; 2405 2406 TRACE_0(TR_FAC_DADA, TR_MAKE_DCD_CMD_END, "make_dcd_cmd_end"); 2407 } 2408 2409 /* 2410 * Command completion processing 2411 */ 2412 static void 2413 dcdintr(struct dcd_pkt *pkt) 2414 { 2415 struct dcd_disk *un; 2416 struct buf *bp; 2417 int action; 2418 int status; 2419 2420 bp = PKT_GET_BP(pkt); 2421 un = ddi_get_soft_state(dcd_state, DCDUNIT(bp->b_edev)); 2422 2423 TRACE_1(TR_FAC_DADA, TR_DCDINTR_START, "dcdintr_start: un 0x%p", un); 2424 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdintr\n"); 2425 2426 mutex_enter(DCD_MUTEX); 2427 un->un_ncmds--; 2428 DCD_DO_KSTATS(un, kstat_runq_exit, bp); 2429 ASSERT(un->un_ncmds >= 0); 2430 2431 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, 2432 "reason %x and Status %x\n", pkt->pkt_reason, SCBP_C(pkt)); 2433 2434 /* 2435 * do most common case first 2436 */ 2437 if ((pkt->pkt_reason == CMD_CMPLT) && (SCBP_C(pkt) == 0)) { 2438 int com = GETATACMD((struct dcd_cmd *)pkt->pkt_cdbp); 2439 2440 if (un->un_state == DCD_STATE_OFFLINE) { 2441 un->un_state = un->un_last_state; 2442 dcd_log(DCD_DEVINFO, dcd_label, CE_NOTE, 2443 (const char *) diskokay); 2444 } 2445 /* 2446 * If the command is a read or a write, and we have 2447 * a non-zero pkt_resid, that is an error. We should 2448 * attempt to retry the operation if possible. 2449 */ 2450 action = COMMAND_DONE; 2451 if (pkt->pkt_resid && (com == ATA_READ || com == ATA_WRITE)) { 2452 DCD_DO_ERRSTATS(un, dcd_harderrs); 2453 if ((int)PKT_GET_RETRY_CNT(pkt) < dcd_retry_count) { 2454 PKT_INCR_RETRY_CNT(pkt, 1); 2455 action = QUE_COMMAND; 2456 } else { 2457 /* 2458 * if we have exhausted retries, 2459 * a command with a residual is in error in 2460 * this case. 2461 */ 2462 action = COMMAND_DONE_ERROR; 2463 } 2464 dcd_log(DCD_DEVINFO, dcd_label, 2465 CE_WARN, "incomplete %s- %s\n", 2466 (bp->b_flags & B_READ)? "read" : "write", 2467 (action == QUE_COMMAND)? "retrying" : 2468 "giving up"); 2469 } 2470 2471 /* 2472 * At this point pkt_resid reflects the residual: how many 2473 * bytes the command itself left untransferred. Add this 2474 * to b_resid, i.e. 2475 * the amount this driver never attempted to transfer, 2476 * to get the total number of bytes not transferred. 2477 */ 2478 if (action != QUE_COMMAND) { 2479 bp->b_resid += pkt->pkt_resid; 2480 } 2481 2482 } else if (pkt->pkt_reason != CMD_CMPLT) { 2483 action = dcd_handle_incomplete(un, bp); 2484 } 2485 2486 /* 2487 * If we are in the middle of syncing or dumping, we have got 2488 * here because dcd_transport has called us explicitly after 2489 * completing the command in a polled mode. We don't want to 2490 * have a recursive call into dcd_transport again.
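 * At panic time, therefore, a QUE_COMMAND action is downgraded
 * to COMMAND_DONE_ERROR below rather than being retransported.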
2491 */ 2492 if (ddi_in_panic() && (action == QUE_COMMAND)) { 2493 action = COMMAND_DONE_ERROR; 2494 } 2495 2496 /* 2497 * save pkt reason; consecutive failures are not reported unless 2498 * fatal 2499 * do not reset last_pkt_reason when the cmd was retried and 2500 * succeeded because 2501 * there maybe more commands comming back with last_pkt_reason 2502 */ 2503 if ((un->un_last_pkt_reason != pkt->pkt_reason) && 2504 ((pkt->pkt_reason != CMD_CMPLT) || 2505 (PKT_GET_RETRY_CNT(pkt) == 0))) { 2506 un->un_last_pkt_reason = pkt->pkt_reason; 2507 } 2508 2509 switch (action) { 2510 case COMMAND_DONE_ERROR: 2511 error: 2512 if (bp->b_resid == 0) { 2513 bp->b_resid = bp->b_bcount; 2514 } 2515 if (bp->b_error == 0) { 2516 struct dcd_cmd *cdbp = (struct dcd_cmd *)pkt->pkt_cdbp; 2517 if (cdbp->cmd == ATA_FLUSH_CACHE && 2518 (pkt->pkt_scbp[0] & STATUS_ATA_ERR) && 2519 (pkt->pkt_scbp[1] & ERR_ABORT)) { 2520 SET_BP_ERROR(bp, ENOTSUP); 2521 un->un_flush_not_supported = 1; 2522 } else { 2523 SET_BP_ERROR(bp, EIO); 2524 } 2525 } 2526 bp->b_flags |= B_ERROR; 2527 /*FALLTHROUGH*/ 2528 case COMMAND_DONE: 2529 dcddone_and_mutex_exit(un, bp); 2530 2531 TRACE_0(TR_FAC_DADA, TR_DCDINTR_COMMAND_DONE_END, 2532 "dcdintr_end (COMMAND_DONE)"); 2533 return; 2534 2535 case QUE_COMMAND: 2536 if (un->un_ncmds >= un->un_throttle) { 2537 struct diskhd *dp = &un->un_utab; 2538 2539 bp->b_actf = dp->b_actf; 2540 dp->b_actf = bp; 2541 2542 DCD_DO_KSTATS(un, kstat_waitq_enter, bp); 2543 2544 mutex_exit(DCD_MUTEX); 2545 goto exit; 2546 } 2547 2548 un->un_ncmds++; 2549 /* reset the pkt reason again */ 2550 pkt->pkt_reason = 0; 2551 DCD_DO_KSTATS(un, kstat_runq_enter, bp); 2552 mutex_exit(DCD_MUTEX); 2553 if ((status = dcd_transport(BP_PKT(bp))) != TRAN_ACCEPT) { 2554 struct diskhd *dp = &un->un_utab; 2555 2556 mutex_enter(DCD_MUTEX); 2557 un->un_ncmds--; 2558 if (status == TRAN_BUSY) { 2559 DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp); 2560 dcd_handle_tran_busy(bp, dp, un); 2561 mutex_exit(DCD_MUTEX); 2562 goto exit; 2563 } 2564 DCD_DO_ERRSTATS(un, dcd_transerrs); 2565 DCD_DO_KSTATS(un, kstat_runq_exit, bp); 2566 2567 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2568 "requeue of command fails (%x)\n", status); 2569 SET_BP_ERROR(bp, EIO); 2570 bp->b_resid = bp->b_bcount; 2571 2572 dcddone_and_mutex_exit(un, bp); 2573 goto exit; 2574 } 2575 break; 2576 2577 case JUST_RETURN: 2578 default: 2579 DCD_DO_KSTATS(un, kstat_waitq_enter, bp); 2580 mutex_exit(DCD_MUTEX); 2581 break; 2582 } 2583 2584 exit: 2585 TRACE_0(TR_FAC_DADA, TR_DCDINTR_END, "dcdintr_end"); 2586 } 2587 2588 2589 /* 2590 * Done with a command. 
2591 */ 2592 static void 2593 dcddone_and_mutex_exit(struct dcd_disk *un, register struct buf *bp) 2594 { 2595 struct diskhd *dp; 2596 2597 TRACE_1(TR_FAC_DADA, TR_DCDONE_START, "dcddone_start: un 0x%p", un); 2598 2599 _NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(&un->un_dcd->dcd_mutex)); 2600 2601 dp = &un->un_utab; 2602 if (bp == dp->b_forw) { 2603 dp->b_forw = NULL; 2604 } 2605 2606 if (un->un_stats) { 2607 ulong_t n_done = bp->b_bcount - bp->b_resid; 2608 if (bp->b_flags & B_READ) { 2609 IOSP->reads++; 2610 IOSP->nread += n_done; 2611 } else { 2612 IOSP->writes++; 2613 IOSP->nwritten += n_done; 2614 } 2615 } 2616 if (IO_PARTITION_STATS) { 2617 ulong_t n_done = bp->b_bcount - bp->b_resid; 2618 if (bp->b_flags & B_READ) { 2619 IOSP_PARTITION->reads++; 2620 IOSP_PARTITION->nread += n_done; 2621 } else { 2622 IOSP_PARTITION->writes++; 2623 IOSP_PARTITION->nwritten += n_done; 2624 } 2625 } 2626 2627 /* 2628 * Start the next one before releasing resources on this one 2629 */ 2630 if (un->un_state == DCD_STATE_SUSPENDED) { 2631 cv_broadcast(&un->un_disk_busy_cv); 2632 } else if (dp->b_actf && (un->un_ncmds < un->un_throttle) && 2633 (dp->b_forw == NULL && un->un_state != DCD_STATE_SUSPENDED)) { 2634 dcdstart(un); 2635 } 2636 2637 mutex_exit(DCD_MUTEX); 2638 2639 if (bp != un->un_sbufp) { 2640 dcd_destroy_pkt(BP_PKT(bp)); 2641 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, 2642 "regular done: resid %ld\n", bp->b_resid); 2643 } else { 2644 ASSERT(un->un_sbuf_busy); 2645 } 2646 TRACE_0(TR_FAC_DADA, TR_DCDDONE_BIODONE_CALL, "dcddone_biodone_call"); 2647 2648 biodone(bp); 2649 2650 (void) pm_idle_component(DCD_DEVINFO, 0); 2651 2652 TRACE_0(TR_FAC_DADA, TR_DCDDONE_END, "dcddone end"); 2653 } 2654 2655 2656 /* 2657 * reset the disk unless the transport layer has already 2658 * cleared the problem 2659 */ 2660 #define C1 (STAT_ATA_BUS_RESET|STAT_ATA_DEV_RESET|STAT_ATA_ABORTED) 2661 static void 2662 dcd_reset_disk(struct dcd_disk *un, struct dcd_pkt *pkt) 2663 { 2664 2665 if ((pkt->pkt_statistics & C1) == 0) { 2666 mutex_exit(DCD_MUTEX); 2667 if (!dcd_reset(ROUTE, RESET_ALL)) { 2668 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, 2669 "Reset failed"); 2670 } 2671 mutex_enter(DCD_MUTEX); 2672 } 2673 } 2674 2675 static int 2676 dcd_handle_incomplete(struct dcd_disk *un, struct buf *bp) 2677 { 2678 static char *fail = "ATA transport failed: reason '%s': %s\n"; 2679 static char *notresp = "disk not responding to selection\n"; 2680 int rval = COMMAND_DONE_ERROR; 2681 int action = COMMAND_SOFT_ERROR; 2682 struct dcd_pkt *pkt = BP_PKT(bp); 2683 int be_chatty = (un->un_state != DCD_STATE_SUSPENDED) && 2684 (bp != un->un_sbufp || !(pkt->pkt_flags & FLAG_SILENT)); 2685 2686 ASSERT(mutex_owned(DCD_MUTEX)); 2687 2688 switch (pkt->pkt_reason) { 2689 2690 case CMD_TIMEOUT: 2691 /* 2692 * This Indicates the already the HBA would have reset 2693 * so Just indicate to retry the command 2694 */ 2695 break; 2696 2697 case CMD_INCOMPLETE: 2698 action = dcd_check_error(un, bp); 2699 DCD_DO_ERRSTATS(un, dcd_transerrs); 2700 if (action == COMMAND_HARD_ERROR) { 2701 (void) dcd_reset_disk(un, pkt); 2702 } 2703 break; 2704 2705 case CMD_FATAL: 2706 /* 2707 * Something drastic has gone wrong 2708 */ 2709 break; 2710 case CMD_DMA_DERR: 2711 case CMD_DATA_OVR: 2712 /* FALLTHROUGH */ 2713 2714 default: 2715 /* 2716 * the target may still be running the command, 2717 * so we should try and reset that target. 
2718 */ 2719 DCD_DO_ERRSTATS(un, dcd_transerrs); 2720 if ((pkt->pkt_reason != CMD_RESET) && 2721 (pkt->pkt_reason != CMD_ABORTED)) { 2722 (void) dcd_reset_disk(un, pkt); 2723 } 2724 break; 2725 } 2726 2727 /* 2728 * If pkt_reason is CMD_RESET/ABORTED, chances are that this pkt got 2729 * reset/aborted because another disk on this bus caused it. 2730 * The disk that caused it, should get CMD_TIMEOUT with pkt_statistics 2731 * of STAT_TIMEOUT/STAT_DEV_RESET 2732 */ 2733 if ((pkt->pkt_reason == CMD_RESET) ||(pkt->pkt_reason == CMD_ABORTED)) { 2734 /* To be written : XXX */ 2735 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, 2736 "Command aborted\n"); 2737 } 2738 2739 if (bp == un->un_sbufp && (pkt->pkt_flags & FLAG_DIAGNOSE)) { 2740 rval = COMMAND_DONE_ERROR; 2741 } else { 2742 if ((rval == COMMAND_DONE_ERROR) && 2743 (action == COMMAND_SOFT_ERROR) && 2744 ((int)PKT_GET_RETRY_CNT(pkt) < dcd_retry_count)) { 2745 PKT_INCR_RETRY_CNT(pkt, 1); 2746 rval = QUE_COMMAND; 2747 } 2748 } 2749 2750 if (pkt->pkt_reason == CMD_INCOMPLETE && rval == COMMAND_DONE_ERROR) { 2751 /* 2752 * Looks like someone turned off this shoebox. 2753 */ 2754 if (un->un_state != DCD_STATE_OFFLINE) { 2755 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2756 (const char *) notresp); 2757 New_state(un, DCD_STATE_OFFLINE); 2758 } 2759 } else if (pkt->pkt_reason == CMD_FATAL) { 2760 /* 2761 * Suppressing the following message for the time being 2762 * dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2763 * (const char *) notresp); 2764 */ 2765 PKT_INCR_RETRY_CNT(pkt, 6); 2766 rval = COMMAND_DONE_ERROR; 2767 New_state(un, DCD_STATE_FATAL); 2768 } else if (be_chatty) { 2769 int in_panic = ddi_in_panic(); 2770 if (!in_panic || (rval == COMMAND_DONE_ERROR)) { 2771 if (((pkt->pkt_reason != un->un_last_pkt_reason) && 2772 (pkt->pkt_reason != CMD_RESET)) || 2773 (rval == COMMAND_DONE_ERROR) || 2774 (dcd_error_level == DCD_ERR_ALL)) { 2775 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2776 fail, dcd_rname(pkt->pkt_reason), 2777 (rval == COMMAND_DONE_ERROR) ? 2778 "giving up": "retrying command"); 2779 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, 2780 "retrycount=%x\n", 2781 PKT_GET_RETRY_CNT(pkt)); 2782 } 2783 } 2784 } 2785 error: 2786 return (rval); 2787 } 2788 2789 static int 2790 dcd_check_error(struct dcd_disk *un, struct buf *bp) 2791 { 2792 struct diskhd *dp = &un->un_utab; 2793 struct dcd_pkt *pkt = BP_PKT(bp); 2794 int rval = 0; 2795 unsigned char status; 2796 unsigned char error; 2797 2798 TRACE_0(TR_FAC_DADA, TR_DCD_CHECK_ERROR_START, "dcd_check_error_start"); 2799 ASSERT(mutex_owned(DCD_MUTEX)); 2800 2801 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, 2802 "Pkt: 0x%p dp: 0x%p\n", (void *)pkt, (void *)dp); 2803 2804 /* 2805 * Here we need to check status first and then if error is indicated 2806 * Then the error register. 2807 */ 2808 2809 status = (pkt->pkt_scbp)[0]; 2810 if ((status & STATUS_ATA_DWF) == STATUS_ATA_DWF) { 2811 /* 2812 * There has been a Device Fault - reason for such error 2813 * is vendor specific 2814 * Action to be taken is - Indicate error and reset device. 2815 */ 2816 2817 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, "Device Fault\n"); 2818 rval = COMMAND_HARD_ERROR; 2819 } else if ((status & STATUS_ATA_CORR) == STATUS_ATA_CORR) { 2820 2821 /* 2822 * The sector read or written is marginal and hence ECC 2823 * Correction has been applied. Indicate to repair 2824 * Here we need to probably re-assign based on the badblock 2825 * mapping. 
2826 */ 2827 2828 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2829 "Soft Error on block %x\n", 2830 ((struct dcd_cmd *)pkt->pkt_cdbp)->sector_num.lba_num); 2831 rval = COMMAND_SOFT_ERROR; 2832 } else if ((status & STATUS_ATA_ERR) == STATUS_ATA_ERR) { 2833 error = pkt->pkt_scbp[1]; 2834 2835 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2836 "Command:0x%x,Error:0x%x,Status:0x%x\n", 2837 GETATACMD((struct dcd_cmd *)pkt->pkt_cdbp), 2838 error, status); 2839 if ((error & ERR_AMNF) == ERR_AMNF) { 2840 /* Address make not found */ 2841 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2842 "Address Mark Not Found"); 2843 } else if ((error & ERR_TKONF) == ERR_TKONF) { 2844 /* Track 0 Not found */ 2845 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2846 "Track 0 Not found \n"); 2847 } else if ((error & ERR_IDNF) == ERR_IDNF) { 2848 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2849 " ID not found \n"); 2850 } else if ((error & ERR_UNC) == ERR_UNC) { 2851 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2852 "Uncorrectable data Error: Block %x\n", 2853 ((struct dcd_cmd *)pkt->pkt_cdbp)-> 2854 sector_num.lba_num); 2855 } else if ((error & ERR_BBK) == ERR_BBK) { 2856 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2857 "Bad block detected: Block %x\n", 2858 ((struct dcd_cmd *)pkt->pkt_cdbp)-> 2859 sector_num.lba_num); 2860 } else if ((error & ERR_ABORT) == ERR_ABORT) { 2861 /* Aborted Command */ 2862 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2863 " Aborted Command \n"); 2864 } 2865 /* 2866 * Return the soft error so that the command 2867 * will be retried. 2868 */ 2869 rval = COMMAND_SOFT_ERROR; 2870 } 2871 2872 TRACE_0(TR_FAC_DADA, TR_DCD_CHECK_ERROR_END, "dcd_check_error_end"); 2873 return (rval); 2874 } 2875 2876 2877 /* 2878 * System Crash Dump routine 2879 */ 2880 2881 #define NDUMP_RETRIES 5 2882 2883 static int 2884 dcddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 2885 { 2886 struct dcd_pkt *pkt; 2887 int i; 2888 struct buf local, *bp; 2889 int err; 2890 unsigned char com; 2891 diskaddr_t p_lblksrt; 2892 diskaddr_t lblocks; 2893 2894 GET_SOFT_STATE(dev); 2895 #ifdef lint 2896 part = part; 2897 #endif /* lint */ 2898 2899 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 2900 2901 if ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL) 2902 return (ENXIO); 2903 2904 if (cmlb_partinfo(un->un_dklbhandle, DCDPART(dev), 2905 &lblocks, &p_lblksrt, NULL, NULL, 0)) 2906 return (ENXIO); 2907 2908 if (blkno+nblk > lblocks) { 2909 return (EINVAL); 2910 } 2911 2912 2913 if ((un->un_state == DCD_STATE_SUSPENDED) || 2914 (un->un_state == DCD_STATE_PM_SUSPENDED)) { 2915 if (pm_raise_power(DCD_DEVINFO, 0, 2916 DCD_DEVICE_ACTIVE) != DDI_SUCCESS) { 2917 return (EIO); 2918 } 2919 } 2920 2921 /* 2922 * When cpr calls dcddump, we know that dad is in a 2923 * a good state, so no bus reset is required 2924 */ 2925 un->un_throttle = 0; 2926 2927 if ((un->un_state != DCD_STATE_SUSPENDED) && 2928 (un->un_state != DCD_STATE_DUMPING)) { 2929 2930 New_state(un, DCD_STATE_DUMPING); 2931 2932 /* 2933 * Reset the bus. I'd like to not have to do this, 2934 * but this is the safest thing to do... 2935 */ 2936 2937 if (dcd_reset(ROUTE, RESET_ALL) == 0) { 2938 return (EIO); 2939 } 2940 2941 } 2942 2943 blkno += p_lblksrt; 2944 2945 /* 2946 * It should be safe to call the allocator here without 2947 * worrying about being locked for DVMA mapping because 2948 * the address we're passed is already a DVMA mapping 2949 * 2950 * We are also not going to worry about semaphore ownership 2951 * in the dump buffer. Dumping is single threaded at present. 
2952 */ 2953 2954 bp = &local; 2955 bzero((caddr_t)bp, sizeof (*bp)); 2956 bp->b_flags = B_BUSY; 2957 bp->b_un.b_addr = addr; 2958 bp->b_bcount = nblk << DEV_BSHIFT; 2959 bp->b_resid = 0; 2960 2961 for (i = 0; i < NDUMP_RETRIES; i++) { 2962 bp->b_flags &= ~B_ERROR; 2963 if ((pkt = dcd_init_pkt(ROUTE, NULL, bp, 2964 (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN, 2965 PKT_CONSISTENT, NULL_FUNC, NULL)) != NULL) { 2966 break; 2967 } 2968 if (i == 0) { 2969 if (bp->b_flags & B_ERROR) { 2970 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2971 "no resources for dumping; " 2972 "error code: 0x%x, retrying", 2973 geterror(bp)); 2974 } else { 2975 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2976 "no resources for dumping; retrying"); 2977 } 2978 } else if (i != (NDUMP_RETRIES - 1)) { 2979 if (bp->b_flags & B_ERROR) { 2980 dcd_log(DCD_DEVINFO, dcd_label, CE_CONT, "no " 2981 "resources for dumping; error code: 0x%x, " 2982 "retrying\n", geterror(bp)); 2983 } 2984 } else { 2985 if (bp->b_flags & B_ERROR) { 2986 dcd_log(DCD_DEVINFO, dcd_label, CE_CONT, 2987 "no resources for dumping; " 2988 "error code: 0x%x, retries failed, " 2989 "giving up.\n", geterror(bp)); 2990 } else { 2991 dcd_log(DCD_DEVINFO, dcd_label, CE_CONT, 2992 "no resources for dumping; " 2993 "retries failed, giving up.\n"); 2994 } 2995 return (EIO); 2996 } 2997 delay(10); 2998 } 2999 if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) { 3000 com = ATA_WRITE_DMA; 3001 } else { 3002 if (un->un_dp->options & BLOCK_MODE) 3003 com = ATA_WRITE_MULTIPLE; 3004 else 3005 com = ATA_WRITE; 3006 } 3007 3008 makecommand(pkt, 0, com, blkno, ADD_LBA_MODE, 3009 (int)nblk*un->un_secsize, DATA_WRITE, 0); 3010 3011 for (err = EIO, i = 0; i < NDUMP_RETRIES && err == EIO; i++) { 3012 3013 if (dcd_poll(pkt) == 0) { 3014 switch (SCBP_C(pkt)) { 3015 case STATUS_GOOD: 3016 if (pkt->pkt_resid == 0) { 3017 err = 0; 3018 } 3019 break; 3020 case STATUS_ATA_BUSY: 3021 (void) dcd_reset(ROUTE, RESET_TARGET); 3022 break; 3023 default: 3024 mutex_enter(DCD_MUTEX); 3025 (void) dcd_reset_disk(un, pkt); 3026 mutex_exit(DCD_MUTEX); 3027 break; 3028 } 3029 } else if (i > NDUMP_RETRIES/2) { 3030 (void) dcd_reset(ROUTE, RESET_ALL); 3031 } 3032 3033 } 3034 dcd_destroy_pkt(pkt); 3035 return (err); 3036 } 3037 3038 /* 3039 * This routine implements the ioctl calls. It is called 3040 * from the device switch at normal priority. 
3041 */ 3042 /* ARGSUSED3 */ 3043 static int 3044 dcdioctl(dev_t dev, int cmd, intptr_t arg, int flag, 3045 cred_t *cred_p, int *rval_p) 3046 { 3047 auto int32_t data[512 / (sizeof (int32_t))]; 3048 struct dk_cinfo *info; 3049 struct dk_minfo media_info; 3050 struct udcd_cmd *scmd; 3051 int i, err; 3052 enum uio_seg uioseg = 0; 3053 enum dkio_state state = 0; 3054 #ifdef _MULTI_DATAMODEL 3055 struct dadkio_rwcmd rwcmd; 3056 #endif 3057 struct dadkio_rwcmd32 rwcmd32; 3058 struct dcd_cmd dcdcmd; 3059 3060 GET_SOFT_STATE(dev); 3061 #ifdef lint 3062 part = part; 3063 state = state; 3064 uioseg = uioseg; 3065 #endif /* lint */ 3066 3067 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, 3068 "dcd_ioctl : cmd %x, arg %lx\n", cmd, arg); 3069 3070 bzero((caddr_t)data, sizeof (data)); 3071 3072 switch (cmd) { 3073 3074 #ifdef DCDDEBUG 3075 /* 3076 * Following ioctl are for testing RESET/ABORTS 3077 */ 3078 #define DKIOCRESET (DKIOC|14) 3079 #define DKIOCABORT (DKIOC|15) 3080 3081 case DKIOCRESET: 3082 if (ddi_copyin((caddr_t)arg, (caddr_t)data, 4, flag)) 3083 return (EFAULT); 3084 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, 3085 "DKIOCRESET: data = 0x%x\n", data[0]); 3086 if (dcd_reset(ROUTE, data[0])) { 3087 return (0); 3088 } else { 3089 return (EIO); 3090 } 3091 case DKIOCABORT: 3092 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, 3093 "DKIOCABORT:\n"); 3094 if (dcd_abort(ROUTE, (struct dcd_pkt *)0)) { 3095 return (0); 3096 } else { 3097 return (EIO); 3098 } 3099 #endif 3100 3101 case DKIOCINFO: 3102 /* 3103 * Controller Information 3104 */ 3105 info = (struct dk_cinfo *)data; 3106 3107 mutex_enter(DCD_MUTEX); 3108 switch (un->un_dp->ctype) { 3109 default: 3110 info->dki_ctype = DKC_DIRECT; 3111 break; 3112 } 3113 mutex_exit(DCD_MUTEX); 3114 info->dki_cnum = ddi_get_instance(ddi_get_parent(DCD_DEVINFO)); 3115 (void) strcpy(info->dki_cname, 3116 ddi_get_name(ddi_get_parent(DCD_DEVINFO))); 3117 /* 3118 * Unit Information 3119 */ 3120 info->dki_unit = ddi_get_instance(DCD_DEVINFO); 3121 info->dki_slave = (Tgt(DCD_DCD_DEVP)<<3); 3122 (void) strcpy(info->dki_dname, ddi_driver_name(DCD_DEVINFO)); 3123 info->dki_flags = DKI_FMTVOL; 3124 info->dki_partition = DCDPART(dev); 3125 3126 /* 3127 * Max Transfer size of this device in blocks 3128 */ 3129 info->dki_maxtransfer = un->un_max_xfer_size / DEV_BSIZE; 3130 3131 /* 3132 * We can't get from here to there yet 3133 */ 3134 info->dki_addr = 0; 3135 info->dki_space = 0; 3136 info->dki_prio = 0; 3137 info->dki_vec = 0; 3138 3139 i = sizeof (struct dk_cinfo); 3140 if (ddi_copyout((caddr_t)data, (caddr_t)arg, i, flag)) 3141 return (EFAULT); 3142 else 3143 return (0); 3144 3145 case DKIOCGMEDIAINFO: 3146 /* 3147 * As dad target driver is used for IDE disks only 3148 * Can keep the return value hardcoded to FIXED_DISK 3149 */ 3150 media_info.dki_media_type = DK_FIXED_DISK; 3151 3152 mutex_enter(DCD_MUTEX); 3153 media_info.dki_lbsize = un->un_lbasize; 3154 media_info.dki_capacity = un->un_diskcapacity; 3155 mutex_exit(DCD_MUTEX); 3156 3157 if (ddi_copyout(&media_info, (caddr_t)arg, 3158 sizeof (struct dk_minfo), flag)) 3159 return (EFAULT); 3160 else 3161 return (0); 3162 3163 case DKIOCGGEOM: 3164 case DKIOCGVTOC: 3165 case DKIOCGETEFI: 3166 3167 mutex_enter(DCD_MUTEX); 3168 if (un->un_ncmds == 0) { 3169 if ((err = dcd_unit_ready(dev)) != 0) { 3170 mutex_exit(DCD_MUTEX); 3171 return (err); 3172 } 3173 } 3174 3175 mutex_exit(DCD_MUTEX); 3176 err = cmlb_ioctl(un->un_dklbhandle, dev, cmd, 3177 arg, flag, cred_p, rval_p, 0); 3178 return (err); 3179 3180 case DKIOCGAPART: 
3181 case DKIOCSAPART: 3182 case DKIOCSGEOM: 3183 case DKIOCSVTOC: 3184 case DKIOCSETEFI: 3185 case DKIOCPARTITION: 3186 case DKIOCPARTINFO: 3187 case DKIOCGMBOOT: 3188 case DKIOCSMBOOT: 3189 3190 err = cmlb_ioctl(un->un_dklbhandle, dev, cmd, 3191 arg, flag, cred_p, rval_p, 0); 3192 return (err); 3193 3194 case DIOCTL_RWCMD: 3195 if (drv_priv(cred_p) != 0) { 3196 return (EPERM); 3197 } 3198 3199 #ifdef _MULTI_DATAMODEL 3200 switch (ddi_model_convert_from(flag & FMODELS)) { 3201 case DDI_MODEL_NONE: 3202 if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd, 3203 sizeof (struct dadkio_rwcmd), flag)) { 3204 return (EFAULT); 3205 } 3206 rwcmd32.cmd = rwcmd.cmd; 3207 rwcmd32.flags = rwcmd.flags; 3208 rwcmd32.blkaddr = rwcmd.blkaddr; 3209 rwcmd32.buflen = rwcmd.buflen; 3210 rwcmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmd.bufaddr; 3211 break; 3212 case DDI_MODEL_ILP32: 3213 if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd32, 3214 sizeof (struct dadkio_rwcmd32), flag)) { 3215 return (EFAULT); 3216 } 3217 break; 3218 } 3219 #else 3220 if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd32, 3221 sizeof (struct dadkio_rwcmd32), flag)) { 3222 return (EFAULT); 3223 } 3224 #endif 3225 mutex_enter(DCD_MUTEX); 3226 3227 uioseg = UIO_SYSSPACE; 3228 scmd = (struct udcd_cmd *)data; 3229 scmd->udcd_cmd = &dcdcmd; 3230 /* 3231 * Convert the dadkio_rwcmd structure to udcd_cmd so that 3232 * it can take the normal path to get the io done 3233 */ 3234 if (rwcmd32.cmd == DADKIO_RWCMD_READ) { 3235 if ((un->un_dp->options & DMA_SUPPORTTED) == 3236 DMA_SUPPORTTED) 3237 scmd->udcd_cmd->cmd = ATA_READ_DMA; 3238 else 3239 scmd->udcd_cmd->cmd = ATA_READ; 3240 scmd->udcd_cmd->address_mode = ADD_LBA_MODE; 3241 scmd->udcd_cmd->direction = DATA_READ; 3242 scmd->udcd_flags |= UDCD_READ|UDCD_SILENT; 3243 } else if (rwcmd32.cmd == DADKIO_RWCMD_WRITE) { 3244 if ((un->un_dp->options & DMA_SUPPORTTED) == 3245 DMA_SUPPORTTED) 3246 scmd->udcd_cmd->cmd = ATA_WRITE_DMA; 3247 else 3248 scmd->udcd_cmd->cmd = ATA_WRITE; 3249 scmd->udcd_cmd->direction = DATA_WRITE; 3250 scmd->udcd_flags |= UDCD_WRITE|UDCD_SILENT; 3251 } else { 3252 mutex_exit(DCD_MUTEX); 3253 return (EINVAL); 3254 } 3255 3256 scmd->udcd_cmd->address_mode = ADD_LBA_MODE; 3257 scmd->udcd_cmd->features = 0; 3258 scmd->udcd_cmd->size = rwcmd32.buflen; 3259 scmd->udcd_cmd->sector_num.lba_num = rwcmd32.blkaddr; 3260 scmd->udcd_bufaddr = (caddr_t)(uintptr_t)rwcmd32.bufaddr; 3261 scmd->udcd_buflen = rwcmd32.buflen; 3262 scmd->udcd_timeout = (ushort_t)dcd_io_time; 3263 scmd->udcd_resid = 0ULL; 3264 scmd->udcd_status = 0; 3265 scmd->udcd_error_reg = 0; 3266 scmd->udcd_status_reg = 0; 3267 3268 mutex_exit(DCD_MUTEX); 3269 3270 i = dcdioctl_cmd(dev, scmd, UIO_SYSSPACE, UIO_USERSPACE); 3271 mutex_enter(DCD_MUTEX); 3272 /* 3273 * After return convert the status from scmd to 3274 * dadkio_status 3275 */ 3276 (void) dcd_translate(&(rwcmd32.status), scmd); 3277 rwcmd32.status.resid = scmd->udcd_resid; 3278 mutex_exit(DCD_MUTEX); 3279 3280 #ifdef _MULTI_DATAMODEL 3281 switch (ddi_model_convert_from(flag & FMODELS)) { 3282 case DDI_MODEL_NONE: { 3283 int counter; 3284 rwcmd.status.status = rwcmd32.status.status; 3285 rwcmd.status.resid = rwcmd32.status.resid; 3286 rwcmd.status.failed_blk_is_valid = 3287 rwcmd32.status.failed_blk_is_valid; 3288 rwcmd.status.failed_blk = rwcmd32.status.failed_blk; 3289 rwcmd.status.fru_code_is_valid = 3290 rwcmd32.status.fru_code_is_valid; 3291 rwcmd.status.fru_code = rwcmd32.status.fru_code; 3292 for (counter = 0; 3293 counter < DADKIO_ERROR_INFO_LEN; counter++) 3294 
rwcmd.status.add_error_info[counter] = 3295 rwcmd32.status.add_error_info[counter]; 3296 } 3297 /* Copy out the result back to the user program */ 3298 if (ddi_copyout((caddr_t)&rwcmd, (caddr_t)arg, 3299 sizeof (struct dadkio_rwcmd), flag)) { 3300 if (i != 0) { 3301 i = EFAULT; 3302 } 3303 } 3304 break; 3305 case DDI_MODEL_ILP32: 3306 /* Copy out the result back to the user program */ 3307 if (ddi_copyout((caddr_t)&rwcmd32, (caddr_t)arg, 3308 sizeof (struct dadkio_rwcmd32), flag)) { 3309 if (i != 0) { 3310 i = EFAULT; 3311 } 3312 } 3313 break; 3314 } 3315 #else 3316 /* Copy out the result back to the user program */ 3317 if (ddi_copyout((caddr_t)&rwcmd32, (caddr_t)arg, 3318 sizeof (struct dadkio_rwcmd32), flag)) { 3319 if (i != 0) 3320 i = EFAULT; 3321 } 3322 #endif 3323 return (i); 3324 3325 case UDCDCMD: { 3326 #ifdef _MULTI_DATAMODEL 3327 /* 3328 * For use when a 32 bit app makes a call into a 3329 * 64 bit ioctl 3330 */ 3331 struct udcd_cmd32 udcd_cmd_32_for_64; 3332 struct udcd_cmd32 *ucmd32 = &udcd_cmd_32_for_64; 3333 model_t model; 3334 #endif /* _MULTI_DATAMODEL */ 3335 3336 if (drv_priv(cred_p) != 0) { 3337 return (EPERM); 3338 } 3339 3340 scmd = (struct udcd_cmd *)data; 3341 3342 #ifdef _MULTI_DATAMODEL 3343 switch (model = ddi_model_convert_from(flag & FMODELS)) { 3344 case DDI_MODEL_ILP32: 3345 if (ddi_copyin((caddr_t)arg, ucmd32, 3346 sizeof (struct udcd_cmd32), flag)) { 3347 return (EFAULT); 3348 } 3349 /* 3350 * Convert the ILP32 udcd data from the 3351 * application to LP64 for internal use. 3352 */ 3353 udcd_cmd32toudcd_cmd(ucmd32, scmd); 3354 break; 3355 case DDI_MODEL_NONE: 3356 if (ddi_copyin((caddr_t)arg, scmd, sizeof (*scmd), 3357 flag)) { 3358 return (EFAULT); 3359 } 3360 break; 3361 } 3362 #else /* ! _MULTI_DATAMODEL */ 3363 if (ddi_copyin((caddr_t)arg, (caddr_t)scmd, 3364 sizeof (*scmd), flag)) { 3365 return (EFAULT); 3366 } 3367 #endif /* ! _MULTI_DATAMODEL */ 3368 3369 scmd->udcd_flags &= ~UDCD_NOINTR; 3370 uioseg = (flag & FKIOCTL)? UIO_SYSSPACE: UIO_USERSPACE; 3371 3372 i = dcdioctl_cmd(dev, scmd, uioseg, uioseg); 3373 #ifdef _MULTI_DATAMODEL 3374 switch (model) { 3375 case DDI_MODEL_ILP32: 3376 /* 3377 * Convert back to ILP32 before copyout to the 3378 * application 3379 */ 3380 udcd_cmdtoudcd_cmd32(scmd, ucmd32); 3381 if (ddi_copyout(ucmd32, (caddr_t)arg, 3382 sizeof (*ucmd32), flag)) { 3383 if (i != 0) 3384 i = EFAULT; 3385 } 3386 break; 3387 case DDI_MODEL_NONE: 3388 if (ddi_copyout(scmd, (caddr_t)arg, sizeof (*scmd), 3389 flag)) { 3390 if (i != 0) 3391 i = EFAULT; 3392 } 3393 break; 3394 } 3395 #else /* ! _MULTI_DATAMODEL */ 3396 if (ddi_copyout((caddr_t)scmd, (caddr_t)arg, 3397 sizeof (*scmd), flag)) { 3398 if (i != 0) 3399 i = EFAULT; 3400 } 3401 #endif 3402 return (i); 3403 } 3404 case DKIOCFLUSHWRITECACHE: { 3405 struct dk_callback *dkc = (struct dk_callback *)arg; 3406 struct dcd_pkt *pkt; 3407 struct buf *bp; 3408 int is_sync = 1; 3409 3410 mutex_enter(DCD_MUTEX); 3411 if (un->un_flush_not_supported || 3412 ! un->un_write_cache_enabled) { 3413 i = un->un_flush_not_supported ? ENOTSUP : 0; 3414 mutex_exit(DCD_MUTEX); 3415 /* 3416 * If a callback was requested: a callback will 3417 * always be done if the caller saw the 3418 * DKIOCFLUSHWRITECACHE ioctl return 0, and 3419 * never done if the caller saw the ioctl return 3420 * an error. 3421 */ 3422 if ((flag & FKIOCTL) && dkc != NULL && 3423 dkc->dkc_callback != NULL) { 3424 (*dkc->dkc_callback)(dkc->dkc_cookie, i); 3425 /* 3426 * Did callback and reported error.
3427 * Since we did a callback, ioctl 3428 * should return 0. 3429 */ 3430 i = 0; 3431 } 3432 return (i); 3433 } 3434 3435 /* 3436 * Get the special buffer 3437 */ 3438 while (un->un_sbuf_busy) { 3439 cv_wait(&un->un_sbuf_cv, DCD_MUTEX); 3440 } 3441 un->un_sbuf_busy = 1; 3442 bp = un->un_sbufp; 3443 mutex_exit(DCD_MUTEX); 3444 3445 pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL, 3446 NULL, (uint32_t)sizeof (struct dcd_cmd), 3447 2, PP_LEN, PKT_CONSISTENT, SLEEP_FUNC, (caddr_t)un); 3448 ASSERT(pkt != NULL); 3449 3450 makecommand(pkt, un->un_cmd_flags | FLAG_SILENT, 3451 ATA_FLUSH_CACHE, 0, ADD_LBA_MODE, 0, NO_DATA_XFER, 0); 3452 3453 pkt->pkt_comp = dcdintr; 3454 pkt->pkt_time = DCD_FLUSH_TIME; 3455 PKT_SET_BP(pkt, bp); 3456 3457 bp->av_back = (struct buf *)pkt; 3458 bp->b_forw = NULL; 3459 bp->b_flags = B_BUSY; 3460 bp->b_error = 0; 3461 bp->b_edev = dev; 3462 bp->b_dev = cmpdev(dev); 3463 bp->b_bcount = 0; 3464 bp->b_blkno = 0; 3465 bp->b_un.b_addr = 0; 3466 bp->b_iodone = NULL; 3467 bp->b_list = NULL; 3468 3469 if ((flag & FKIOCTL) && dkc != NULL && 3470 dkc->dkc_callback != NULL) { 3471 struct dk_callback *dkc2 = (struct dk_callback *) 3472 kmem_zalloc(sizeof (*dkc2), KM_SLEEP); 3473 bcopy(dkc, dkc2, sizeof (*dkc2)); 3474 3475 bp->b_list = (struct buf *)dkc2; 3476 bp->b_iodone = dcdflushdone; 3477 is_sync = 0; 3478 } 3479 3480 (void) dcdstrategy(bp); 3481 3482 i = 0; 3483 if (is_sync) { 3484 i = biowait(bp); 3485 (void) dcdflushdone(bp); 3486 } 3487 3488 return (i); 3489 } 3490 default: 3491 break; 3492 } 3493 return (ENOTTY); 3494 } 3495 3496 3497 static int 3498 dcdflushdone(struct buf *bp) 3499 { 3500 struct dcd_disk *un = ddi_get_soft_state(dcd_state, 3501 DCDUNIT(bp->b_edev)); 3502 struct dcd_pkt *pkt = BP_PKT(bp); 3503 struct dk_callback *dkc = (struct dk_callback *)bp->b_list; 3504 3505 ASSERT(un != NULL); 3506 ASSERT(bp == un->un_sbufp); 3507 ASSERT(pkt != NULL); 3508 3509 dcd_destroy_pkt(pkt); 3510 bp->av_back = NO_PKT_ALLOCATED; 3511 3512 if (dkc != NULL) { 3513 ASSERT(bp->b_iodone != NULL); 3514 (*dkc->dkc_callback)(dkc->dkc_cookie, geterror(bp)); 3515 kmem_free(dkc, sizeof (*dkc)); 3516 bp->b_iodone = NULL; 3517 bp->b_list = NULL; 3518 } 3519 3520 /* 3521 * Tell anybody who cares that the buffer is now free 3522 */ 3523 mutex_enter(DCD_MUTEX); 3524 un->un_sbuf_busy = 0; 3525 cv_signal(&un->un_sbuf_cv); 3526 mutex_exit(DCD_MUTEX); 3527 return (0); 3528 } 3529 3530 /* 3531 * dcdrunout: 3532 * the callback function for resource allocation 3533 * 3534 * XXX it would be preferable that dcdrunout() scans the whole 3535 * list for possible candidates for dcdstart(); this avoids 3536 * that a bp at the head of the list whose request cannot be 3537 * satisfied is retried again and again 3538 */ 3539 /*ARGSUSED*/ 3540 static int 3541 dcdrunout(caddr_t arg) 3542 { 3543 int serviced; 3544 struct dcd_disk *un; 3545 struct diskhd *dp; 3546 3547 TRACE_1(TR_FAC_DADA, TR_DCDRUNOUT_START, "dcdrunout_start: arg 0x%p", 3548 arg); 3549 serviced = 1; 3550 3551 un = (struct dcd_disk *)arg; 3552 dp = &un->un_utab; 3553 3554 /* 3555 * We now support passing a structure to the callback 3556 * routine. 
3557 */ 3558 ASSERT(un != NULL); 3559 mutex_enter(DCD_MUTEX); 3560 if ((un->un_ncmds < un->un_throttle) && (dp->b_forw == NULL)) { 3561 dcdstart(un); 3562 } 3563 if (un->un_state == DCD_STATE_RWAIT) { 3564 serviced = 0; 3565 } 3566 mutex_exit(DCD_MUTEX); 3567 TRACE_1(TR_FAC_DADA, TR_DCDRUNOUT_END, 3568 "dcdrunout_end: serviced %d", serviced); 3569 return (serviced); 3570 } 3571 3572 3573 /* 3574 * This routine called to see whether unit is (still) there. Must not 3575 * be called when un->un_sbufp is in use, and must not be called with 3576 * an unattached disk. Soft state of disk is restored to what it was 3577 * upon entry- up to caller to set the correct state. 3578 * 3579 * We enter with the disk mutex held. 3580 */ 3581 3582 /* ARGSUSED0 */ 3583 static int 3584 dcd_unit_ready(dev_t dev) 3585 { 3586 #ifndef lint 3587 auto struct udcd_cmd dcmd, *com = &dcmd; 3588 auto struct dcd_cmd cmdblk; 3589 #endif 3590 int error; 3591 #ifndef lint 3592 GET_SOFT_STATE(dev); 3593 #endif 3594 3595 /* 3596 * Now that we protect the special buffer with 3597 * a mutex, we could probably do a mutex_tryenter 3598 * on it here and return failure if it were held... 3599 */ 3600 3601 error = 0; 3602 return (error); 3603 } 3604 3605 /* ARGSUSED0 */ 3606 int 3607 dcdioctl_cmd(dev_t devp, struct udcd_cmd *in, enum uio_seg cdbspace, 3608 enum uio_seg dataspace) 3609 { 3610 3611 struct buf *bp; 3612 struct udcd_cmd *scmd; 3613 struct dcd_pkt *pkt; 3614 int err, rw; 3615 caddr_t cdb; 3616 int flags = 0; 3617 3618 GET_SOFT_STATE(devp); 3619 3620 #ifdef lint 3621 part = part; 3622 #endif 3623 3624 /* 3625 * Is this a request to reset the bus? 3626 * if so, we need to do reseting. 3627 */ 3628 3629 if (in->udcd_flags & UDCD_RESET) { 3630 int flag = RESET_TARGET; 3631 err = dcd_reset(ROUTE, flag) ? 0: EIO; 3632 return (err); 3633 } 3634 3635 scmd = in; 3636 3637 3638 /* Do some sanity checks */ 3639 if (scmd->udcd_buflen <= 0) { 3640 if (scmd->udcd_flags & (UDCD_READ | UDCD_WRITE)) { 3641 return (EINVAL); 3642 } else { 3643 scmd->udcd_buflen = 0; 3644 } 3645 } 3646 3647 /* Make a copy of the dcd_cmd passed */ 3648 cdb = kmem_zalloc(sizeof (struct dcd_cmd), KM_SLEEP); 3649 if (cdbspace == UIO_SYSSPACE) { 3650 flags |= FKIOCTL; 3651 } 3652 3653 if (ddi_copyin((void *)scmd->udcd_cmd, cdb, sizeof (struct dcd_cmd), 3654 flags)) { 3655 kmem_free(cdb, sizeof (struct dcd_cmd)); 3656 return (EFAULT); 3657 } 3658 scmd = (struct udcd_cmd *)kmem_alloc(sizeof (*scmd), KM_SLEEP); 3659 bcopy((caddr_t)in, (caddr_t)scmd, sizeof (*scmd)); 3660 scmd->udcd_cmd = (struct dcd_cmd *)cdb; 3661 rw = (scmd->udcd_flags & UDCD_READ) ? 
B_READ: B_WRITE; 3662 3663 3664 /* 3665 * Get the special buffer 3666 */ 3667 3668 mutex_enter(DCD_MUTEX); 3669 while (un->un_sbuf_busy) { 3670 if (cv_wait_sig(&un->un_sbuf_cv, DCD_MUTEX) == 0) { 3671 kmem_free(scmd->udcd_cmd, sizeof (struct dcd_cmd)); 3672 kmem_free((caddr_t)scmd, sizeof (*scmd)); 3673 mutex_exit(DCD_MUTEX); 3674 return (EINTR); 3675 } 3676 } 3677 3678 un->un_sbuf_busy = 1; 3679 bp = un->un_sbufp; 3680 mutex_exit(DCD_MUTEX); 3681 3682 3683 /* 3684 * If we are going to do actual I/O, let physio do all the 3685 * things 3686 */ 3687 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, 3688 "dcdioctl_cmd : buflen %x\n", scmd->udcd_buflen); 3689 3690 if (scmd->udcd_buflen) { 3691 auto struct iovec aiov; 3692 auto struct uio auio; 3693 struct uio *uio = &auio; 3694 3695 bzero((caddr_t)&auio, sizeof (struct uio)); 3696 bzero((caddr_t)&aiov, sizeof (struct iovec)); 3697 3698 aiov.iov_base = scmd->udcd_bufaddr; 3699 aiov.iov_len = scmd->udcd_buflen; 3700 3701 uio->uio_iov = &aiov; 3702 uio->uio_iovcnt = 1; 3703 uio->uio_resid = scmd->udcd_buflen; 3704 uio->uio_segflg = dataspace; 3705 3706 /* 3707 * Let physio do the rest... 3708 */ 3709 bp->av_back = NO_PKT_ALLOCATED; 3710 bp->b_forw = (struct buf *)scmd; 3711 err = physio(dcdstrategy, bp, devp, rw, dcdudcdmin, uio); 3712 } else { 3713 /* 3714 * We have to mimic what physio would do here. 3715 */ 3716 bp->av_back = NO_PKT_ALLOCATED; 3717 bp->b_forw = (struct buf *)scmd; 3718 bp->b_flags = B_BUSY | rw; 3719 bp->b_edev = devp; 3720 bp->b_dev = cmpdev(devp); 3721 bp->b_bcount = bp->b_blkno = 0; 3722 (void) dcdstrategy(bp); 3723 err = biowait(bp); 3724 } 3725 3726 done: 3727 if ((pkt = BP_PKT(bp)) != NULL) { 3728 bp->av_back = NO_PKT_ALLOCATED; 3729 /* we need to update the completion status of udcd command */ 3730 in->udcd_resid = bp->b_resid; 3731 in->udcd_status_reg = SCBP_C(pkt); 3732 /* XXX: we need to give error_reg also */ 3733 dcd_destroy_pkt(pkt); 3734 } 3735 /* 3736 * Tell anybody who cares that the buffer is now free 3737 */ 3738 mutex_enter(DCD_MUTEX); 3739 un->un_sbuf_busy = 0; 3740 cv_signal(&un->un_sbuf_cv); 3741 mutex_exit(DCD_MUTEX); 3742 3743 kmem_free(scmd->udcd_cmd, sizeof (struct dcd_cmd)); 3744 kmem_free((caddr_t)scmd, sizeof (*scmd)); 3745 return (err); 3746 } 3747 3748 static void 3749 dcdudcdmin(struct buf *bp) 3750 { 3751 3752 #ifdef lint 3753 bp = bp; 3754 #endif 3755 3756 } 3757 3758 /* 3759 * restart a cmd from timeout() context 3760 * 3761 * the cmd is expected to be in un_utab.b_forw. If this pointer is non-zero 3762 * a restart timeout request has been issued and no new timeouts should 3763 * be requested. 
b_forw is reset when the cmd eventually completes in 3764 * dcddone_and_mutex_exit() 3765 */ 3766 void 3767 dcdrestart(void *arg) 3768 { 3769 struct dcd_disk *un = (struct dcd_disk *)arg; 3770 struct buf *bp; 3771 int status; 3772 3773 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdrestart\n"); 3774 3775 mutex_enter(DCD_MUTEX); 3776 bp = un->un_utab.b_forw; 3777 if (bp) { 3778 un->un_ncmds++; 3779 DCD_DO_KSTATS(un, kstat_waitq_to_runq, bp); 3780 } 3781 3782 3783 if (bp) { 3784 struct dcd_pkt *pkt = BP_PKT(bp); 3785 3786 mutex_exit(DCD_MUTEX); 3787 3788 pkt->pkt_flags = 0; 3789 3790 if ((status = dcd_transport(pkt)) != TRAN_ACCEPT) { 3791 mutex_enter(DCD_MUTEX); 3792 DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp); 3793 un->un_ncmds--; 3794 if (status == TRAN_BUSY) { 3795 /* XXX : To be checked */ 3796 /* 3797 * if (un->un_throttle > 1) { 3798 * ASSERT(un->un_ncmds >= 0); 3799 * un->un_throttle = un->un_ncmds; 3800 * } 3801 */ 3802 un->un_reissued_timeid = 3803 timeout(dcdrestart, (caddr_t)un, 3804 DCD_BSY_TIMEOUT/500); 3805 mutex_exit(DCD_MUTEX); 3806 return; 3807 } 3808 DCD_DO_ERRSTATS(un, dcd_transerrs); 3809 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 3810 "dcdrestart transport failed (%x)\n", status); 3811 bp->b_resid = bp->b_bcount; 3812 SET_BP_ERROR(bp, EIO); 3813 3814 DCD_DO_KSTATS(un, kstat_waitq_exit, bp); 3815 un->un_reissued_timeid = 0L; 3816 dcddone_and_mutex_exit(un, bp); 3817 return; 3818 } 3819 mutex_enter(DCD_MUTEX); 3820 } 3821 un->un_reissued_timeid = 0L; 3822 mutex_exit(DCD_MUTEX); 3823 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdrestart done\n"); 3824 } 3825 3826 /* 3827 * This routine gets called to reset the throttle to its saved 3828 * value whenever we lower the throttle. 3829 */ 3830 void 3831 dcd_reset_throttle(caddr_t arg) 3832 { 3833 struct dcd_disk *un = (struct dcd_disk *)arg; 3834 struct diskhd *dp; 3835 3836 mutex_enter(DCD_MUTEX); 3837 dp = &un->un_utab; 3838 3839 /* 3840 * start any commands that didn't start while throttling. 3841 */ 3842 if (dp->b_actf && (un->un_ncmds < un->un_throttle) && 3843 (dp->b_forw == NULL)) { 3844 dcdstart(un); 3845 } 3846 mutex_exit(DCD_MUTEX); 3847 } 3848 3849 3850 /* 3851 * This routine handles the case when a TRAN_BUSY is 3852 * returned by the HBA. 3853 * 3854 * If there are commands already in the transport, the 3855 * bp can be put back on the queue and it will 3856 * be retried when the queue drains as commands 3857 * complete. But if there is no command in the transport 3858 * and it still returns busy, we have to retry the command 3859 * after some time, such as 10ms.
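 * The retry is scheduled with timeout(dcdrestart, un,
 * DCD_BSY_TIMEOUT/500); un_reissued_timeid guards against
 * scheduling that callback twice.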
3860 */ 3861 /* ARGSUSED0 */ 3862 static void 3863 dcd_handle_tran_busy(struct buf *bp, struct diskhd *dp, struct dcd_disk *un) 3864 { 3865 ASSERT(mutex_owned(DCD_MUTEX)); 3866 3867 3868 if (dp->b_forw == NULL || dp->b_forw == bp) { 3869 dp->b_forw = bp; 3870 } else if (dp->b_forw != bp) { 3871 bp->b_actf = dp->b_actf; 3872 dp->b_actf = bp; 3873 3874 } 3875 if (!un->un_reissued_timeid) { 3876 un->un_reissued_timeid = 3877 timeout(dcdrestart, (caddr_t)un, DCD_BSY_TIMEOUT/500); 3878 } 3879 } 3880 3881 static int 3882 dcd_write_deviceid(struct dcd_disk *un) 3883 { 3884 3885 int status; 3886 diskaddr_t blk; 3887 struct udcd_cmd ucmd; 3888 struct dcd_cmd cdb; 3889 struct dk_devid *dkdevid; 3890 uint_t *ip, chksum; 3891 int i; 3892 dev_t dev; 3893 3894 mutex_exit(DCD_MUTEX); 3895 if (cmlb_get_devid_block(un->un_dklbhandle, &blk, 0)) { 3896 mutex_enter(DCD_MUTEX); 3897 return (EINVAL); 3898 } 3899 mutex_enter(DCD_MUTEX); 3900 3901 /* Allocate the buffer */ 3902 dkdevid = kmem_zalloc(un->un_secsize, KM_SLEEP); 3903 3904 /* Fill in the revision */ 3905 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 3906 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 3907 3908 /* Copy in the device id */ 3909 bcopy(un->un_devid, &dkdevid->dkd_devid, 3910 ddi_devid_sizeof(un->un_devid)); 3911 3912 /* Calculate the chksum */ 3913 chksum = 0; 3914 ip = (uint_t *)dkdevid; 3915 for (i = 0; i < ((un->un_secsize - sizeof (int))/sizeof (int)); i++) 3916 chksum ^= ip[i]; 3917 3918 /* Fill in the checksum */ 3919 DKD_FORMCHKSUM(chksum, dkdevid); 3920 3921 (void) bzero((caddr_t)&ucmd, sizeof (ucmd)); 3922 (void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd)); 3923 3924 if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) { 3925 cdb.cmd = ATA_WRITE_DMA; 3926 } else { 3927 if (un->un_dp->options & BLOCK_MODE) 3928 cdb.cmd = ATA_WRITE_MULTIPLE; 3929 else 3930 cdb.cmd = ATA_WRITE; 3931 } 3932 cdb.size = un->un_secsize; 3933 cdb.sector_num.lba_num = blk; 3934 cdb.address_mode = ADD_LBA_MODE; 3935 cdb.direction = DATA_WRITE; 3936 3937 ucmd.udcd_flags = UDCD_WRITE; 3938 ucmd.udcd_cmd = &cdb; 3939 ucmd.udcd_bufaddr = (caddr_t)dkdevid; 3940 ucmd.udcd_buflen = un->un_secsize; 3941 ucmd.udcd_flags |= UDCD_SILENT; 3942 dev = makedevice(ddi_driver_major(DCD_DEVINFO), 3943 ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT); 3944 mutex_exit(DCD_MUTEX); 3945 status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE); 3946 mutex_enter(DCD_MUTEX); 3947 3948 kmem_free(dkdevid, un->un_secsize); 3949 return (status); 3950 } 3951 3952 static int 3953 dcd_read_deviceid(struct dcd_disk *un) 3954 { 3955 int status; 3956 diskaddr_t blk; 3957 struct udcd_cmd ucmd; 3958 struct dcd_cmd cdb; 3959 struct dk_devid *dkdevid; 3960 uint_t *ip; 3961 int chksum; 3962 int i, sz; 3963 dev_t dev; 3964 3965 mutex_exit(DCD_MUTEX); 3966 if (cmlb_get_devid_block(un->un_dklbhandle, &blk, 0)) { 3967 mutex_enter(DCD_MUTEX); 3968 return (EINVAL); 3969 } 3970 mutex_enter(DCD_MUTEX); 3971 3972 dkdevid = kmem_alloc(un->un_secsize, KM_SLEEP); 3973 3974 (void) bzero((caddr_t)&ucmd, sizeof (ucmd)); 3975 (void) bzero((caddr_t)&cdb, sizeof (cdb)); 3976 3977 if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) { 3978 cdb.cmd = ATA_READ_DMA; 3979 } else { 3980 if (un->un_dp->options & BLOCK_MODE) 3981 cdb.cmd = ATA_READ_MULTIPLE; 3982 else 3983 cdb.cmd = ATA_READ; 3984 } 3985 cdb.size = un->un_secsize; 3986 cdb.sector_num.lba_num = blk; 3987 cdb.address_mode = ADD_LBA_MODE; 3988 cdb.direction = DATA_READ; 3989 3990 ucmd.udcd_flags = UDCD_READ; 3991 ucmd.udcd_cmd = &cdb; 3992 
ucmd.udcd_bufaddr = (caddr_t)dkdevid; 3993 ucmd.udcd_buflen = un->un_secsize; 3994 ucmd.udcd_flags |= UDCD_SILENT; 3995 dev = makedevice(ddi_driver_major(DCD_DEVINFO), 3996 ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT); 3997 mutex_exit(DCD_MUTEX); 3998 status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE); 3999 mutex_enter(DCD_MUTEX); 4000 4001 if (status != 0) { 4002 kmem_free((caddr_t)dkdevid, un->un_secsize); 4003 return (status); 4004 } 4005 4006 /* Validate the revision */ 4007 4008 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) || 4009 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) { 4010 kmem_free((caddr_t)dkdevid, un->un_secsize); 4011 return (EINVAL); 4012 } 4013 4014 /* Calculate the checksum */ 4015 chksum = 0; 4016 ip = (uint_t *)dkdevid; 4017 for (i = 0; i < ((un->un_secsize - sizeof (int))/sizeof (int)); i++) 4018 chksum ^= ip[i]; 4019 4020 /* Compare the checksums */ 4021 4022 if (DKD_GETCHKSUM(dkdevid) != chksum) { 4023 kmem_free((caddr_t)dkdevid, un->un_secsize); 4024 return (EINVAL); 4025 } 4026 4027 /* Validate the device id */ 4028 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) { 4029 kmem_free((caddr_t)dkdevid, un->un_secsize); 4030 return (EINVAL); 4031 } 4032 4033 /* return a copy of the device id */ 4034 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid); 4035 un->un_devid = (ddi_devid_t)kmem_alloc(sz, KM_SLEEP); 4036 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); 4037 kmem_free((caddr_t)dkdevid, un->un_secsize); 4038 4039 return (0); 4040 } 4041 4042 /* 4043 * Return the device id for the device. 4044 * 1. If the device ID exists then just return it - nothing to do in that case. 4045 * 2. Build one from the drive's model number and serial number. 4046 * 3. If there is a problem in building it from serial/model #, then try 4047 * to read it from the acyl region of the disk. 4048 * Note: If this function is unable to return a valid ID then the calling 4049 * point will invoke the routine to create a fabricated ID and store it on 4050 * the acyl region of the disk. 4051 */ 4052 static ddi_devid_t 4053 dcd_get_devid(struct dcd_disk *un) 4054 { 4055 int rc; 4056 4057 /* If already registered, return that value */ 4058 if (un->un_devid != NULL) 4059 return (un->un_devid); 4060 4061 /* Build a devid from model and serial number, if present */ 4062 rc = dcd_make_devid_from_serial(un); 4063 4064 if (rc != DDI_SUCCESS) { 4065 /* Read the devid from the disk. */ 4066 if (dcd_read_deviceid(un)) 4067 return (NULL); 4068 } 4069 4070 (void) ddi_devid_register(DCD_DEVINFO, un->un_devid); 4071 return (un->un_devid); 4072 } 4073 4074 4075 static ddi_devid_t 4076 dcd_create_devid(struct dcd_disk *un) 4077 { 4078 if (ddi_devid_init(DCD_DEVINFO, DEVID_FAB, 0, NULL, (ddi_devid_t *) 4079 &un->un_devid) == DDI_FAILURE) 4080 return (NULL); 4081 4082 if (dcd_write_deviceid(un)) { 4083 ddi_devid_free(un->un_devid); 4084 un->un_devid = NULL; 4085 return (NULL); 4086 } 4087 4088 (void) ddi_devid_register(DCD_DEVINFO, un->un_devid); 4089 return (un->un_devid); 4090 } 4091 4092 /* 4093 * Build a devid from the model and serial number, if present. 4094 * Return DDI_SUCCESS or DDI_FAILURE.
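 * The payload built below is the model string, an '=' separator and
 * the serial number. For example (values illustrative only), a drive
 * reporting model "ST34342A" and serial "K0301" would get the devid
 * payload "ST34342A=K0301".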
4095 */ 4096 static int 4097 dcd_make_devid_from_serial(struct dcd_disk *un) 4098 { 4099 int rc = DDI_SUCCESS; 4100 char *hwid; 4101 char *model; 4102 int model_len; 4103 char *serno; 4104 int serno_len; 4105 int total_len; 4106 4107 /* initialize the model and serial number information */ 4108 model = un->un_dcd->dcd_ident->dcd_model; 4109 model_len = DCD_MODEL_NUMBER_LENGTH; 4110 serno = un->un_dcd->dcd_ident->dcd_drvser; 4111 serno_len = DCD_SERIAL_NUMBER_LENGTH; 4112 4113 /* Verify the model and serial number */ 4114 dcd_validate_model_serial(model, &model_len, model_len); 4115 if (model_len == 0) { 4116 rc = DDI_FAILURE; 4117 goto out; 4118 } 4119 dcd_validate_model_serial(serno, &serno_len, serno_len); 4120 if (serno_len == 0) { 4121 rc = DDI_FAILURE; 4122 goto out; 4123 } 4124 4125 /* 4126 * The device ID will be concatenation of the model number, 4127 * the '=' separator, the serial number. Allocate 4128 * the string and concatenate the components. 4129 */ 4130 total_len = model_len + 1 + serno_len; 4131 hwid = kmem_alloc(total_len, KM_SLEEP); 4132 bcopy((caddr_t)model, (caddr_t)hwid, model_len); 4133 bcopy((caddr_t)"=", (caddr_t)&hwid[model_len], 1); 4134 bcopy((caddr_t)serno, (caddr_t)&hwid[model_len + 1], serno_len); 4135 4136 /* Initialize the device ID, trailing NULL not included */ 4137 rc = ddi_devid_init(DCD_DEVINFO, DEVID_ATA_SERIAL, total_len, 4138 hwid, (ddi_devid_t *)&un->un_devid); 4139 4140 /* Free the allocated string */ 4141 kmem_free(hwid, total_len); 4142 4143 out: return (rc); 4144 } 4145 4146 /* 4147 * Test for a valid model or serial number. Assume that a valid representation 4148 * contains at least one character that is neither a space, 0 digit, or NULL. 4149 * Trim trailing blanks and NULLS from returned length. 4150 */ 4151 static void 4152 dcd_validate_model_serial(char *str, int *retlen, int totallen) 4153 { 4154 char ch; 4155 boolean_t ret = B_FALSE; 4156 int i; 4157 int tb; 4158 4159 for (i = 0, tb = 0; i < totallen; i++) { 4160 ch = *str++; 4161 if ((ch != ' ') && (ch != '\0') && (ch != '0')) 4162 ret = B_TRUE; 4163 if ((ch == ' ') || (ch == '\0')) 4164 tb++; 4165 else 4166 tb = 0; 4167 } 4168 4169 if (ret == B_TRUE) { 4170 /* Atleast one non 0 or blank character. 
		 */
		*retlen = totallen - tb;
	} else {
		*retlen = 0;
	}
}

#ifndef lint
void
clean_print(dev_info_t *dev, char *label, uint_t level,
    char *title, char *data, int len)
{
	int i;
	char buf[256];

	(void) sprintf(buf, "%s:", title);
	for (i = 0; i < len; i++) {
		(void) sprintf(&buf[strlen(buf)], "0x%x ", (data[i] & 0xff));
	}
	(void) sprintf(&buf[strlen(buf)], "\n");

	dcd_log(dev, label, level, "%s", buf);
}
#endif /* Not lint */

#ifndef lint
/*
 * Print a piece of inquiry data, cleaned up for non-printable
 * characters, stopping at the first space character after the
 * beginning of the passed string.
 */
void
inq_fill(char *p, int l, char *s)
{
	unsigned i = 0;
	char c;

	while (i++ < l) {
		if ((c = *p++) < ' ' || c >= 0177) {
			c = '*';
		} else if (i != 1 && c == ' ') {
			break;
		}
		*s++ = c;
	}
	*s++ = 0;
}
#endif /* Not lint */

char *
dcd_sname(uchar_t status)
{
	switch (status & STATUS_ATA_MASK) {
	case STATUS_GOOD:
		return ("good status");

	case STATUS_ATA_BUSY:
		return ("busy");

	default:
		return ("<unknown status>");
	}
}

/* ARGSUSED0 */
char *
dcd_rname(int reason)
{
	static char *rnames[] = {
		"cmplt",
		"incomplete",
		"dma_derr",
		"tran_err",
		"reset",
		"aborted",
		"timeout",
		"data_ovr",
	};
	if (reason > CMD_DATA_OVR) {
		return ("<unknown reason>");
	} else {
		return (rnames[reason]);
	}
}


/* ARGSUSED0 */
int
dcd_check_wp(dev_t dev)
{
	return (0);
}

/*
 * Create device error kstats
 */
static int
dcd_create_errstats(struct dcd_disk *un, int instance)
{
	char kstatname[KSTAT_STRLEN];

	if (un->un_errstats == (kstat_t *)0) {
		(void) sprintf(kstatname, "dad%d,error", instance);
		un->un_errstats = kstat_create("daderror", instance, kstatname,
		    "device_error", KSTAT_TYPE_NAMED,
		    sizeof (struct dcd_errstats) / sizeof (kstat_named_t),
		    KSTAT_FLAG_PERSISTENT);

		if (un->un_errstats) {
			struct dcd_errstats *dtp;

			dtp = (struct dcd_errstats *)un->un_errstats->ks_data;
			kstat_named_init(&dtp->dcd_softerrs, "Soft Errors",
			    KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_harderrs, "Hard Errors",
			    KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_transerrs,
			    "Transport Errors", KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_model, "Model",
			    KSTAT_DATA_CHAR);
			kstat_named_init(&dtp->dcd_revision, "Revision",
			    KSTAT_DATA_CHAR);
			kstat_named_init(&dtp->dcd_serial, "Serial No",
			    KSTAT_DATA_CHAR);
			kstat_named_init(&dtp->dcd_capacity, "Size",
			    KSTAT_DATA_ULONGLONG);
			kstat_named_init(&dtp->dcd_rq_media_err, "Media Error",
			    KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_rq_ntrdy_err,
			    "Device Not Ready", KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_rq_nodev_err, "No Device",
			    KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_rq_recov_err, "Recoverable",
			    KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_rq_illrq_err,
			    "Illegal Request", KSTAT_DATA_UINT32);

			un->un_errstats->ks_private = un;
			un->un_errstats->ks_update = nulldev;
			kstat_install(un->un_errstats);

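			/*
			 * These counters are visible from userland with,
			 * e.g., kstat(1M): "kstat -m daderror" (module
			 * "daderror", name "dad<instance>,error").
			 */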
			(void) strncpy(&dtp->dcd_model.value.c[0],
			    un->un_dcd->dcd_ident->dcd_model, 16);
			(void) strncpy(&dtp->dcd_serial.value.c[0],
			    un->un_dcd->dcd_ident->dcd_drvser, 16);
			(void) strncpy(&dtp->dcd_revision.value.c[0],
			    un->un_dcd->dcd_ident->dcd_fw, 8);
			dtp->dcd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_diskcapacity *
			    (uint64_t)un->un_lbasize);
		}
	}
	return (0);
}


/*
 * This has been moved up from the DADA layer, since all it does is retry
 * the command when the target is busy or the command does not complete.
 */
int
dcd_poll(struct dcd_pkt *pkt)
{
	int busy_count, rval = -1, savef;
	clock_t savet;
	void (*savec)();

	/*
	 * Save the old flags
	 */
	savef = pkt->pkt_flags;
	savec = pkt->pkt_comp;
	savet = pkt->pkt_time;

	pkt->pkt_flags |= FLAG_NOINTR;

	/*
	 * Set pkt_comp to NULL
	 */
	pkt->pkt_comp = 0;

	/*
	 * Set the packet time for the polled command
	 */
	if (pkt->pkt_time == 0) {
		pkt->pkt_time = DCD_POLL_TIMEOUT;
	}

	/* Now transport the command */
	for (busy_count = 0; busy_count < dcd_poll_busycnt; busy_count++) {
		if ((rval = dcd_transport(pkt)) == TRAN_ACCEPT) {
			if (pkt->pkt_reason == CMD_INCOMPLETE &&
			    pkt->pkt_state == 0) {
				delay(100);
			} else if (pkt->pkt_reason == CMD_CMPLT) {
				rval = 0;
				break;
			}
		}
		if (rval == TRAN_BUSY) {
			delay(100);
			continue;
		}
	}

	pkt->pkt_flags = savef;
	pkt->pkt_comp = savec;
	pkt->pkt_time = savet;
	return (rval);
}


void
dcd_translate(struct dadkio_status32 *statp, struct udcd_cmd *cmdp)
{
	if (cmdp->udcd_status_reg & STATUS_ATA_BUSY)
		statp->status = DADKIO_STAT_NOT_READY;
	else if (cmdp->udcd_status_reg & STATUS_ATA_DWF)
		statp->status = DADKIO_STAT_HARDWARE_ERROR;
	else if (cmdp->udcd_status_reg & STATUS_ATA_CORR)
		statp->status = DADKIO_STAT_SOFT_ERROR;
	else if (cmdp->udcd_status_reg & STATUS_ATA_ERR) {
		/*
		 * The error register is valid only when BSY and DRQ are
		 * not set. It is assumed that the HBA has checked this
		 * before handing over the data.
		 */
		if (cmdp->udcd_error_reg & ERR_AMNF)
			statp->status = DADKIO_STAT_NOT_FORMATTED;
		else if (cmdp->udcd_error_reg & ERR_TKONF)
			statp->status = DADKIO_STAT_NOT_FORMATTED;
		else if (cmdp->udcd_error_reg & ERR_ABORT)
			statp->status = DADKIO_STAT_ILLEGAL_REQUEST;
		else if (cmdp->udcd_error_reg & ERR_IDNF)
			statp->status = DADKIO_STAT_NOT_FORMATTED;
		else if (cmdp->udcd_error_reg & ERR_UNC)
			statp->status = DADKIO_STAT_BUS_ERROR;
		else if (cmdp->udcd_error_reg & ERR_BBK)
			statp->status = DADKIO_STAT_MEDIUM_ERROR;
	} else
		statp->status = DADKIO_STAT_NO_ERROR;
}

static void
dcd_flush_cache(struct dcd_disk *un)
{
	struct dcd_pkt *pkt;
	int retry_count;

	if ((pkt = dcd_init_pkt(ROUTE, NULL, NULL,
	    (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
	    PKT_CONSISTENT, NULL_FUNC, NULL)) == NULL) {
		return;
	}

	makecommand(pkt, 0, ATA_FLUSH_CACHE, 0, ADD_LBA_MODE, 0,
	    NO_DATA_XFER, 0);

	/*
	 * Send the command. It may fail on some disks, since FLUSH CACHE
	 * is not a mandatory command as per ATA-4. Retry up to 3 times on
	 * failure; the retry count is arbitrarily chosen.
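	 * (In ATA-4, FLUSH CACHE is opcode E7h; the 48-bit FLUSH CACHE
	 * EXT variant was only added later, in ATA-6.)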
	 * A retry is needed because, per the spec, FLUSH CACHE can fail
	 * as a result of an unrecoverable error encountered while writing
	 * data, and a subsequent command should continue flushing the
	 * cache.
	 */
	for (retry_count = 0; retry_count < 3; retry_count++) {
		/*
		 * Set the packet fields.
		 */
		pkt->pkt_comp = 0;
		pkt->pkt_time = DCD_POLL_TIMEOUT;
		pkt->pkt_flags |= FLAG_FORCENOINTR;
		pkt->pkt_flags |= FLAG_NOINTR;
		if (dcd_transport(pkt) == TRAN_ACCEPT) {
			if (pkt->pkt_reason == CMD_CMPLT) {
				break;
			}
		}
		/*
		 * Note: the one-second wait here matches the delay(100)
		 * (ticks) used in the dcd_poll routine at the default
		 * hz of 100.
		 */
		drv_usecwait(1000000);
	}
	(void) dcd_destroy_pkt(pkt);
}

static int
dcd_send_lb_rw_cmd(dev_info_t *devi, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, uchar_t cmd)
{
	struct dcd_pkt *pkt;
	struct buf *bp;
	diskaddr_t real_addr = start_block;
	size_t buffer_size = reqlength;
	uchar_t command, tmp;
	int i, rval = 0;
	struct dcd_disk *un;

	un = ddi_get_soft_state(dcd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	bp = dcd_alloc_consistent_buf(ROUTE, (struct buf *)NULL,
	    buffer_size, B_READ, NULL_FUNC, NULL);
	if (!bp) {
		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
		    "no bp for disk label\n");
		return (ENOMEM);
	}

	pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
	    bp, (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
	    PKT_CONSISTENT, NULL_FUNC, NULL);

	if (!pkt) {
		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
		    "no memory for disk label\n");
		dcd_free_consistent_buf(bp);
		return (ENOMEM);
	}

	if (cmd == TG_READ) {
		bzero(bp->b_un.b_addr, buffer_size);
		tmp = DATA_READ;
	} else {
		bcopy((caddr_t)bufaddr, bp->b_un.b_addr, buffer_size);
		tmp = DATA_WRITE;
	}

	mutex_enter(DCD_MUTEX);
	if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
		if (cmd == TG_READ) {
			command = ATA_READ_DMA;
		} else {
			command = ATA_WRITE_DMA;
		}
	} else {
		if (cmd == TG_READ) {
			if (un->un_dp->options & BLOCK_MODE)
				command = ATA_READ_MULTIPLE;
			else
				command = ATA_READ;
		} else {
			if (un->un_dp->options & BLOCK_MODE)
				command = ATA_WRITE_MULTIPLE;
			else
				command = ATA_WRITE;
		}
	}
	mutex_exit(DCD_MUTEX);
	(void) makecommand(pkt, 0, command, real_addr, ADD_LBA_MODE,
	    buffer_size, tmp, 0);

	for (i = 0; i < 3; i++) {
		if (dcd_poll(pkt) || SCBP_C(pkt) != STATUS_GOOD ||
		    (pkt->pkt_state & STATE_XFERRED_DATA) == 0 ||
		    (pkt->pkt_resid != 0)) {
			DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
			    "Status %x, state %x, resid %lx\n",
			    SCBP_C(pkt), pkt->pkt_state, pkt->pkt_resid);
			rval = EIO;
		} else {
			/* Success: clear any failure from an earlier try */
			rval = 0;
			break;
		}
	}

	if (rval != 0) {
		dcd_destroy_pkt(pkt);
		dcd_free_consistent_buf(bp);
		return (EIO);
	}

	if (cmd == TG_READ) {
		bcopy(bp->b_un.b_addr, bufaddr, reqlength);
		rval = 0;
	}

	dcd_destroy_pkt(pkt);
	dcd_free_consistent_buf(bp);
	return (rval);
}

static int
dcd_compute_dk_capacity(struct dcd_device *devp, diskaddr_t *capacity)
{
	diskaddr_t cap;
	diskaddr_t no_of_lbasec;

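	/*
	 * The capacity is taken as the larger of the CHS product and the
	 * 28-bit LBA sector count reported in the IDENTIFY data
	 * (dcd_addrsec; IDENTIFY words 60-61 in ATA-4).
	 */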
	cap = devp->dcd_ident->dcd_fixcyls *
	    devp->dcd_ident->dcd_heads *
	    devp->dcd_ident->dcd_sectors;
	no_of_lbasec = devp->dcd_ident->dcd_addrsec[1];
	no_of_lbasec = no_of_lbasec << 16;
	no_of_lbasec = no_of_lbasec | devp->dcd_ident->dcd_addrsec[0];

	if (no_of_lbasec > cap) {
		cap = no_of_lbasec;
	}

	if (cap != ((uint32_t)-1))
		*capacity = cap;
	else
		return (EINVAL);
	return (0);
}

/*ARGSUSED5*/
static int
dcd_lb_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	return (dcd_send_lb_rw_cmd(devi, bufaddr, start_block,
	    reqlength, cmd));
}

static int
dcd_lb_getphygeom(dev_info_t *devi, cmlb_geom_t *phygeomp)
{
	struct dcd_device *devp;
	uint32_t no_of_lbasec, capacity, calculated_cylinders;

	devp = ddi_get_driver_private(devi);

	if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
		if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
			phygeomp->g_ncyl = devp->dcd_ident->dcd_fixcyls - 2;
			phygeomp->g_acyl = 2;
			phygeomp->g_nhead = devp->dcd_ident->dcd_heads;
			phygeomp->g_nsect = devp->dcd_ident->dcd_sectors;

			no_of_lbasec = devp->dcd_ident->dcd_addrsec[1];
			no_of_lbasec = no_of_lbasec << 16;
			no_of_lbasec = no_of_lbasec |
			    devp->dcd_ident->dcd_addrsec[0];
			capacity = devp->dcd_ident->dcd_fixcyls *
			    devp->dcd_ident->dcd_heads *
			    devp->dcd_ident->dcd_sectors;
			if (no_of_lbasec > capacity) {
				capacity = no_of_lbasec;
				if (capacity > NUM_SECTORS_32G) {
					/*
					 * If the capacity is greater than
					 * 32GB, use 255 sectors per track.
					 * This is good up to 128GB of disk
					 * capacity, which is the ATA-4
					 * (28-bit LBA) limit.
					 */
					phygeomp->g_nsect = 255;
				}

				/*
				 * If the disk capacity is >= 128GB, the
				 * number of addressable sectors will be
				 * capped at 0xFFFFFFF in the IDENTIFY
				 * info. In that case set the number of
				 * physical cylinders to the maximum
				 * 16-bit value.
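				 *
				 * For example (hypothetical figures): with
				 * nhead = 16 and nsect = 255, a drive
				 * reporting 0xFFFFFFF sectors gives
				 * 0xFFFFFFF / (16 * 255) = 65793 cylinders,
				 * which is >= USHRT_MAX (65535), so g_ncyl
				 * is clamped to USHRT_MAX - 2 below.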
4639 */ 4640 4641 calculated_cylinders = (capacity) / 4642 (phygeomp->g_nhead * phygeomp->g_nsect); 4643 if (calculated_cylinders >= USHRT_MAX) { 4644 phygeomp->g_ncyl = USHRT_MAX - 2; 4645 } else { 4646 phygeomp->g_ncyl = 4647 calculated_cylinders - 2; 4648 } 4649 } 4650 4651 phygeomp->g_capacity = capacity; 4652 phygeomp->g_intrlv = 0; 4653 phygeomp->g_rpm = 5400; 4654 phygeomp->g_secsize = devp->dcd_ident->dcd_secsiz; 4655 4656 return (0); 4657 } else 4658 return (ENOTSUP); 4659 } else { 4660 return (EINVAL); 4661 } 4662 } 4663 4664 4665 /*ARGSUSED3*/ 4666 static int 4667 dcd_lb_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie) 4668 { 4669 struct dcd_disk *un; 4670 4671 un = ddi_get_soft_state(dcd_state, ddi_get_instance(devi)); 4672 4673 if (un == NULL) 4674 return (ENXIO); 4675 4676 switch (cmd) { 4677 case TG_GETPHYGEOM: 4678 return (dcd_lb_getphygeom(devi, (cmlb_geom_t *)arg)); 4679 4680 case TG_GETVIRTGEOM: 4681 return (-1); 4682 4683 case TG_GETCAPACITY: 4684 case TG_GETBLOCKSIZE: 4685 mutex_enter(DCD_MUTEX); 4686 if (un->un_diskcapacity <= 0) { 4687 mutex_exit(DCD_MUTEX); 4688 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 4689 "invalid disk capacity\n"); 4690 return (EIO); 4691 } 4692 if (cmd == TG_GETCAPACITY) 4693 *(diskaddr_t *)arg = un->un_diskcapacity; 4694 else 4695 *(uint32_t *)arg = DEV_BSIZE; 4696 4697 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "capacity %x\n", 4698 un->un_diskcapacity); 4699 mutex_exit(DCD_MUTEX); 4700 return (0); 4701 4702 case TG_GETATTR: 4703 mutex_enter(DCD_MUTEX); 4704 *(tg_attribute_t *)arg = un->un_tgattribute; 4705 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, 4706 "media_is_writable %x\n", 4707 un->un_tgattribute.media_is_writable); 4708 mutex_exit(DCD_MUTEX); 4709 return (0); 4710 default: 4711 return (ENOTTY); 4712 } 4713 } 4714