/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Direct Attached disk driver for SPARC machines.
 */

/*
 * Includes, Declarations and Local Data
 */
#include <sys/dada/dada.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/cdio.h>
#include <sys/vtoc.h>
#include <sys/dada/targets/daddef.h>
#include <sys/dada/targets/dadpriv.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/aio_req.h>
#include <sys/note.h>
#include <sys/cmlb.h>

/*
 * Global Error Levels for Error Reporting
 */
int dcd_error_level = DCD_ERR_RETRYABLE;

/*
 * Local Static Data
 */
static int dcd_io_time = DCD_IO_TIME;
static int dcd_retry_count = DCD_RETRY_COUNT;
#ifndef lint
static int dcd_report_pfa = 1;
#endif
static int dcd_rot_delay = 4;
static int dcd_poll_busycnt = DCD_POLL_TIMEOUT;

/*
 * Local Function Prototypes
 */
static int dcdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int dcdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int dcdstrategy(struct buf *bp);
static int dcddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int dcdioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int dcdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int dcdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int dcd_prop_op(dev_t, dev_info_t *, ddi_prop_op_t, int,
    char *, caddr_t, int *);
static int dcdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int dcdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);


static void dcd_free_softstate(struct dcd_disk *un, dev_info_t *devi);
static int dcd_doattach(dev_info_t *devi, int (*f)());
static int dcd_validate_geometry(struct dcd_disk *un);
static ddi_devid_t dcd_get_devid(struct dcd_disk *un);
static ddi_devid_t dcd_create_devid(struct dcd_disk *un);
static int dcd_make_devid_from_serial(struct dcd_disk *un);
static void dcd_validate_model_serial(char *str, int *retlen, int totallen);
static int dcd_read_deviceid(struct dcd_disk *un);
static int dcd_write_deviceid(struct dcd_disk *un);
static int dcd_poll(struct dcd_pkt *pkt);
static char *dcd_rname(int reason);
static void dcd_flush_cache(struct dcd_disk *un);

static int dcd_compute_dk_capacity(struct dcd_device *devp,
    diskaddr_t *capacity);
static int dcd_send_lb_rw_cmd(dev_info_t *devinfo, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, uchar_t cmd);

static void dcdmin(struct buf *bp);

static int dcdioctl_cmd(dev_t, struct udcd_cmd *,
    enum uio_seg, enum uio_seg);

static void dcdstart(struct dcd_disk *un);
static void dcddone_and_mutex_exit(struct dcd_disk *un, struct buf *bp);
static void make_dcd_cmd(struct dcd_disk *un, struct buf *bp, int (*f)());
static void dcdudcdmin(struct buf *bp);

static int dcdrunout(caddr_t);
static int dcd_check_wp(dev_t dev);
static int dcd_unit_ready(dev_t dev);
static void dcd_handle_tran_busy(struct buf *bp, struct diskhd *dp,
    struct dcd_disk *un);
static void dcdintr(struct dcd_pkt *pkt);
static int dcd_handle_incomplete(struct dcd_disk *un, struct buf *bp);
static void dcd_offline(struct dcd_disk *un, int bechatty);
static int dcd_ready_and_valid(dev_t dev, struct dcd_disk *un);
static void dcd_reset_disk(struct dcd_disk *un, struct dcd_pkt *pkt);
static void dcd_translate(struct dadkio_status32 *statp, struct udcd_cmd *cmdp);
static int dcdflushdone(struct buf *bp);

/* Function prototypes for cmlb */

static int dcd_lb_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie);

static int dcd_lb_getphygeom(dev_info_t *devi, cmlb_geom_t *phygeomp);
static int dcd_lb_getinfo(dev_info_t *devi, int cmd, void *arg,
    void *tg_cookie);


static cmlb_tg_ops_t dcd_lb_ops = {
    TG_DK_OPS_VERSION_1,
    dcd_lb_rdwr,
    dcd_lb_getinfo
};

/*
 * Error and Logging Functions
 */
#ifndef lint
static void clean_print(dev_info_t *dev, char *label, uint_t level,
    char *title, char *data, int len);
static void dcdrestart(void *arg);
#endif /* lint */

static int dcd_check_error(struct dcd_disk *un, struct buf *bp);

/*
 * Error statistics create/update functions
 */
static int dcd_create_errstats(struct dcd_disk *, int);


/*PRINTFLIKE4*/
extern void dcd_log(dev_info_t *, char *, uint_t, const char *, ...)
    __KPRINTFLIKE(4);
extern void makecommand(struct dcd_pkt *, int, uchar_t, uint32_t,
    uchar_t, uint32_t, uchar_t, uchar_t);


/*
 * Configuration Routines
 */
static int dcdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int dcdprobe(dev_info_t *devi);
static int dcdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int dcddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int dcdreset(dev_info_t *dip, ddi_reset_cmd_t cmd);
static int dcd_dr_detach(dev_info_t *devi);
static int dcdpower(dev_info_t *devi, int component, int level);

static void *dcd_state;
static int dcd_max_instance;
static char *dcd_label = "dad";

static char *diskokay = "disk okay\n";

#if DEBUG || lint
#define	DCDDEBUG
#endif

int dcd_test_flag = 0;

/*
 * Debugging macros
 */
#ifdef DCDDEBUG
static int dcddebug = 0;
#define	DEBUGGING	(dcddebug > 1)
#define	DAD_DEBUG	if (dcddebug == 1) dcd_log
#define	DAD_DEBUG2	if (dcddebug > 1) dcd_log
#else /* DCDDEBUG */
#define	dcddebug	(0)
#define	DEBUGGING	(0)
#define	DAD_DEBUG	if (0) dcd_log
#define	DAD_DEBUG2	if (0) dcd_log
#endif
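
/*
 * On a DEBUG build (where DCDDEBUG is defined), tracing can be enabled
 * at run time by patching dcddebug, e.g. (assuming the symbol is
 * visible to the debugger):
 *
 *	echo 'dcddebug/W 2' | mdb -kw
 *
 * dcddebug == 1 enables DAD_DEBUG; values > 1 enable DAD_DEBUG2 and
 * DEBUGGING as well. On non-DEBUG builds the macros compile away.
 */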
/*
 * We use the pkt_private area for storing the bp and the retry counts.
 * XXX: Is this really useful?
 */
struct dcd_pkt_private {
    struct buf	*dcdpp_bp;
    short	dcdpp_retry_count;
    short	dcdpp_victim_retry_count;
};


_NOTE(SCHEME_PROTECTS_DATA("Unique per pkt", dcd_pkt_private buf))

#define	PP_LEN	(sizeof (struct dcd_pkt_private))

#define	PKT_SET_BP(pkt, bp)	\
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_bp = bp
#define	PKT_GET_BP(pkt) \
	(((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_bp)


#define	PKT_SET_RETRY_CNT(pkt, n) \
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count = n

#define	PKT_GET_RETRY_CNT(pkt) \
	(((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count)

#define	PKT_INCR_RETRY_CNT(pkt, n) \
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count += n

#define	PKT_SET_VICTIM_RETRY_CNT(pkt, n) \
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count \
	    = n

#define	PKT_GET_VICTIM_RETRY_CNT(pkt) \
	(((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count)
#define	PKT_INCR_VICTIM_RETRY_CNT(pkt, n) \
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count \
	    += n

#define	DISK_NOT_READY_RETRY_COUNT	(dcd_retry_count / 2)


/*
 * Urk!
 */
#define	SET_BP_ERROR(bp, err)	\
	bioerror(bp, err);

#define	IOSP			KSTAT_IO_PTR(un->un_stats)
#define	IO_PARTITION_STATS	un->un_pstats[DCDPART(bp->b_edev)]
#define	IOSP_PARTITION		KSTAT_IO_PTR(IO_PARTITION_STATS)

#define	DCD_DO_KSTATS(un, kstat_function, bp) \
	ASSERT(mutex_owned(DCD_MUTEX)); \
	if (bp != un->un_sbufp) { \
		if (un->un_stats) { \
			kstat_function(IOSP); \
		} \
		if (IO_PARTITION_STATS) { \
			kstat_function(IOSP_PARTITION); \
		} \
	}

#define	DCD_DO_ERRSTATS(un, x) \
	if (un->un_errstats) { \
		struct dcd_errstats *dtp; \
		dtp = (struct dcd_errstats *)un->un_errstats->ks_data; \
		dtp->x.value.ui32++; \
	}

#define	GET_SOFT_STATE(dev) \
	struct dcd_disk *un; \
	int instance, part; \
	minor_t minor = getminor(dev); \
\
	part = minor & DCDPART_MASK; \
	instance = minor >> DCDUNIT_SHIFT; \
	if ((un = ddi_get_soft_state(dcd_state, instance)) == NULL) \
		return (ENXIO);

#define	LOGICAL_BLOCK_ALIGN(blkno, blknoshift) \
	(((blkno) & ((1 << (blknoshift)) - 1)) == 0)

/*
 * After the following number of sectors, the cylinder number spills over
 * 0xFFFF if sectors = 63 and heads = 16.
 */
#define	NUM_SECTORS_32G	0x3EFFC10
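
/*
 * That is, 0xFFFF cylinders * 16 heads * 63 sectors/track =
 * 65535 * 1008 = 66059280 (0x3EFFC10) sectors, which with 512-byte
 * sectors is just under 32GB.
 */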

/*
 * Configuration Data
 */

/*
 * Device driver ops vector
 */

static struct cb_ops dcd_cb_ops = {
    dcdopen,			/* open */
    dcdclose,			/* close */
    dcdstrategy,		/* strategy */
    nodev,			/* print */
    dcddump,			/* dump */
    dcdread,			/* read */
    dcdwrite,			/* write */
    dcdioctl,			/* ioctl */
    nodev,			/* devmap */
    nodev,			/* mmap */
    nodev,			/* segmap */
    nochpoll,			/* poll */
    dcd_prop_op,		/* cb_prop_op */
    0,				/* streamtab */
    D_64BIT | D_MP | D_NEW,	/* Driver compatibility flag */
    CB_REV,			/* cb_rev */
    dcdaread,			/* async I/O read entry point */
    dcdawrite			/* async I/O write entry point */
};

static struct dev_ops dcd_ops = {
    DEVO_REV,			/* devo_rev, */
    0,				/* refcnt */
    dcdinfo,			/* info */
    nulldev,			/* identify */
    dcdprobe,			/* probe */
    dcdattach,			/* attach */
    dcddetach,			/* detach */
    dcdreset,			/* reset */
    &dcd_cb_ops,		/* driver operations */
    (struct bus_ops *)0,	/* bus operations */
    dcdpower			/* power */
};


/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

static struct modldrv modldrv = {
    &mod_driverops,		/* Type of module. This one is a driver */
    "DAD Disk Driver %I%",	/* Name of the module. */
    &dcd_ops,			/* driver ops */
};



static struct modlinkage modlinkage = {
    MODREV_1, &modldrv, NULL
};

/*
 * the dcd_attach_mutex only protects dcd_max_instance in multi-threaded
 * attach situations
 */
static kmutex_t dcd_attach_mutex;

int
_init(void)
{
    int e;

    if ((e = ddi_soft_state_init(&dcd_state, sizeof (struct dcd_disk),
        DCD_MAXUNIT)) != 0)
        return (e);

    mutex_init(&dcd_attach_mutex, NULL, MUTEX_DRIVER, NULL);
    e = mod_install(&modlinkage);
    if (e != 0) {
        mutex_destroy(&dcd_attach_mutex);
        ddi_soft_state_fini(&dcd_state);
        return (e);
    }

    return (e);
}

int
_fini(void)
{
    int e;

    if ((e = mod_remove(&modlinkage)) != 0)
        return (e);

    ddi_soft_state_fini(&dcd_state);
    mutex_destroy(&dcd_attach_mutex);

    return (e);
}

int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}
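
/*
 * dcdprobe() claims a node only when the utility probe finds a device
 * whose identify data shows a non-packet (i.e. non-ATAPI),
 * non-removable ATA target; anything else is declined and left for
 * other drivers to claim.
 */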
static int
dcdprobe(dev_info_t *devi)
{
    struct dcd_device *devp;
    int rval = DDI_PROBE_PARTIAL;
    int instance;

    devp = ddi_get_driver_private(devi);
    instance = ddi_get_instance(devi);

    /*
     * Keep a count of how many disks (i.e. the highest instance number)
     * we have.
     * XXX currently not used but maybe useful later again
     */
    mutex_enter(&dcd_attach_mutex);
    if (instance > dcd_max_instance)
        dcd_max_instance = instance;
    mutex_exit(&dcd_attach_mutex);

    DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
        "dcdprobe:\n");

    if (ddi_get_soft_state(dcd_state, instance) != NULL)
        return (DDI_PROBE_PARTIAL);

    /*
     * Turn around and call the utility probe routine
     * to see whether we actually have a disk attached.
     */
    DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
        "dcdprobe: %x\n", dcd_probe(devp, NULL_FUNC));

    switch (dcd_probe(devp, NULL_FUNC)) {
    default:
    case DCDPROBE_NORESP:
    case DCDPROBE_NONCCS:
    case DCDPROBE_NOMEM:
    case DCDPROBE_FAILURE:
    case DCDPROBE_BUSY:
        break;

    case DCDPROBE_EXISTS:
        /*
         * Check whether it is an ATA device and then
         * return SUCCESS.
         */
        DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
            "config %x\n", devp->dcd_ident->dcd_config);
        if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
            if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
                rval = DDI_PROBE_SUCCESS;
            } else
                rval = DDI_PROBE_FAILURE;
        } else {
            rval = DDI_PROBE_FAILURE;
        }
        break;
    }
    dcd_unprobe(devp);

    DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
        "dcdprobe returns %x\n", rval);

    return (rval);
}


/*ARGSUSED*/
static int
dcdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
    int instance, rval;
    struct dcd_device *devp;
    struct dcd_disk *un;
    struct diskhd *dp;
    char *pm_comp[] =
        { "NAME=ide-disk", "0=standby", "1=idle", "2=active" };

    /* CONSTCOND */
    ASSERT(NO_COMPETING_THREADS);


    devp = ddi_get_driver_private(devi);
    instance = ddi_get_instance(devi);
    DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG, "Attach Started\n");

    switch (cmd) {
    case DDI_ATTACH:
        break;

    case DDI_RESUME:
        if (!(un = ddi_get_soft_state(dcd_state, instance)))
            return (DDI_FAILURE);
        mutex_enter(DCD_MUTEX);
        Restore_state(un);
        /*
         * Restore the state which was saved, to give
         * the right state in un_last_state.
         */
        un->un_last_state = un->un_save_state;
        un->un_throttle = 2;
        cv_broadcast(&un->un_suspend_cv);
        /*
         * Raise the power level of the device to active.
         */
        mutex_exit(DCD_MUTEX);
        (void) pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE);
        mutex_enter(DCD_MUTEX);

        /*
         * Start the unit - if this is a low-activity device,
         * commands in the queue will have to wait until new
         * commands come in, which may take awhile.
         * Also, we specifically don't check un_ncmds
         * because we know that there really are no
         * commands in progress after the unit was suspended,
         * and we could have reached the throttle level, been
         * suspended, and have no new commands coming in for
         * awhile. Highly unlikely, but so is the low-
         * activity disk scenario.
         */
        dp = &un->un_utab;
        if (dp->b_actf && (dp->b_forw == NULL)) {
            dcdstart(un);
        }

        mutex_exit(DCD_MUTEX);
        return (DDI_SUCCESS);

    default:
        return (DDI_FAILURE);
    }

    if (dcd_doattach(devi, SLEEP_FUNC) == DDI_FAILURE) {
        return (DDI_FAILURE);
    }

    if (!(un = (struct dcd_disk *)
        ddi_get_soft_state(dcd_state, instance))) {
        return (DDI_FAILURE);
    }
    devp->dcd_private = (ataopaque_t)un;

    /*
     * Add a zero-length attribute to tell the world we support
     * kernel ioctls (for layered drivers)
     */
    (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
        DDI_KERNEL_IOCTL, NULL, 0);

    /*
     * Since the dad device does not have the 'reg' property,
     * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
     * The following code is to tell cpr that this device
     * does need to be suspended and resumed.
     */
    (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
        "pm-hardware-state", (caddr_t)"needs-suspend-resume");

    /*
     * Initialize power management bookkeeping;
     * create the components - in the IDE case there is one
     * component with 3 levels, the levels being active, idle
     * and standby.
     */
    rval = ddi_prop_update_string_array(DDI_DEV_T_NONE,
        devi, "pm-components", pm_comp, 4);
    if (rval == DDI_PROP_SUCCESS) {
        /*
         * Ignore the return value of pm_raise_power.
         * Even if we checked the return value and
         * removed the property created above, the PM
         * framework will not honour the change after
         * the first call to pm_raise_power. Hence, the
         * removal of that property does not help if
         * pm_raise_power fails.
         */
        (void) pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE);
    }

    ddi_report_dev(devi);

    cmlb_alloc_handle(&un->un_dklbhandle);

    if (cmlb_attach(devi,
        &dcd_lb_ops,
        0,
        0,
        0,
        DDI_NT_BLOCK_CHAN,
        CMLB_FAKE_GEOM_LABEL_IOCTLS_VTOC8,
        un->un_dklbhandle,
        0) != 0) {
        cmlb_free_handle(&un->un_dklbhandle);
        dcd_free_softstate(un, devi);
        return (DDI_FAILURE);
    }

    mutex_enter(DCD_MUTEX);
    (void) dcd_validate_geometry(un);

    /* Get the devid; create a devid ONLY IF we could not get one */
    if (dcd_get_devid(un) == NULL) {
        /* Create the fab'd devid */
        (void) dcd_create_devid(un);
    }
    mutex_exit(DCD_MUTEX);

    return (DDI_SUCCESS);
}

static void
dcd_free_softstate(struct dcd_disk *un, dev_info_t *devi)
{
    struct dcd_device *devp;
    int instance = ddi_get_instance(devi);

    devp = ddi_get_driver_private(devi);

    if (un) {
        sema_destroy(&un->un_semoclose);
        cv_destroy(&un->un_sbuf_cv);
        cv_destroy(&un->un_state_cv);
        cv_destroy(&un->un_disk_busy_cv);
        cv_destroy(&un->un_suspend_cv);

        /*
         * Deallocate command packet resources.
         */
        if (un->un_sbufp)
            freerbuf(un->un_sbufp);
        if (un->un_dp) {
            kmem_free((caddr_t)un->un_dp, sizeof (*un->un_dp));
        }
        /*
         * Unregister the devid and free the devid resources
         * allocated.
         */
        ddi_devid_unregister(DCD_DEVINFO);
        if (un->un_devid) {
            ddi_devid_free(un->un_devid);
            un->un_devid = NULL;
        }

        /*
         * Delete kstats. Kstats for non-CD devices are deleted
         * in dcdclose.
         */
        if (un->un_stats) {
            kstat_delete(un->un_stats);
        }
    }

    /*
     * Cleanup scsi_device resources.
     */
    ddi_soft_state_free(dcd_state, instance);
    devp->dcd_private = (ataopaque_t)0;
    /* unprobe scsi device */
    dcd_unprobe(devp);

    /* Remove properties created during attach */
    ddi_prop_remove_all(devi);
}

static int
dcddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
    int instance;
    struct dcd_disk *un;
    clock_t wait_cmds_complete;

    instance = ddi_get_instance(devi);

    if (!(un = ddi_get_soft_state(dcd_state, instance)))
        return (DDI_FAILURE);

    switch (cmd) {
    case DDI_DETACH:
        return (dcd_dr_detach(devi));

    case DDI_SUSPEND:
        mutex_enter(DCD_MUTEX);
        if (un->un_state == DCD_STATE_SUSPENDED) {
            mutex_exit(DCD_MUTEX);
            return (DDI_SUCCESS);
        }
        un->un_throttle = 0;
        /*
         * Save the last state first
         */
        un->un_save_state = un->un_last_state;

        New_state(un, DCD_STATE_SUSPENDED);

        /*
         * Wait till the current operation completes. If we are
         * in the resource wait state (with an intr outstanding)
         * then we need to wait till the intr completes and
         * starts the next cmd. We wait for
         * DCD_WAIT_CMDS_COMPLETE seconds before failing the
         * DDI_SUSPEND.
         */
        wait_cmds_complete = ddi_get_lbolt();
        wait_cmds_complete +=
            DCD_WAIT_CMDS_COMPLETE * drv_usectohz(1000000);

        while (un->un_ncmds) {
            if (cv_timedwait(&un->un_disk_busy_cv,
                DCD_MUTEX, wait_cmds_complete) == -1) {
                /*
                 * Commands didn't finish in the
                 * specified time, fail the DDI_SUSPEND.
                 */
                DAD_DEBUG2(DCD_DEVINFO, dcd_label,
                    DCD_DEBUG, "dcddetach: SUSPEND "
                    "failed due to outstanding cmds\n");
                Restore_state(un);
                mutex_exit(DCD_MUTEX);
                return (DDI_FAILURE);
            }
        }
        mutex_exit(DCD_MUTEX);
        return (DDI_SUCCESS);
    }
    return (DDI_FAILURE);
}

/*
 * The reset entry point gets invoked at system shutdown time or through
 * the CPR code at system suspend.
 * It flushes the cache; this is expected to be the last I/O operation to
 * the disk before system reset/power off.
 */
/*ARGSUSED*/
static int
dcdreset(dev_info_t *dip, ddi_reset_cmd_t cmd)
{
    struct dcd_disk *un;
    int instance;

    instance = ddi_get_instance(dip);

    if (!(un = ddi_get_soft_state(dcd_state, instance)))
        return (DDI_FAILURE);

    dcd_flush_cache(un);

    return (DDI_SUCCESS);
}


static int
dcd_dr_detach(dev_info_t *devi)
{
    struct dcd_device *devp;
    struct dcd_disk *un;

    /*
     * Get scsi_device structure for this instance.
     */
    if ((devp = ddi_get_driver_private(devi)) == NULL)
        return (DDI_FAILURE);

    /*
     * Get dcd_disk structure containing target 'private' information
     */
    un = (struct dcd_disk *)devp->dcd_private;

    /*
     * Verify there are NO outstanding commands issued to this device,
     * i.e. un_ncmds == 0.
     * It's possible to have outstanding commands through the physio
     * code path, even though everything's closed.
     */
#ifndef lint
    _NOTE(COMPETING_THREADS_NOW);
#endif
    mutex_enter(DCD_MUTEX);
    if (un->un_ncmds) {
        mutex_exit(DCD_MUTEX);
        _NOTE(NO_COMPETING_THREADS_NOW);
        return (DDI_FAILURE);
    }

    mutex_exit(DCD_MUTEX);

    cmlb_detach(un->un_dklbhandle, 0);
    cmlb_free_handle(&un->un_dklbhandle);

    /*
     * Lower the power state of the device,
     * i.e. the minimum power consumption state - sleep.
     */
    (void) pm_lower_power(DCD_DEVINFO, 0, DCD_DEVICE_STANDBY);

    _NOTE(NO_COMPETING_THREADS_NOW);

    /*
     * At this point there are no competing threads anymore;
     * release active MT locks and all device resources.
     */
    dcd_free_softstate(un, devi);

    return (DDI_SUCCESS);
}

static int
dcdpower(dev_info_t *devi, int component, int level)
{
    struct dcd_pkt *pkt;
    struct dcd_disk *un;
    int instance;
    uchar_t cmd;


    instance = ddi_get_instance(devi);

    if (!(un = ddi_get_soft_state(dcd_state, instance)) ||
        (DCD_DEVICE_STANDBY > level) || (level > DCD_DEVICE_ACTIVE) ||
        component != 0) {
        return (DDI_FAILURE);
    }

    mutex_enter(DCD_MUTEX);
    /*
     * If there are active commands for the device (or the device
     * will be active soon) and at the same time there is a request
     * to lower power, return failure.
     */
    if ((un->un_ncmds) && (level != DCD_DEVICE_ACTIVE)) {
        mutex_exit(DCD_MUTEX);
        return (DDI_FAILURE);
    }

    if ((un->un_state == DCD_STATE_OFFLINE) ||
        (un->un_state == DCD_STATE_FATAL)) {
        mutex_exit(DCD_MUTEX);
        return (DDI_FAILURE);
    }

    if (level == DCD_DEVICE_ACTIVE) {
        /*
         * No need to fire any command; just set the state structure
         * to indicate the previous state and set the level to active.
         */
        un->un_power_level = DCD_DEVICE_ACTIVE;
        if (un->un_state == DCD_STATE_PM_SUSPENDED)
            Restore_state(un);
        mutex_exit(DCD_MUTEX);
    } else {
        pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
            NULL, (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
            PKT_CONSISTENT, NULL_FUNC, NULL);

        if (pkt == (struct dcd_pkt *)NULL) {
            mutex_exit(DCD_MUTEX);
            return (DDI_FAILURE);
        }

        switch (level) {
        case DCD_DEVICE_IDLE:
            cmd = ATA_IDLE_IMMEDIATE;
            break;

        case DCD_DEVICE_STANDBY:
            cmd = ATA_STANDBY_IMMEDIATE;
            break;
        }

        makecommand(pkt, 0, cmd, 0, 0, 0, NO_DATA_XFER, 0);
        mutex_exit(DCD_MUTEX);
        /*
         * Issue the appropriate command
         */
        if ((dcd_poll(pkt)) || (SCBP_C(pkt) != STATUS_GOOD)) {
            dcd_destroy_pkt(pkt);
            return (DDI_FAILURE);
        }
        dcd_destroy_pkt(pkt);
        mutex_enter(DCD_MUTEX);
        if (un->un_state != DCD_STATE_PM_SUSPENDED)
            New_state(un, DCD_STATE_PM_SUSPENDED);
        un->un_power_level = level;
        mutex_exit(DCD_MUTEX);
    }

    return (DDI_SUCCESS);
}
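
/*
 * To summarize the level handling above: the single PM component
 * exported in dcdattach advertises "0=standby", "1=idle" and
 * "2=active", and dcdpower maps those levels onto the device as
 * follows (no command needs to be issued for the active transition):
 *
 *	DCD_DEVICE_ACTIVE	restore state, mark power level active
 *	DCD_DEVICE_IDLE		ATA IDLE IMMEDIATE
 *	DCD_DEVICE_STANDBY	ATA STANDBY IMMEDIATE
 */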

static int
dcd_doattach(dev_info_t *devi, int (*canwait)())
{
    struct dcd_device *devp;
    struct dcd_disk *un = (struct dcd_disk *)0;
    int instance;
    int km_flags = (canwait != NULL_FUNC)? KM_SLEEP : KM_NOSLEEP;
    int rval;
    char *prop_template = "target%x-dcd-options";
    int options;
    char prop_str[32];
    int target;
    diskaddr_t capacity;

    devp = ddi_get_driver_private(devi);

    /*
     * Call the routine dcd_probe to do some of the dirty work.
     * If the INQUIRY command succeeds, the field dcd_inq in the
     * device structure will be filled in. The dcd_sense structure
     * will also be allocated.
     */
    switch (dcd_probe(devp, canwait)) {
    default:
        return (DDI_FAILURE);

    case DCDPROBE_EXISTS:
        if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
            if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
                rval = DDI_SUCCESS;
            } else {
                rval = DDI_FAILURE;
                goto error;
            }
        } else {
            rval = DDI_FAILURE;
            goto error;
        }
    }


    instance = ddi_get_instance(devp->dcd_dev);

    if (ddi_soft_state_zalloc(dcd_state, instance) != DDI_SUCCESS) {
        rval = DDI_FAILURE;
        goto error;
    }

    un = ddi_get_soft_state(dcd_state, instance);

    un->un_sbufp = getrbuf(km_flags);
    if (un->un_sbufp == (struct buf *)NULL) {
        rval = DDI_FAILURE;
        goto error;
    }


    un->un_dcd = devp;
    un->un_power_level = -1;
    un->un_tgattribute.media_is_writable = 1;

    sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);
    cv_init(&un->un_sbuf_cv, NULL, CV_DRIVER, NULL);
    cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
    /* Initialize the power management condition variable */
    cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);
    cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);

    if (un->un_dp == 0) {
        /*
         * Assume CCS drive, assume parity, but call
         * it a CDROM if it is a RODIRECT device.
         */
        un->un_dp = (struct dcd_drivetype *)
            kmem_zalloc(sizeof (struct dcd_drivetype), km_flags);
        if (!un->un_dp) {
            rval = DDI_FAILURE;
            goto error;
        }
        if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
            if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
                un->un_dp->ctype = CTYPE_DISK;
            }
        } else {
            rval = DDI_FAILURE;
            goto error;
        }
        un->un_dp->name = "CCS";
        un->un_dp->options = 0;
    }

    /*
     * Allow I/O requests at un_secsize offsets in multiples of
     * un_secsize.
     */
    un->un_secsize = DEV_BSIZE;

    /*
     * If the device is not a removable media device, make sure that
     * it is ready. Another identify could be issued for this, but it
     * is not needed. Get the capacity from the identify data and
     * store it here.
     */
    if (dcd_compute_dk_capacity(devp, &capacity) == 0) {
        un->un_diskcapacity = capacity;
        un->un_lbasize = DEV_BSIZE;
    }

    DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "Geometry Data\n");
    DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "cyls %x, heads %x",
        devp->dcd_ident->dcd_fixcyls,
        devp->dcd_ident->dcd_heads);
    DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "sectors %x,",
        devp->dcd_ident->dcd_sectors);
    DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "capacity %llx\n",
        capacity);

    DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
        "dcdprobe: drive selected\n");

    /*
     * Check for the property target<n>-dcd-options to find the options
     * set by the HBA driver for this target, so that we can set the
     * unit structure variables and send commands accordingly.
     */
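    /*
     * The property value is decoded below as a small bit-field: with
     * DCD_DMA_MODE set, bits 3-4 select the DMA mode; otherwise bits
     * 0-2 select the PIO mode and DCD_BLOCK_MODE requests block-mode
     * transfers. For example, a value with DCD_DMA_MODE set and bits
     * 3-4 equal to 2 would select DMA mode 2 (the exact bit
     * assignments are defined by the HBA driver).
     */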
    target = devp->dcd_address->a_target;
    (void) sprintf(prop_str, prop_template, target);
    options = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_NOTPROM,
        prop_str, -1);
    if (options < 0) {
        DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
            "No per target properties");
    } else {
        if ((options & DCD_DMA_MODE) == DCD_DMA_MODE) {
            un->un_dp->options |= DMA_SUPPORTTED;
            un->un_dp->dma_mode = (options >> 3) & 0x03;
            DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
                "mode %x\n", un->un_dp->dma_mode);
        } else {
            un->un_dp->options &= ~DMA_SUPPORTTED;
            un->un_dp->pio_mode = options & 0x7;
            if (options & DCD_BLOCK_MODE)
                un->un_dp->options |= BLOCK_MODE;
            DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
                "mode %x\n", un->un_dp->pio_mode);
        }
        DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
            "options %x,", un->un_dp->options);
    }

    un->un_throttle = 2;
    /*
     * Set the default max_xfer_size - this should depend on whether
     * block mode is supported by the device or not.
     */
    un->un_max_xfer_size = MAX_ATA_XFER_SIZE;

    /*
     * Set the write cache enable softstate.
     *
     * WCE is only supported in ATAPI-4 or higher; for
     * lower rev devices, we must assume the write cache is
     * enabled.
     */
    mutex_enter(DCD_MUTEX);
    un->un_write_cache_enabled = (devp->dcd_ident->dcd_majvers == 0xffff) ||
        ((devp->dcd_ident->dcd_majvers & IDENTIFY_80_ATAPI_4) == 0) ||
        (devp->dcd_ident->dcd_features85 & IDENTIFY_85_WCE) != 0;
    mutex_exit(DCD_MUTEX);

    DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
        "dcd_doattach returns good\n");

    return (rval);

error:
    DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcd_doattach failed\n");
    dcd_free_softstate(un, devi);
    return (rval);
}

#ifdef NOTNEEDED
/*
 * This routine is used to set the block mode of operation by issuing the
 * Set Multiple ATA command with the maximum block mode possible.
 */
dcd_set_multiple(struct dcd_disk *un)
{
    int status;
    struct udcd_cmd ucmd;
    struct dcd_cmd cdb;
    dev_t dev;


    /* Zero out the required structures */
    (void) bzero((caddr_t)&ucmd, sizeof (ucmd));
    (void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));

    cdb.cmd = ATA_SET_MULTIPLE;
    /*
     * Here we should pass what needs to go into the sector count
     * register. Even though this field indicates the number of bytes
     * to read, we need to specify the block factor in terms of bytes,
     * so that it will be programmed by the HBA driver into the sector
     * count register.
     */
    cdb.size = un->un_lbasize * un->un_dp->block_factor;

    cdb.sector_num.lba_num = 0;
    cdb.address_mode = ADD_LBA_MODE;
    cdb.direction = NO_DATA_XFER;

    ucmd.udcd_flags = 0;
    ucmd.udcd_cmd = &cdb;
    ucmd.udcd_bufaddr = NULL;
    ucmd.udcd_buflen = 0;
    ucmd.udcd_flags |= UDCD_SILENT;

    dev = makedevice(ddi_driver_major(DCD_DEVINFO),
        ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);


    status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);

    return (status);
}
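
/*
 * For example, with un_lbasize == 512 and a (hypothetical) block
 * factor of 16, cdb.size above comes to 8192; the HBA driver converts
 * that back into 16 sectors when it loads the sector count register,
 * so the drive transfers 16 sectors per interrupt.
 */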
/*
 * The following routine is used only for setting the transfer mode,
 * and it is not designed for transferring any other features subcommand.
 */
dcd_set_features(struct dcd_disk *un, uchar_t mode)
{
    int status;
    struct udcd_cmd ucmd;
    struct dcd_cmd cdb;
    dev_t dev;


    /* Zero out the required structures */
    (void) bzero((caddr_t)&ucmd, sizeof (ucmd));
    (void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));

    cdb.cmd = ATA_SET_FEATURES;
    /*
     * Here we need to pass what needs to go into the sector count
     * register, but in the case of the SET FEATURES command the value
     * taken by the sector count register depends on what type of
     * subcommand is passed in the features register. Although we have
     * defined the size to be the size in bytes, in this context it
     * does not indicate bytes; instead it indicates the mode to be
     * programmed.
     */
    cdb.size = un->un_lbasize * mode;

    cdb.sector_num.lba_num = 0;
    cdb.address_mode = ADD_LBA_MODE;
    cdb.direction = NO_DATA_XFER;
    cdb.features = ATA_FEATURE_SET_MODE;
    DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
        "size %x, features %x, cmd %x\n",
        cdb.size, cdb.features, cdb.cmd);

    ucmd.udcd_flags = 0;
    ucmd.udcd_cmd = &cdb;
    ucmd.udcd_bufaddr = NULL;
    ucmd.udcd_buflen = 0;
    ucmd.udcd_flags |= UDCD_SILENT;

    dev = makedevice(ddi_driver_major(DCD_DEVINFO),
        ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);

    status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);

    return (status);
}
#endif

/*
 * Validate the geometry for this disk, e.g.,
 * see whether it has a valid label.
 */
static int
dcd_validate_geometry(struct dcd_disk *un)
{
    int secsize = 0;
    struct dcd_device *devp;
    int secdiv;
    int rval;

    ASSERT(mutex_owned(DCD_MUTEX));
    DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
        "dcd_validate_geometry: started \n");

    if (un->un_lbasize < 0) {
        return (DCD_BAD_LABEL);
    }

    if (un->un_state == DCD_STATE_PM_SUSPENDED) {
        mutex_exit(DCD_MUTEX);
        if (pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE)
            != DDI_SUCCESS) {
            mutex_enter(DCD_MUTEX);
            return (DCD_BAD_LABEL);
        }
        mutex_enter(DCD_MUTEX);
    }

    secsize = un->un_secsize;

    /*
     * take a log base 2 of sector size (sorry)
     */
    for (secdiv = 0; secsize = secsize >> 1; secdiv++)
        ;
    un->un_secdiv = secdiv;

    /*
     * Only DIRECT ACCESS devices will have Sun labels.
     * CD's supposedly have a Sun label, too.
     */
    devp = un->un_dcd;

    if (((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) &&
        (devp->dcd_ident->dcd_config & ATANON_REMOVABLE)) {
        mutex_exit(DCD_MUTEX);
        rval = cmlb_validate(un->un_dklbhandle, 0, 0);
        mutex_enter(DCD_MUTEX);
        if (rval == ENOMEM)
            return (DCD_NO_MEM_FOR_LABEL);
        else if (rval != 0)
            return (DCD_BAD_LABEL);
    } else {
        /* It should never get here. */
        return (DCD_BAD_LABEL);
    }
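    /*
     * The shift loops below compute floor(log2(size)), just like the
     * un_secdiv computation above; e.g. a 2048-byte logical block
     * gives un_lbadiv == 11, and its 4 DEV_BSIZE blocks give
     * un_blknoshift == 2.
     */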
    /*
     * take a log base 2 of logical block size
     */
    secsize = un->un_lbasize;
    for (secdiv = 0; secsize = secsize >> 1; secdiv++)
        ;
    un->un_lbadiv = secdiv;

    /*
     * take a log base 2 of the multiple of DEV_BSIZE blocks that
     * make up one logical block
     */
    secsize = un->un_lbasize >> DEV_BSHIFT;
    for (secdiv = 0; secsize = secsize >> 1; secdiv++)
        ;
    un->un_blknoshift = secdiv;
    return (0);
}

/*
 * Unix Entry Points
 */

/* ARGSUSED3 */
static int
dcdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
{
    dev_t dev = *dev_p;
    int rval = EIO;
    int partmask;
    int nodelay = (flag & (FNDELAY | FNONBLOCK));
    int i;
    char kstatname[KSTAT_STRLEN];
    diskaddr_t lblocks;
    char *partname;

    GET_SOFT_STATE(dev);

    DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
        "Inside Open flag %x, otyp %x\n", flag, otyp);

    if (otyp >= OTYPCNT) {
        return (EINVAL);
    }

    partmask = 1 << part;

    /*
     * We use a semaphore here in order to serialize
     * open and close requests on the device.
     */
    sema_p(&un->un_semoclose);

    mutex_enter(DCD_MUTEX);

    if ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL) {
        rval = ENXIO;
        goto done;
    }

    while (un->un_state == DCD_STATE_SUSPENDED) {
        cv_wait(&un->un_suspend_cv, DCD_MUTEX);
    }

    if ((un->un_state == DCD_STATE_PM_SUSPENDED) && (!nodelay)) {
        mutex_exit(DCD_MUTEX);
        if (pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE)
            != DDI_SUCCESS) {
            mutex_enter(DCD_MUTEX);
            rval = EIO;
            goto done;
        }
        mutex_enter(DCD_MUTEX);
    }

    /*
     * set make_dcd_cmd() flags and stat_size here since these
     * are unlikely to change
     */
    un->un_cmd_flags = 0;

    un->un_cmd_stat_size = 2;

    DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdopen un=0x%p\n",
        (void *)un);
    /*
     * check for previous exclusive open
     */
    DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
        "exclopen=%x, flag=%x, regopen=%x\n",
        un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
    DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
        "Exclusive open flag %x, partmask %x\n",
        un->un_exclopen, partmask);

    if (un->un_exclopen & (partmask)) {
failed_exclusive:
        DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
            "exclusive open fails\n");
        rval = EBUSY;
        goto done;
    }

    if (flag & FEXCL) {
        int i;
        if (un->un_ocmap.lyropen[part]) {
            goto failed_exclusive;
        }
        for (i = 0; i < (OTYPCNT - 1); i++) {
            if (un->un_ocmap.regopen[i] & (partmask)) {
                goto failed_exclusive;
            }
        }
    }

    if (flag & FWRITE) {
        mutex_exit(DCD_MUTEX);
        if (dcd_check_wp(dev)) {
            sema_v(&un->un_semoclose);
            return (EROFS);
        }
        mutex_enter(DCD_MUTEX);
    }

    DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
        "Check Write Protect handled\n");

    if (!nodelay) {
        mutex_exit(DCD_MUTEX);
        if ((rval = dcd_ready_and_valid(dev, un)) != 0) {
            rval = EIO;
        }
        (void) pm_idle_component(DCD_DEVINFO, 0);
        /*
         * Fail if the device is not ready, or if the number of
         * disk blocks is zero or negative for non-CD devices.
         */
        if (rval || cmlb_partinfo(un->un_dklbhandle,
            part, &lblocks, NULL, &partname, NULL, 0) ||
            lblocks <= 0) {
            rval = EIO;
            mutex_enter(DCD_MUTEX);
            goto done;
        }
        mutex_enter(DCD_MUTEX);
    }

    if (otyp == OTYP_LYR) {
        un->un_ocmap.lyropen[part]++;
    } else {
        un->un_ocmap.regopen[otyp] |= partmask;
    }

    /*
     * set up open and exclusive open flags
     */
    if (flag & FEXCL) {
        un->un_exclopen |= (partmask);
    }


    DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
        "open of part %d type %d\n",
        part, otyp);

    DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
        "Kstats getting updated\n");
    /*
     * only create kstats for disks, CD kstats created in dcdattach
     */
    _NOTE(NO_COMPETING_THREADS_NOW);
    mutex_exit(DCD_MUTEX);
    if (un->un_stats == (kstat_t *)0) {
        un->un_stats = kstat_create("dad", instance,
            NULL, "disk", KSTAT_TYPE_IO, 1,
            KSTAT_FLAG_PERSISTENT);
        if (un->un_stats) {
            un->un_stats->ks_lock = DCD_MUTEX;
            kstat_install(un->un_stats);
        }

        /*
         * set up partition statistics for each partition
         * with number of blocks > 0
         */
        if (!nodelay) {
            for (i = 0; i < NDKMAP; i++) {
                if ((un->un_pstats[i] == (kstat_t *)0) &&
                    (cmlb_partinfo(un->un_dklbhandle,
                    i, &lblocks, NULL, &partname,
                    NULL, 0) == 0) && lblocks > 0) {
                    (void) sprintf(kstatname, "dad%d,%s",
                        instance, partname);
                    un->un_pstats[i] = kstat_create("dad",
                        instance,
                        kstatname,
                        "partition",
                        KSTAT_TYPE_IO,
                        1,
                        KSTAT_FLAG_PERSISTENT);
                    if (un->un_pstats[i]) {
                        un->un_pstats[i]->ks_lock =
                            DCD_MUTEX;
                        kstat_install(un->un_pstats[i]);
                    }
                }
            }
        }
        /*
         * set up error kstats
         */
        (void) dcd_create_errstats(un, instance);
    }
#ifndef lint
    _NOTE(COMPETING_THREADS_NOW);
#endif

    sema_v(&un->un_semoclose);
    DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "Open success\n");
    return (0);

done:
    mutex_exit(DCD_MUTEX);
    sema_v(&un->un_semoclose);
    return (rval);
}

/*
 * Test if the disk is ready and has a valid geometry.
 */
static int
dcd_ready_and_valid(dev_t dev, struct dcd_disk *un)
{
    int rval = 1;
    int g_error = 0;

    mutex_enter(DCD_MUTEX);
    /*
     * cmds outstanding
     */
    if (un->un_ncmds == 0) {
        (void) dcd_unit_ready(dev);
    }

    /*
     * If the device is not yet ready here, mark it offline.
     */
    if (un->un_state == DCD_STATE_NORMAL) {
        rval = dcd_unit_ready(dev);
        if (rval != 0 && rval != EACCES) {
            dcd_offline(un, 1);
            goto done;
        }
    }

    if (un->un_format_in_progress == 0) {
        g_error = dcd_validate_geometry(un);
    }

    /*
     * Check if the geometry was valid. We don't check the validity
     * of the geometry for CDROMs.
     */
    if (g_error == DCD_BAD_LABEL) {
        rval = 1;
        goto done;
    }


    /*
     * the state has changed; inform the media watch routines
     */
    un->un_mediastate = DKIO_INSERTED;
    cv_broadcast(&un->un_state_cv);
    rval = 0;

done:
    mutex_exit(DCD_MUTEX);
    return (rval);
}


/*ARGSUSED*/
static int
dcdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
{
    uchar_t *cp;
    int i;

    GET_SOFT_STATE(dev);


    if (otyp >= OTYPCNT)
        return (ENXIO);

    DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
        "close of part %d type %d\n",
        part, otyp);
    sema_p(&un->un_semoclose);

    mutex_enter(DCD_MUTEX);

    if (un->un_exclopen & (1 << part)) {
        un->un_exclopen &= ~(1 << part);
    }

    if (otyp == OTYP_LYR) {
        un->un_ocmap.lyropen[part] -= 1;
    } else {
        un->un_ocmap.regopen[otyp] &= ~(1 << part);
    }

    cp = &un->un_ocmap.chkd[0];
    while (cp < &un->un_ocmap.chkd[OCSIZE]) {
        if (*cp != (uchar_t)0) {
            break;
        }
        cp++;
    }

    if (cp == &un->un_ocmap.chkd[OCSIZE]) {
        DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "last close\n");
        if (un->un_state == DCD_STATE_OFFLINE) {
            dcd_offline(un, 1);
        }

        mutex_exit(DCD_MUTEX);
        (void) cmlb_close(un->un_dklbhandle, 0);

        _NOTE(NO_COMPETING_THREADS_NOW);
        if (un->un_stats) {
            kstat_delete(un->un_stats);
            un->un_stats = 0;
        }
        for (i = 0; i < NDKMAP; i++) {
            if (un->un_pstats[i]) {
                kstat_delete(un->un_pstats[i]);
                un->un_pstats[i] = (kstat_t *)0;
            }
        }

        if (un->un_errstats) {
            kstat_delete(un->un_errstats);
            un->un_errstats = (kstat_t *)0;
        }
        mutex_enter(DCD_MUTEX);

#ifndef lint
        _NOTE(COMPETING_THREADS_NOW);
#endif
    }

    mutex_exit(DCD_MUTEX);
    sema_v(&un->un_semoclose);
    return (0);
}

static void
dcd_offline(struct dcd_disk *un, int bechatty)
{
    if (bechatty)
        dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, "offline\n");

    mutex_exit(DCD_MUTEX);
    cmlb_invalidate(un->un_dklbhandle, 0);
    mutex_enter(DCD_MUTEX);
}

/*
 * Given the device number, return the devinfo pointer
 * from the scsi_device structure.
 */
/*ARGSUSED*/
static int
dcdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
    dev_t dev;
    struct dcd_disk *un;
    int instance, error;


    switch (infocmd) {
    case DDI_INFO_DEVT2DEVINFO:
        dev = (dev_t)arg;
        instance = DCDUNIT(dev);
        if ((un = ddi_get_soft_state(dcd_state, instance)) == NULL)
            return (DDI_FAILURE);
        *result = (void *) DCD_DEVINFO;
        error = DDI_SUCCESS;
        break;
    case DDI_INFO_DEVT2INSTANCE:
        dev = (dev_t)arg;
        instance = DCDUNIT(dev);
        *result = (void *)(uintptr_t)instance;
        error = DDI_SUCCESS;
        break;
    default:
        error = DDI_FAILURE;
    }
    return (error);
}
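
/*
 * For example, assuming the usual encoding with DCDUNIT_SHIFT == 3
 * and DCDPART_MASK == 7, a dev_t with minor number 0xb decodes to
 * instance 1, partition 3, so DDI_INFO_DEVT2INSTANCE returns 1.
 */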
/*
 * Property operation routine. Return the number of blocks for the
 * partition in question, or forward the request to the property
 * facilities.
 */
static int
dcd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
    int instance = ddi_get_instance(dip);
    struct dcd_disk *un;
    uint64_t nblocks64;
    diskaddr_t lblocks;

    /*
     * Our dynamic properties are all device specific and size oriented.
     * Requests issued under conditions where size is valid are passed
     * to ddi_prop_op_nblocks with the size information, otherwise the
     * request is passed to ddi_prop_op. Size depends on valid geometry.
     */
    un = ddi_get_soft_state(dcd_state, instance);
    if ((dev == DDI_DEV_T_ANY) || (un == NULL)) {
        return (ddi_prop_op(dev, dip, prop_op, mod_flags,
            name, valuep, lengthp));
    } else {
        if (cmlb_partinfo(
            un->un_dklbhandle,
            DCDPART(dev),
            &lblocks,
            NULL,
            NULL,
            NULL,
            0)) {
            return (ddi_prop_op(dev, dip, prop_op, mod_flags,
                name, valuep, lengthp));
        }

        /* get nblocks value */
        nblocks64 = (ulong_t)lblocks;

        return (ddi_prop_op_nblocks(dev, dip, prop_op, mod_flags,
            name, valuep, lengthp, nblocks64));
    }
}

/*
 * These routines perform raw i/o operations.
 */
/*ARGSUSED*/
void
dcduscsimin(struct buf *bp)
{
}


static void
dcdmin(struct buf *bp)
{
    struct dcd_disk *un;
    int instance;
    minor_t minor = getminor(bp->b_edev);

    instance = minor >> DCDUNIT_SHIFT;
    un = ddi_get_soft_state(dcd_state, instance);

    if (bp->b_bcount > un->un_max_xfer_size)
        bp->b_bcount = un->un_max_xfer_size;
}


/* ARGSUSED2 */
static int
dcdread(dev_t dev, struct uio *uio, cred_t *cred_p)
{
    int secmask;

    GET_SOFT_STATE(dev);
#ifdef lint
    part = part;
#endif /* lint */
    secmask = un->un_secsize - 1;

    if (uio->uio_loffset & ((offset_t)(secmask))) {
        DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
            "file offset not modulo %d\n",
            un->un_secsize);
        return (EINVAL);
    } else if (uio->uio_iov->iov_len & (secmask)) {
        DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
            "transfer length not modulo %d\n", un->un_secsize);
        return (EINVAL);
    }
    return (physio(dcdstrategy, (struct buf *)0, dev, B_READ, dcdmin, uio));
}

/* ARGSUSED2 */
static int
dcdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
    int secmask;
    struct uio *uio = aio->aio_uio;

    GET_SOFT_STATE(dev);
#ifdef lint
    part = part;
#endif /* lint */
    secmask = un->un_secsize - 1;

    if (uio->uio_loffset & ((offset_t)(secmask))) {
        DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
            "file offset not modulo %d\n",
            un->un_secsize);
        return (EINVAL);
    } else if (uio->uio_iov->iov_len & (secmask)) {
        DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
            "transfer length not modulo %d\n", un->un_secsize);
        return (EINVAL);
    }
    return (aphysio(dcdstrategy, anocancel, dev, B_READ, dcdmin, aio));
}
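
/*
 * To illustrate the alignment checks in these raw I/O routines: with
 * the default 512-byte sector size, secmask is 0x1ff, so an I/O at
 * offset 512 with length 1024 passes both checks, while offset 513 or
 * length 1000 is rejected with EINVAL.
 */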

/* ARGSUSED2 */
static int
dcdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
{
    int secmask;

    GET_SOFT_STATE(dev);
#ifdef lint
    part = part;
#endif /* lint */
    secmask = un->un_secsize - 1;

    if (uio->uio_loffset & ((offset_t)(secmask))) {
        DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
            "file offset not modulo %d\n",
            un->un_secsize);
        return (EINVAL);
    } else if (uio->uio_iov->iov_len & (secmask)) {
        DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
            "transfer length not modulo %d\n", un->un_secsize);
        return (EINVAL);
    }
    return (physio(dcdstrategy, (struct buf *)0, dev, B_WRITE, dcdmin,
        uio));
}

/* ARGSUSED2 */
static int
dcdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
    int secmask;
    struct uio *uio = aio->aio_uio;

    GET_SOFT_STATE(dev);
#ifdef lint
    part = part;
#endif /* lint */
    secmask = un->un_secsize - 1;

    if (uio->uio_loffset & ((offset_t)(secmask))) {
        DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
            "file offset not modulo %d\n",
            un->un_secsize);
        return (EINVAL);
    } else if (uio->uio_iov->iov_len & (secmask)) {
        DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
            "transfer length not modulo %d\n", un->un_secsize);
        return (EINVAL);
    }
    return (aphysio(dcdstrategy, anocancel, dev, B_WRITE, dcdmin, aio));
}

/*
 * strategy routine
 */
static int
dcdstrategy(struct buf *bp)
{
    struct dcd_disk *un;
    struct diskhd *dp;
    int i;
    minor_t minor = getminor(bp->b_edev);
    diskaddr_t p_lblksrt;
    diskaddr_t lblocks;
    diskaddr_t bn;

    if ((un = ddi_get_soft_state(dcd_state,
        minor >> DCDUNIT_SHIFT)) == NULL ||
        un->un_state == DCD_STATE_DUMPING ||
        ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL)) {
        SET_BP_ERROR(bp, ((un) ? ENXIO : EIO));
error:
        bp->b_resid = bp->b_bcount;
        biodone(bp);
        return (0);
    }

    /*
     * If the request size (buf->b_bcount) is greater than the size
     * (un->un_max_xfer_size) supported by the target driver, fail
     * the request with the EINVAL error code.
     *
     * We are not supposed to receive requests exceeding
     * un->un_max_xfer_size, because the caller is expected to
     * check the maximum size that is supported by this driver,
     * either through an ioctl or the dcdmin routine (which is
     * private to this driver).
     * But we have seen cases (like the meta driver (md)) where
     * dcdstrategy was called with more than the supported size,
     * causing data corruption.
     */
    if (bp->b_bcount > un->un_max_xfer_size) {
        SET_BP_ERROR(bp, EINVAL);
        goto error;
    }

    TRACE_2(TR_FAC_DADA, TR_DCDSTRATEGY_START,
        "dcdstrategy_start: bp 0x%p un 0x%p", bp, un);

    /*
     * Commands may sneak in while we released the mutex in
     * DDI_SUSPEND, so we should block new commands.
     */
    mutex_enter(DCD_MUTEX);
    while (un->un_state == DCD_STATE_SUSPENDED) {
        cv_wait(&un->un_suspend_cv, DCD_MUTEX);
    }

    if (un->un_state == DCD_STATE_PM_SUSPENDED) {
        mutex_exit(DCD_MUTEX);
        (void) pm_idle_component(DCD_DEVINFO, 0);
        if (pm_raise_power(DCD_DEVINFO, 0,
            DCD_DEVICE_ACTIVE) != DDI_SUCCESS) {
            SET_BP_ERROR(bp, EIO);
            goto error;
        }
        mutex_enter(DCD_MUTEX);
    }
    mutex_exit(DCD_MUTEX);

    /*
     * Map-in the buffer in case the starting address is not word
     * aligned.
     */
    if (((uintptr_t)bp->b_un.b_addr) & 0x1)
        bp_mapin(bp);

    bp->b_flags &= ~(B_DONE|B_ERROR);
    bp->b_resid = 0;
    bp->av_forw = 0;

    DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
        "bp->b_bcount %lx\n", bp->b_bcount);

    if (bp != un->un_sbufp) {
validated:
        if (cmlb_partinfo(un->un_dklbhandle,
            minor & DCDPART_MASK,
            &lblocks,
            &p_lblksrt,
            NULL,
            NULL,
            0) == 0) {

            bn = dkblock(bp);

            DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
                "dkblock(bp) is %llu\n", bn);

            i = 0;
            if (bn < 0) {
                i = -1;
            } else if (bn >= lblocks) {
                /*
                 * For proper comparison, the file system
                 * block number has to be scaled to the actual
                 * CD transfer size.
                 * Since all the CDROM operations
                 * that have Sun Labels are in the correct
                 * block size this will work for CD's. This
                 * will have to change when we have different
                 * sector sizes.
                 *
                 * if bn == lblocks,
                 * Not an error, resid == count
                 */
                if (bn > lblocks) {
                    i = -1;
                } else {
                    i = 1;
                }
            } else if (bp->b_bcount & (un->un_secsize-1)) {
                /*
                 * This should really be:
                 *
                 * ... if (bp->b_bcount & (un->un_lbasize-1))
                 *
                 */
                i = -1;
            } else {
                if (!bp->b_bcount) {
                    printf("Warning : Zero read or Write\n");
                    goto error;
                }
                /*
                 * sort by absolute block number.
                 */
                bp->b_resid = bn;
                bp->b_resid += p_lblksrt;
                /*
                 * zero out av_back - this will be a signal
                 * to dcdstart to go and fetch the resources
                 */
                bp->av_back = NO_PKT_ALLOCATED;
            }

            /*
             * Check to see whether or not we are done
             * (with or without errors).
             */
            if (i != 0) {
                if (i < 0) {
                    bp->b_flags |= B_ERROR;
                }
                goto error;
            }
        } else {
            /*
             * opened in NDELAY/NONBLOCK mode?
             * Check if the disk is ready and has a valid
             * geometry.
             */
            if (dcd_ready_and_valid(bp->b_edev, un) == 0) {
                goto validated;
            } else {
                dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
                    "i/o to invalid geometry\n");
                SET_BP_ERROR(bp, EIO);
                goto error;
            }
        }
    } else if (BP_HAS_NO_PKT(bp)) {
        struct udcd_cmd *tscmdp;
        struct dcd_cmd *tcmdp;

        /*
         * This indicates that it is a special buffer.
         * This could be a udcd-cmd, so call bp_mapin just
         * in case it is a PIO command being issued.
         */
        tscmdp = (struct udcd_cmd *)bp->b_forw;
        tcmdp = tscmdp->udcd_cmd;
        if ((tcmdp->cmd != ATA_READ_DMA) && (tcmdp->cmd != 0xc9) &&
            (tcmdp->cmd != ATA_WRITE_DMA) && (tcmdp->cmd != 0xcb) &&
            (tcmdp->cmd != IDENTIFY_DMA) &&
            (tcmdp->cmd != ATA_FLUSH_CACHE)) {
            bp_mapin(bp);
        }
    }

    /*
     * We are doing it a bit non-standard. That is, the
     * head of the b_actf chain is *not* the active command -
     * it is just the head of the wait queue. The reason
     * we do this is that the head of the b_actf chain is
     * guaranteed to not be moved by disksort(), so that
     * our restart command (pointed to by
     * b_forw) and the head of the wait queue (b_actf) can
     * have resources granted without it getting lost in
     * the queue at some later point (where we would have
     * to go and look for it).
     */
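    /*
     * In other words, for the un_utab queue:
     *
     *	dp->b_forw	the command being restarted, if any
     *	dp->b_actf	head of the wait queue (never moved by
     *			disksort())
     *	dp->b_actl	tail of the wait queue
     */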
    mutex_enter(DCD_MUTEX);

    DCD_DO_KSTATS(un, kstat_waitq_enter, bp);

    dp = &un->un_utab;

    if (dp->b_actf == NULL) {
        dp->b_actf = bp;
        dp->b_actl = bp;
    } else if ((un->un_state == DCD_STATE_SUSPENDED) &&
        bp == un->un_sbufp) {
        bp->b_actf = dp->b_actf;
        dp->b_actf = bp;
    } else {
        TRACE_3(TR_FAC_DADA, TR_DCDSTRATEGY_DISKSORT_START,
            "dcdstrategy_disksort_start: dp 0x%p bp 0x%p un 0x%p",
            dp, bp, un);
        disksort(dp, bp);
        TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_DISKSORT_END,
            "dcdstrategy_disksort_end");
    }

    DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
        "ncmd %x , throttle %x, forw 0x%p\n",
        un->un_ncmds, un->un_throttle, (void *)dp->b_forw);
    ASSERT(un->un_ncmds >= 0);
    ASSERT(un->un_throttle >= 0);
    if ((un->un_ncmds < un->un_throttle) && (dp->b_forw == NULL)) {
        dcdstart(un);
    } else if (BP_HAS_NO_PKT(dp->b_actf)) {
        struct buf *cmd_bp;

        cmd_bp = dp->b_actf;
        cmd_bp->av_back = ALLOCATING_PKT;
        mutex_exit(DCD_MUTEX);
        /*
         * try and map this one
         */
        TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_SMALL_WINDOW_START,
            "dcdstrategy_small_window_call (begin)");

        make_dcd_cmd(un, cmd_bp, NULL_FUNC);

        TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_SMALL_WINDOW_END,
            "dcdstrategy_small_window_call (end)");

        /*
         * there is a small window where the active cmd
         * completes before make_dcd_cmd returns.
         * consequently, this cmd never gets started so
         * we start it from here
         */
        mutex_enter(DCD_MUTEX);
        if ((un->un_ncmds < un->un_throttle) &&
            (dp->b_forw == NULL)) {
            dcdstart(un);
        }
    }
    mutex_exit(DCD_MUTEX);

done:
    TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_END, "dcdstrategy_end");
    return (0);
}


/*
 * Unit start and Completion
 * NOTE: we assume that the caller has at least checked for:
 *		(un->un_ncmds < un->un_throttle)
 * if not, there is no real harm done, dcd_transport() will
 * return BUSY
 */
static void
dcdstart(struct dcd_disk *un)
{
    int status, sort_key;
    struct buf *bp;
    struct diskhd *dp;
    uchar_t state = un->un_last_state;

    TRACE_1(TR_FAC_DADA, TR_DCDSTART_START, "dcdstart_start: un 0x%p", un);

retry:
    ASSERT(mutex_owned(DCD_MUTEX));

    dp = &un->un_utab;
    if (((bp = dp->b_actf) == NULL) || (bp->av_back == ALLOCATING_PKT) ||
        (dp->b_forw != NULL)) {
        TRACE_0(TR_FAC_DADA, TR_DCDSTART_NO_WORK_END,
            "dcdstart_end (no work)");
        return;
    }

    /*
     * remove from active queue
     */
    dp->b_actf = bp->b_actf;
    bp->b_actf = 0;

    /*
     * increment ncmds before calling dcd_transport because dcdintr
     * may be called before we return from dcd_transport!
     */
    un->un_ncmds++;

    /*
     * If measuring stats, mark exit from wait queue and
     * entrance into run 'queue' if and only if we are
     * going to actually start a command.
     * Normally the bp already has a packet at this point.
     */
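    /*
     * The kstat transitions for a normal command are thus:
     * kstat_waitq_enter in dcdstrategy, kstat_waitq_to_runq here
     * (undone by kstat_runq_back_to_waitq if the packet cannot be
     * allocated or the transport is busy), and kstat_runq_exit when
     * the command fails or (in the completion path) finishes.
     */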
2109	 * Normally the bp already has a packet at this point.
2110	 */
2111	DCD_DO_KSTATS(un, kstat_waitq_to_runq, bp);
2112
2113	mutex_exit(DCD_MUTEX);
2114
2115	if (BP_HAS_NO_PKT(bp)) {
2116		make_dcd_cmd(un, bp, dcdrunout);
2117		if (BP_HAS_NO_PKT(bp) && !(bp->b_flags & B_ERROR)) {
2118			mutex_enter(DCD_MUTEX);
2119			DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
2120
2121			bp->b_actf = dp->b_actf;
2122			dp->b_actf = bp;
2123			New_state(un, DCD_STATE_RWAIT);
2124			un->un_ncmds--;
2125			TRACE_0(TR_FAC_DADA, TR_DCDSTART_NO_RESOURCES_END,
2126			    "dcdstart_end (No Resources)");
2127			goto done;
2128
2129		} else if (bp->b_flags & B_ERROR) {
2130			mutex_enter(DCD_MUTEX);
2131			DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2132
2133			un->un_ncmds--;
2134			bp->b_resid = bp->b_bcount;
2135			if (bp->b_error == 0) {
2136				SET_BP_ERROR(bp, EIO);
2137			}
2138
2139			/*
2140			 * restore old state
2141			 */
2142			un->un_state = un->un_last_state;
2143			un->un_last_state = state;
2144
2145			mutex_exit(DCD_MUTEX);
2146
2147			biodone(bp);
2148			mutex_enter(DCD_MUTEX);
2149			if (un->un_state == DCD_STATE_SUSPENDED) {
2150				cv_broadcast(&un->un_disk_busy_cv);
2151			}
2152
2153			if ((un->un_ncmds < un->un_throttle) &&
2154			    (dp->b_forw == NULL)) {
2155				goto retry;
2156			} else {
2157				goto done;
2158			}
2159		}
2160	}
2161
2162	/*
2163	 * Restore resid from the packet; b_resid had been the
2164	 * disksort key.
2165	 */
2166	sort_key = bp->b_resid;
2167	bp->b_resid = BP_PKT(bp)->pkt_resid;
2168	BP_PKT(bp)->pkt_resid = 0;
2169
2170	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2171	    "bp->b_resid %lx, pkt_resid %lx\n",
2172	    bp->b_resid, BP_PKT(bp)->pkt_resid);
2173
2174	/*
2175	 * We used to check here whether or not to try to link commands.
2176	 * Since we found no performance improvement from linked
2177	 * commands, that check no longer made much sense.
2178	 */
2179	if ((status = dcd_transport((struct dcd_pkt *)BP_PKT(bp)))
2180	    != TRAN_ACCEPT) {
2181		mutex_enter(DCD_MUTEX);
2182		un->un_ncmds--;
2183		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2184		    "transport returned %x\n", status);
2185		if (status == TRAN_BUSY) {
2186			DCD_DO_ERRSTATS(un, dcd_transerrs);
2187			DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
2188			dcd_handle_tran_busy(bp, dp, un);
2189			if (un->un_ncmds > 0) {
2190				bp->b_resid = sort_key;
2191			}
2192		} else {
2193			DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2194			mutex_exit(DCD_MUTEX);
2195
2196			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2197			    "transport rejected (%d)\n",
2198			    status);
2199			SET_BP_ERROR(bp, EIO);
2200			bp->b_resid = bp->b_bcount;
2201			if (bp != un->un_sbufp) {
2202				dcd_destroy_pkt(BP_PKT(bp));
2203			}
2204			biodone(bp);
2205
2206			mutex_enter(DCD_MUTEX);
2207			if (un->un_state == DCD_STATE_SUSPENDED) {
2208				cv_broadcast(&un->un_disk_busy_cv);
2209			}
2210			if ((un->un_ncmds < un->un_throttle) &&
2211			    (dp->b_forw == NULL)) {
2212				goto retry;
2213			}
2214		}
2215	} else {
2216		mutex_enter(DCD_MUTEX);
2217
2218		if (dp->b_actf && BP_HAS_NO_PKT(dp->b_actf)) {
2219			struct buf *cmd_bp;
2220
2221			cmd_bp = dp->b_actf;
2222			cmd_bp->av_back = ALLOCATING_PKT;
2223			mutex_exit(DCD_MUTEX);
2224			/*
2225			 * try and map this one
2226			 */
2227			TRACE_0(TR_FAC_DADA, TR_DCDSTART_SMALL_WINDOW_START,
2228			    "dcdstart_small_window_start");
2229
2230			make_dcd_cmd(un, cmd_bp, NULL_FUNC);
2231
2232			TRACE_0(TR_FAC_DADA, TR_DCDSTART_SMALL_WINDOW_END,
2233			    "dcdstart_small_window_end");
2234			/*
2235			 * there is a small window where the active cmd
2236			 * completes before make_dcd_cmd returns.
2237			 * consequently, this cmd never gets started so
2238			 * we start it from here
2239			 */
2240			mutex_enter(DCD_MUTEX);
2241			if ((un->un_ncmds < un->un_throttle) &&
2242			    (dp->b_forw == NULL)) {
2243				goto retry;
2244			}
2245		}
2246	}
2247
2248 done:
2249	ASSERT(mutex_owned(DCD_MUTEX));
2250	TRACE_0(TR_FAC_DADA, TR_DCDSTART_END, "dcdstart_end");
2251 }
2252
2253 /*
2254  * make_dcd_cmd: create a pkt
2255  */
2256 static void
2257 make_dcd_cmd(struct dcd_disk *un, struct buf *bp, int (*func)())
2258 {
2259	auto int count, com, direction;
2260	struct dcd_pkt *pkt;
2261	int flags, tval;
2262
2263	_NOTE(DATA_READABLE_WITHOUT_LOCK(dcd_disk::un_dp))
2264	TRACE_3(TR_FAC_DADA, TR_MAKE_DCD_CMD_START,
2265	    "make_dcd_cmd_start: un 0x%p bp 0x%p un 0x%p", un, bp, un);
2266
2267
2268	flags = un->un_cmd_flags;
2269
2270	if (bp != un->un_sbufp) {
2271		int partition = DCDPART(bp->b_edev);
2272		diskaddr_t p_lblksrt;
2273		diskaddr_t lblocks;
2274		long secnt;
2275		uint32_t blkno;
2276		int dkl_nblk, delta;
2277		long resid;
2278
2279		if (cmlb_partinfo(un->un_dklbhandle,
2280		    partition,
2281		    &lblocks,
2282		    &p_lblksrt,
2283		    NULL,
2284		    NULL,
2285		    0) != 0) {
2286			lblocks = 0;
2287			p_lblksrt = 0;
2288		}
2289
2290		dkl_nblk = (int)lblocks;
2291
2292		/*
2293		 * Make sure we don't run off the end of a partition.
2294		 *
2295		 * Put this test here so that we can adjust b_bcount
2296		 * to accurately reflect the actual amount we are
2297		 * going to transfer.
2298		 */
2299
2300		/*
2301		 * First, compute the partition-relative block number.
2302		 */
2303		blkno = dkblock(bp);
2304		secnt = (bp->b_bcount + (un->un_secsize - 1)) >> un->un_secdiv;
2305		count = MIN(secnt, dkl_nblk - blkno);
2306		if (count != secnt) {
2307			/*
2308			 * We have an overrun
2309			 */
2310			resid = (secnt - count) << un->un_secdiv;
2311			DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2312			    "overrun by %ld sectors\n",
2313			    secnt - count);
2314			bp->b_bcount -= resid;
2315		} else {
2316			resid = 0;
2317		}
2318
2319		/*
2320		 * Adjust the block number to absolute.
2321		 */
2322		delta = (int)p_lblksrt;
2323		blkno += delta;
2324
2325		mutex_enter(DCD_MUTEX);
2326		/*
2327		 * This is for devices having a block size different
2328		 * from DEV_BSIZE (e.g. 2K CDROMs).
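		 *
		 * Worked example (illustrative only, assuming
		 * un_blknoshift is log2(un_lbasize / un_secsize)):
		 * for a 2K-sector device with un_lbasize = 2048 and
		 * un_secsize = 512, un_blknoshift is 2, so a request
		 * for 512-byte block 100 of length 8 blocks becomes
		 * device block 100 >> 2 == 25 with a count of
		 * 8 >> 2 == 2 device blocks.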
2329 */ 2330 if (un->un_lbasize != un->un_secsize) { 2331 blkno >>= un->un_blknoshift; 2332 count >>= un->un_blknoshift; 2333 } 2334 mutex_exit(DCD_MUTEX); 2335 2336 TRACE_0(TR_FAC_DADA, TR_MAKE_DCD_CMD_INIT_PKT_START, 2337 "make_dcd_cmd_init_pkt_call (begin)"); 2338 pkt = dcd_init_pkt(ROUTE, NULL, bp, 2339 (uint32_t)sizeof (struct dcd_cmd), 2340 un->un_cmd_stat_size, PP_LEN, PKT_CONSISTENT, 2341 func, (caddr_t)un); 2342 TRACE_1(TR_FAC_DADA, TR_MAKE_DCD_CMD_INIT_PKT_END, 2343 "make_dcd_cmd_init_pkt_call (end): pkt 0x%p", pkt); 2344 if (!pkt) { 2345 bp->b_bcount += resid; 2346 bp->av_back = NO_PKT_ALLOCATED; 2347 TRACE_0(TR_FAC_DADA, 2348 TR_MAKE_DCD_CMD_NO_PKT_ALLOCATED1_END, 2349 "make_dcd_cmd_end (NO_PKT_ALLOCATED1)"); 2350 return; 2351 } 2352 if (bp->b_flags & B_READ) { 2353 if ((un->un_dp->options & DMA_SUPPORTTED) == 2354 DMA_SUPPORTTED) { 2355 com = ATA_READ_DMA; 2356 } else { 2357 if (un->un_dp->options & BLOCK_MODE) 2358 com = ATA_READ_MULTIPLE; 2359 else 2360 com = ATA_READ; 2361 } 2362 direction = DATA_READ; 2363 } else { 2364 if ((un->un_dp->options & DMA_SUPPORTTED) == 2365 DMA_SUPPORTTED) { 2366 com = ATA_WRITE_DMA; 2367 } else { 2368 if (un->un_dp->options & BLOCK_MODE) 2369 com = ATA_WRITE_MULTIPLE; 2370 else 2371 com = ATA_WRITE; 2372 } 2373 direction = DATA_WRITE; 2374 } 2375 2376 /* 2377 * Save the resid in the packet, temporarily until 2378 * we transport the command. 2379 */ 2380 pkt->pkt_resid = resid; 2381 2382 makecommand(pkt, flags, com, blkno, ADD_LBA_MODE, 2383 bp->b_bcount, direction, 0); 2384 tval = dcd_io_time; 2385 } else { 2386 2387 struct udcd_cmd *scmd = (struct udcd_cmd *)bp->b_forw; 2388 2389 /* 2390 * set options 2391 */ 2392 if ((scmd->udcd_flags & UDCD_SILENT) && !(DEBUGGING)) { 2393 flags |= FLAG_SILENT; 2394 } 2395 if (scmd->udcd_flags & UDCD_DIAGNOSE) 2396 flags |= FLAG_DIAGNOSE; 2397 2398 if (scmd->udcd_flags & UDCD_NOINTR) 2399 flags |= FLAG_NOINTR; 2400 2401 pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL, 2402 (bp->b_bcount)? bp: NULL, 2403 (uint32_t)sizeof (struct dcd_cmd), 2404 2, PP_LEN, PKT_CONSISTENT, func, (caddr_t)un); 2405 2406 if (!pkt) { 2407 bp->av_back = NO_PKT_ALLOCATED; 2408 return; 2409 } 2410 2411 makecommand(pkt, 0, scmd->udcd_cmd->cmd, 2412 scmd->udcd_cmd->sector_num.lba_num, 2413 scmd->udcd_cmd->address_mode, 2414 scmd->udcd_cmd->size, 2415 scmd->udcd_cmd->direction, scmd->udcd_cmd->features); 2416 2417 pkt->pkt_flags = flags; 2418 if (scmd->udcd_timeout == 0) 2419 tval = dcd_io_time; 2420 else 2421 tval = scmd->udcd_timeout; 2422 /* UDAD interface should be decided. 
	 */
2423		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2424		    "udcd interface\n");
2425	}
2426
2427	pkt->pkt_comp = dcdintr;
2428	pkt->pkt_time = tval;
2429	PKT_SET_BP(pkt, bp);
2430	bp->av_back = (struct buf *)pkt;
2431
2432	TRACE_0(TR_FAC_DADA, TR_MAKE_DCD_CMD_END, "make_dcd_cmd_end");
2433 }
2434
2435 /*
2436  * Command completion processing
2437  */
2438 static void
2439 dcdintr(struct dcd_pkt *pkt)
2440 {
2441	struct dcd_disk *un;
2442	struct buf *bp;
2443	int action;
2444	int status;
2445
2446	bp = PKT_GET_BP(pkt);
2447	un = ddi_get_soft_state(dcd_state, DCDUNIT(bp->b_edev));
2448
2449	TRACE_1(TR_FAC_DADA, TR_DCDINTR_START, "dcdintr_start: un 0x%p", un);
2450	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdintr\n");
2451
2452	mutex_enter(DCD_MUTEX);
2453	un->un_ncmds--;
2454	DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2455	ASSERT(un->un_ncmds >= 0);
2456
2457	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2458	    "reason %x and Status %x\n", pkt->pkt_reason, SCBP_C(pkt));
2459
2460	/*
2461	 * Handle the most common case first.
2462	 */
2463	if ((pkt->pkt_reason == CMD_CMPLT) && (SCBP_C(pkt) == 0)) {
2464		int com = GETATACMD((struct dcd_cmd *)pkt->pkt_cdbp);
2465
2466		if (un->un_state == DCD_STATE_OFFLINE) {
2467			un->un_state = un->un_last_state;
2468			dcd_log(DCD_DEVINFO, dcd_label, CE_NOTE,
2469			    (const char *) diskokay);
2470		}
2471		/*
2472		 * If the command is a read or a write, and we have
2473		 * a non-zero pkt_resid, that is an error. We should
2474		 * attempt to retry the operation if possible.
2475		 */
2476		action = COMMAND_DONE;
2477		if (pkt->pkt_resid && (com == ATA_READ || com == ATA_WRITE)) {
2478			DCD_DO_ERRSTATS(un, dcd_harderrs);
2479			if ((int)PKT_GET_RETRY_CNT(pkt) < dcd_retry_count) {
2480				PKT_INCR_RETRY_CNT(pkt, 1);
2481				action = QUE_COMMAND;
2482			} else {
2483				/*
2484				 * If we have exhausted retries,
2485				 * a command with a residual is in
2486				 * error in this case.
2487				 */
2488				action = COMMAND_DONE_ERROR;
2489			}
2490			dcd_log(DCD_DEVINFO, dcd_label,
2491			    CE_WARN, "incomplete %s- %s\n",
2492			    (bp->b_flags & B_READ)? "read" : "write",
2493			    (action == QUE_COMMAND)? "retrying" :
2494			    "giving up");
2495		}
2496
2497		/*
2498		 * At this point pkt_resid reflects the residual: how
2499		 * many bytes of the actual ATA command were left
2500		 * untransferred. Add this to b_resid, i.e. the amount
2501		 * this driver could not even attempt to transfer, to
2502		 * get the total number of bytes not transferred.
2503		 */
2504		if (action != QUE_COMMAND) {
2505			bp->b_resid += pkt->pkt_resid;
2506		}
2507
2508	} else if (pkt->pkt_reason != CMD_CMPLT) {
2509		action = dcd_handle_incomplete(un, bp);
2510	}
2511
2512	/*
2513	 * If we are in the middle of syncing or dumping, we have got
2514	 * here because dcd_transport has called us explicitly after
2515	 * completing the command in polled mode. We don't want to
2516	 * have a recursive call into dcd_transport again.
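	 *
	 * In that case the flow is roughly (an illustrative call
	 * graph, not from the original source):
	 *
	 *	dcdstrategy()/dcdstart()	panic-time write
	 *	  -> dcd_transport(pkt)		completed in polled mode
	 *	    -> pkt_comp == dcdintr()	called before transport
	 *					returns
	 *
	 * so queueing a retry from here could re-enter dcd_transport()
	 * from its own completion path; the ddi_in_panic() check below
	 * guards against that.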
2517 */ 2518 if (ddi_in_panic() && (action == QUE_COMMAND)) { 2519 action = COMMAND_DONE_ERROR; 2520 } 2521 2522 /* 2523 * save pkt reason; consecutive failures are not reported unless 2524 * fatal 2525 * do not reset last_pkt_reason when the cmd was retried and 2526 * succeeded because 2527 * there maybe more commands comming back with last_pkt_reason 2528 */ 2529 if ((un->un_last_pkt_reason != pkt->pkt_reason) && 2530 ((pkt->pkt_reason != CMD_CMPLT) || 2531 (PKT_GET_RETRY_CNT(pkt) == 0))) { 2532 un->un_last_pkt_reason = pkt->pkt_reason; 2533 } 2534 2535 switch (action) { 2536 case COMMAND_DONE_ERROR: 2537 error: 2538 if (bp->b_resid == 0) { 2539 bp->b_resid = bp->b_bcount; 2540 } 2541 if (bp->b_error == 0) { 2542 struct dcd_cmd *cdbp = (struct dcd_cmd *)pkt->pkt_cdbp; 2543 if (cdbp->cmd == ATA_FLUSH_CACHE && 2544 (pkt->pkt_scbp[0] & STATUS_ATA_ERR) && 2545 (pkt->pkt_scbp[1] & ERR_ABORT)) { 2546 SET_BP_ERROR(bp, ENOTSUP); 2547 un->un_flush_not_supported = 1; 2548 } else { 2549 SET_BP_ERROR(bp, EIO); 2550 } 2551 } 2552 bp->b_flags |= B_ERROR; 2553 /*FALLTHROUGH*/ 2554 case COMMAND_DONE: 2555 dcddone_and_mutex_exit(un, bp); 2556 2557 TRACE_0(TR_FAC_DADA, TR_DCDINTR_COMMAND_DONE_END, 2558 "dcdintr_end (COMMAND_DONE)"); 2559 return; 2560 2561 case QUE_COMMAND: 2562 if (un->un_ncmds >= un->un_throttle) { 2563 struct diskhd *dp = &un->un_utab; 2564 2565 bp->b_actf = dp->b_actf; 2566 dp->b_actf = bp; 2567 2568 DCD_DO_KSTATS(un, kstat_waitq_enter, bp); 2569 2570 mutex_exit(DCD_MUTEX); 2571 goto exit; 2572 } 2573 2574 un->un_ncmds++; 2575 /* reset the pkt reason again */ 2576 pkt->pkt_reason = 0; 2577 DCD_DO_KSTATS(un, kstat_runq_enter, bp); 2578 mutex_exit(DCD_MUTEX); 2579 if ((status = dcd_transport(BP_PKT(bp))) != TRAN_ACCEPT) { 2580 struct diskhd *dp = &un->un_utab; 2581 2582 mutex_enter(DCD_MUTEX); 2583 un->un_ncmds--; 2584 if (status == TRAN_BUSY) { 2585 DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp); 2586 dcd_handle_tran_busy(bp, dp, un); 2587 mutex_exit(DCD_MUTEX); 2588 goto exit; 2589 } 2590 DCD_DO_ERRSTATS(un, dcd_transerrs); 2591 DCD_DO_KSTATS(un, kstat_runq_exit, bp); 2592 2593 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2594 "requeue of command fails (%x)\n", status); 2595 SET_BP_ERROR(bp, EIO); 2596 bp->b_resid = bp->b_bcount; 2597 2598 dcddone_and_mutex_exit(un, bp); 2599 goto exit; 2600 } 2601 break; 2602 2603 case JUST_RETURN: 2604 default: 2605 DCD_DO_KSTATS(un, kstat_waitq_enter, bp); 2606 mutex_exit(DCD_MUTEX); 2607 break; 2608 } 2609 2610 exit: 2611 TRACE_0(TR_FAC_DADA, TR_DCDINTR_END, "dcdintr_end"); 2612 } 2613 2614 2615 /* 2616 * Done with a command. 
2617 */ 2618 static void 2619 dcddone_and_mutex_exit(struct dcd_disk *un, register struct buf *bp) 2620 { 2621 struct diskhd *dp; 2622 2623 TRACE_1(TR_FAC_DADA, TR_DCDONE_START, "dcddone_start: un 0x%p", un); 2624 2625 _NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(&un->un_dcd->dcd_mutex)); 2626 2627 dp = &un->un_utab; 2628 if (bp == dp->b_forw) { 2629 dp->b_forw = NULL; 2630 } 2631 2632 if (un->un_stats) { 2633 ulong_t n_done = bp->b_bcount - bp->b_resid; 2634 if (bp->b_flags & B_READ) { 2635 IOSP->reads++; 2636 IOSP->nread += n_done; 2637 } else { 2638 IOSP->writes++; 2639 IOSP->nwritten += n_done; 2640 } 2641 } 2642 if (IO_PARTITION_STATS) { 2643 ulong_t n_done = bp->b_bcount - bp->b_resid; 2644 if (bp->b_flags & B_READ) { 2645 IOSP_PARTITION->reads++; 2646 IOSP_PARTITION->nread += n_done; 2647 } else { 2648 IOSP_PARTITION->writes++; 2649 IOSP_PARTITION->nwritten += n_done; 2650 } 2651 } 2652 2653 /* 2654 * Start the next one before releasing resources on this one 2655 */ 2656 if (un->un_state == DCD_STATE_SUSPENDED) { 2657 cv_broadcast(&un->un_disk_busy_cv); 2658 } else if (dp->b_actf && (un->un_ncmds < un->un_throttle) && 2659 (dp->b_forw == NULL && un->un_state != DCD_STATE_SUSPENDED)) { 2660 dcdstart(un); 2661 } 2662 2663 mutex_exit(DCD_MUTEX); 2664 2665 if (bp != un->un_sbufp) { 2666 dcd_destroy_pkt(BP_PKT(bp)); 2667 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, 2668 "regular done: resid %ld\n", bp->b_resid); 2669 } else { 2670 ASSERT(un->un_sbuf_busy); 2671 } 2672 TRACE_0(TR_FAC_DADA, TR_DCDDONE_BIODONE_CALL, "dcddone_biodone_call"); 2673 2674 biodone(bp); 2675 2676 (void) pm_idle_component(DCD_DEVINFO, 0); 2677 2678 TRACE_0(TR_FAC_DADA, TR_DCDDONE_END, "dcddone end"); 2679 } 2680 2681 2682 /* 2683 * reset the disk unless the transport layer has already 2684 * cleared the problem 2685 */ 2686 #define C1 (STAT_ATA_BUS_RESET|STAT_ATA_DEV_RESET|STAT_ATA_ABORTED) 2687 static void 2688 dcd_reset_disk(struct dcd_disk *un, struct dcd_pkt *pkt) 2689 { 2690 2691 if ((pkt->pkt_statistics & C1) == 0) { 2692 mutex_exit(DCD_MUTEX); 2693 if (!dcd_reset(ROUTE, RESET_ALL)) { 2694 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, 2695 "Reset failed"); 2696 } 2697 mutex_enter(DCD_MUTEX); 2698 } 2699 } 2700 2701 static int 2702 dcd_handle_incomplete(struct dcd_disk *un, struct buf *bp) 2703 { 2704 static char *fail = "ATA transport failed: reason '%s': %s\n"; 2705 static char *notresp = "disk not responding to selection\n"; 2706 int rval = COMMAND_DONE_ERROR; 2707 int action = COMMAND_SOFT_ERROR; 2708 struct dcd_pkt *pkt = BP_PKT(bp); 2709 int be_chatty = (un->un_state != DCD_STATE_SUSPENDED) && 2710 (bp != un->un_sbufp || !(pkt->pkt_flags & FLAG_SILENT)); 2711 2712 ASSERT(mutex_owned(DCD_MUTEX)); 2713 2714 switch (pkt->pkt_reason) { 2715 2716 case CMD_TIMEOUT: 2717 /* 2718 * This Indicates the already the HBA would have reset 2719 * so Just indicate to retry the command 2720 */ 2721 break; 2722 2723 case CMD_INCOMPLETE: 2724 action = dcd_check_error(un, bp); 2725 DCD_DO_ERRSTATS(un, dcd_transerrs); 2726 (void) dcd_reset_disk(un, pkt); 2727 break; 2728 2729 case CMD_FATAL: 2730 /* 2731 * Something drastic has gone wrong 2732 */ 2733 break; 2734 case CMD_DMA_DERR: 2735 case CMD_DATA_OVR: 2736 /* FALLTHROUGH */ 2737 2738 default: 2739 /* 2740 * the target may still be running the command, 2741 * so we should try and reset that target. 
2742 */ 2743 DCD_DO_ERRSTATS(un, dcd_transerrs); 2744 if ((pkt->pkt_reason != CMD_RESET) && 2745 (pkt->pkt_reason != CMD_ABORTED)) { 2746 (void) dcd_reset_disk(un, pkt); 2747 } 2748 break; 2749 } 2750 2751 /* 2752 * If pkt_reason is CMD_RESET/ABORTED, chances are that this pkt got 2753 * reset/aborted because another disk on this bus caused it. 2754 * The disk that caused it, should get CMD_TIMEOUT with pkt_statistics 2755 * of STAT_TIMEOUT/STAT_DEV_RESET 2756 */ 2757 if ((pkt->pkt_reason == CMD_RESET) ||(pkt->pkt_reason == CMD_ABORTED)) { 2758 /* To be written : XXX */ 2759 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, 2760 "Command aborted\n"); 2761 } 2762 2763 if (bp == un->un_sbufp && (pkt->pkt_flags & FLAG_DIAGNOSE)) { 2764 rval = COMMAND_DONE_ERROR; 2765 } else { 2766 if ((rval == COMMAND_DONE_ERROR) && 2767 (action == COMMAND_SOFT_ERROR) && 2768 ((int)PKT_GET_RETRY_CNT(pkt) < dcd_retry_count)) { 2769 PKT_INCR_RETRY_CNT(pkt, 1); 2770 rval = QUE_COMMAND; 2771 } 2772 } 2773 2774 if (pkt->pkt_reason == CMD_INCOMPLETE && rval == COMMAND_DONE_ERROR) { 2775 /* 2776 * Looks like someone turned off this shoebox. 2777 */ 2778 if (un->un_state != DCD_STATE_OFFLINE) { 2779 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2780 (const char *) notresp); 2781 New_state(un, DCD_STATE_OFFLINE); 2782 } 2783 } else if (pkt->pkt_reason == CMD_FATAL) { 2784 /* 2785 * Suppressing the following message for the time being 2786 * dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2787 * (const char *) notresp); 2788 */ 2789 PKT_INCR_RETRY_CNT(pkt, 6); 2790 rval = COMMAND_DONE_ERROR; 2791 New_state(un, DCD_STATE_FATAL); 2792 } else if (be_chatty) { 2793 int in_panic = ddi_in_panic(); 2794 if (!in_panic || (rval == COMMAND_DONE_ERROR)) { 2795 if (((pkt->pkt_reason != un->un_last_pkt_reason) && 2796 (pkt->pkt_reason != CMD_RESET)) || 2797 (rval == COMMAND_DONE_ERROR) || 2798 (dcd_error_level == DCD_ERR_ALL)) { 2799 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2800 fail, dcd_rname(pkt->pkt_reason), 2801 (rval == COMMAND_DONE_ERROR) ? 2802 "giving up": "retrying command"); 2803 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, 2804 "retrycount=%x\n", 2805 PKT_GET_RETRY_CNT(pkt)); 2806 } 2807 } 2808 } 2809 error: 2810 return (rval); 2811 } 2812 2813 static int 2814 dcd_check_error(struct dcd_disk *un, struct buf *bp) 2815 { 2816 struct diskhd *dp = &un->un_utab; 2817 struct dcd_pkt *pkt = BP_PKT(bp); 2818 int rval = 0; 2819 unsigned char status; 2820 unsigned char error; 2821 2822 TRACE_0(TR_FAC_DADA, TR_DCD_CHECK_ERROR_START, "dcd_check_error_start"); 2823 ASSERT(mutex_owned(DCD_MUTEX)); 2824 2825 DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, 2826 "Pkt: 0x%p dp: 0x%p\n", (void *)pkt, (void *)dp); 2827 2828 /* 2829 * Here we need to check status first and then if error is indicated 2830 * Then the error register. 2831 */ 2832 2833 status = (pkt->pkt_scbp)[0]; 2834 if ((status & STATUS_ATA_DWF) == STATUS_ATA_DWF) { 2835 /* 2836 * There has been a Device Fault - reason for such error 2837 * is vendor specific 2838 * Action to be taken is - Indicate error and reset device. 2839 */ 2840 2841 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, "Device Fault\n"); 2842 rval = COMMAND_HARD_ERROR; 2843 } else if ((status & STATUS_ATA_CORR) == STATUS_ATA_CORR) { 2844 2845 /* 2846 * The sector read or written is marginal and hence ECC 2847 * Correction has been applied. Indicate to repair 2848 * Here we need to probably re-assign based on the badblock 2849 * mapping. 
2850 */ 2851 2852 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2853 "Soft Error on block %x\n", 2854 ((struct dcd_cmd *)pkt->pkt_cdbp)->sector_num.lba_num); 2855 rval = COMMAND_SOFT_ERROR; 2856 } else if ((status & STATUS_ATA_ERR) == STATUS_ATA_ERR) { 2857 error = pkt->pkt_scbp[1]; 2858 2859 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2860 "Command:0x%x,Error:0x%x,Status:0x%x\n", 2861 GETATACMD((struct dcd_cmd *)pkt->pkt_cdbp), 2862 error, status); 2863 if ((error & ERR_AMNF) == ERR_AMNF) { 2864 /* Address make not found */ 2865 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2866 "Address Mark Not Found"); 2867 } else if ((error & ERR_TKONF) == ERR_TKONF) { 2868 /* Track 0 Not found */ 2869 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2870 "Track 0 Not found \n"); 2871 } else if ((error & ERR_IDNF) == ERR_IDNF) { 2872 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2873 " ID not found \n"); 2874 } else if ((error & ERR_UNC) == ERR_UNC) { 2875 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2876 "Uncorrectable data Error: Block %x\n", 2877 ((struct dcd_cmd *)pkt->pkt_cdbp)->sector_num.lba_num); 2878 } else if ((error & ERR_BBK) == ERR_BBK) { 2879 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2880 "Bad block detected: Block %x\n", 2881 ((struct dcd_cmd *)pkt->pkt_cdbp)->sector_num.lba_num); 2882 } else if ((error & ERR_ABORT) == ERR_ABORT) { 2883 /* Aborted Command */ 2884 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2885 " Aborted Command \n"); 2886 } 2887 /* 2888 * Return the soft error so that the command 2889 * will be retried. 2890 */ 2891 rval = COMMAND_SOFT_ERROR; 2892 } 2893 2894 TRACE_0(TR_FAC_DADA, TR_DCD_CHECK_ERROR_END, "dcd_check_error_end"); 2895 return (rval); 2896 } 2897 2898 2899 /* 2900 * System Crash Dump routine 2901 */ 2902 2903 #define NDUMP_RETRIES 5 2904 2905 static int 2906 dcddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk) 2907 { 2908 struct dcd_pkt *pkt; 2909 int i; 2910 struct buf local, *bp; 2911 int err; 2912 unsigned char com; 2913 diskaddr_t p_lblksrt; 2914 diskaddr_t lblocks; 2915 2916 GET_SOFT_STATE(dev); 2917 #ifdef lint 2918 part = part; 2919 #endif /* lint */ 2920 2921 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) 2922 2923 if ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL) 2924 return (ENXIO); 2925 2926 if (cmlb_partinfo(un->un_dklbhandle, DCDPART(dev), 2927 &lblocks, &p_lblksrt, NULL, NULL, 0)) 2928 return (ENXIO); 2929 2930 if (blkno+nblk > lblocks) { 2931 return (EINVAL); 2932 } 2933 2934 2935 if ((un->un_state == DCD_STATE_SUSPENDED) || 2936 (un->un_state == DCD_STATE_PM_SUSPENDED)) { 2937 if (pm_raise_power(DCD_DEVINFO, 0, 2938 DCD_DEVICE_ACTIVE) != DDI_SUCCESS) { 2939 return (EIO); 2940 } 2941 } 2942 2943 /* 2944 * When cpr calls dcddump, we know that dad is in a 2945 * a good state, so no bus reset is required 2946 */ 2947 un->un_throttle = 0; 2948 2949 if ((un->un_state != DCD_STATE_SUSPENDED) && 2950 (un->un_state != DCD_STATE_DUMPING)) { 2951 2952 New_state(un, DCD_STATE_DUMPING); 2953 2954 /* 2955 * Reset the bus. I'd like to not have to do this, 2956 * but this is the safest thing to do... 2957 */ 2958 2959 if (dcd_reset(ROUTE, RESET_ALL) == 0) { 2960 return (EIO); 2961 } 2962 2963 } 2964 2965 blkno += p_lblksrt; 2966 2967 /* 2968 * It should be safe to call the allocator here without 2969 * worrying about being locked for DVMA mapping because 2970 * the address we're passed is already a DVMA mapping 2971 * 2972 * We are also not going to worry about semaphore ownership 2973 * in the dump buffer. Dumping is single threaded at present. 
2974 */ 2975 2976 bp = &local; 2977 bzero((caddr_t)bp, sizeof (*bp)); 2978 bp->b_flags = B_BUSY; 2979 bp->b_un.b_addr = addr; 2980 bp->b_bcount = nblk << DEV_BSHIFT; 2981 bp->b_resid = 0; 2982 2983 for (i = 0; i < NDUMP_RETRIES; i++) { 2984 bp->b_flags &= ~B_ERROR; 2985 if ((pkt = dcd_init_pkt(ROUTE, NULL, bp, 2986 (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN, 2987 PKT_CONSISTENT, NULL_FUNC, NULL)) != NULL) { 2988 break; 2989 } 2990 if (i == 0) { 2991 if (bp->b_flags & B_ERROR) { 2992 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2993 "no resources for dumping; " 2994 "error code: 0x%x, retrying", 2995 geterror(bp)); 2996 } else { 2997 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 2998 "no resources for dumping; retrying"); 2999 } 3000 } else if (i != (NDUMP_RETRIES - 1)) { 3001 if (bp->b_flags & B_ERROR) { 3002 dcd_log(DCD_DEVINFO, dcd_label, CE_CONT, "no " 3003 "resources for dumping; error code: 0x%x, " 3004 "retrying\n", geterror(bp)); 3005 } 3006 } else { 3007 if (bp->b_flags & B_ERROR) { 3008 dcd_log(DCD_DEVINFO, dcd_label, CE_CONT, 3009 "no resources for dumping; " 3010 "error code: 0x%x, retries failed, " 3011 "giving up.\n", geterror(bp)); 3012 } else { 3013 dcd_log(DCD_DEVINFO, dcd_label, CE_CONT, 3014 "no resources for dumping; " 3015 "retries failed, giving up.\n"); 3016 } 3017 return (EIO); 3018 } 3019 delay(10); 3020 } 3021 if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) { 3022 com = ATA_WRITE_DMA; 3023 } else { 3024 if (un->un_dp->options & BLOCK_MODE) 3025 com = ATA_WRITE_MULTIPLE; 3026 else 3027 com = ATA_WRITE; 3028 } 3029 3030 makecommand(pkt, 0, com, blkno, ADD_LBA_MODE, 3031 (int)nblk*un->un_secsize, DATA_WRITE, 0); 3032 3033 for (err = EIO, i = 0; i < NDUMP_RETRIES && err == EIO; i++) { 3034 3035 if (dcd_poll(pkt) == 0) { 3036 switch (SCBP_C(pkt)) { 3037 case STATUS_GOOD: 3038 if (pkt->pkt_resid == 0) { 3039 err = 0; 3040 } 3041 break; 3042 case STATUS_ATA_BUSY: 3043 (void) dcd_reset(ROUTE, RESET_TARGET); 3044 break; 3045 default: 3046 mutex_enter(DCD_MUTEX); 3047 (void) dcd_reset_disk(un, pkt); 3048 mutex_exit(DCD_MUTEX); 3049 break; 3050 } 3051 } else if (i > NDUMP_RETRIES/2) { 3052 (void) dcd_reset(ROUTE, RESET_ALL); 3053 } 3054 3055 } 3056 dcd_destroy_pkt(pkt); 3057 return (err); 3058 } 3059 3060 /* 3061 * This routine implements the ioctl calls. It is called 3062 * from the device switch at normal priority. 
3063  */
3064 /* ARGSUSED3 */
3065 static int
3066 dcdioctl(dev_t dev, int cmd, intptr_t arg, int flag,
3067     cred_t *cred_p, int *rval_p)
3068 {
3069	auto int32_t data[512 / (sizeof (int32_t))];
3070	struct dk_cinfo *info;
3071	struct dk_minfo media_info;
3072	struct udcd_cmd *scmd;
3073	int i, err;
3074	enum uio_seg uioseg = 0;
3075	enum dkio_state state = 0;
3076 #ifdef _MULTI_DATAMODEL
3077	struct dadkio_rwcmd rwcmd;
3078 #endif
3079	struct dadkio_rwcmd32 rwcmd32;
3080	struct dcd_cmd dcdcmd;
3081
3082	GET_SOFT_STATE(dev);
3083 #ifdef lint
3084	part = part;
3085	state = state;
3086	uioseg = uioseg;
3087 #endif /* lint */
3088
3089	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3090	    "dcd_ioctl : cmd %x, arg %lx\n", cmd, arg);
3091
3092	bzero((caddr_t)data, sizeof (data));
3093
3094	switch (cmd) {
3095
3096 #ifdef DCDDEBUG
3097	/*
3098	 * The following ioctls are for testing RESET/ABORT.
3099	 */
3100 #define	DKIOCRESET	(DKIOC|14)
3101 #define	DKIOCABORT	(DKIOC|15)
3102
3103	case DKIOCRESET:
3104		if (ddi_copyin((caddr_t)arg, (caddr_t)data, 4, flag))
3105			return (EFAULT);
3106		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3107		    "DKIOCRESET: data = 0x%x\n", data[0]);
3108		if (dcd_reset(ROUTE, data[0])) {
3109			return (0);
3110		} else {
3111			return (EIO);
3112		}
3113	case DKIOCABORT:
3114		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3115		    "DKIOCABORT:\n");
3116		if (dcd_abort(ROUTE, (struct dcd_pkt *)0)) {
3117			return (0);
3118		} else {
3119			return (EIO);
3120		}
3121 #endif
3122
3123	case DKIOCINFO:
3124		/*
3125		 * Controller Information
3126		 */
3127		info = (struct dk_cinfo *)data;
3128
3129		mutex_enter(DCD_MUTEX);
3130		switch (un->un_dp->ctype) {
3131		default:
3132			info->dki_ctype = DKC_DIRECT;
3133			break;
3134		}
3135		mutex_exit(DCD_MUTEX);
3136		info->dki_cnum = ddi_get_instance(ddi_get_parent(DCD_DEVINFO));
3137		(void) strcpy(info->dki_cname,
3138		    ddi_get_name(ddi_get_parent(DCD_DEVINFO)));
3139		/*
3140		 * Unit Information
3141		 */
3142		info->dki_unit = ddi_get_instance(DCD_DEVINFO);
3143		info->dki_slave = (Tgt(DCD_DCD_DEVP)<<3);
3144		(void) strcpy(info->dki_dname, ddi_driver_name(DCD_DEVINFO));
3145		info->dki_flags = DKI_FMTVOL;
3146		info->dki_partition = DCDPART(dev);
3147
3148		/*
3149		 * Max transfer size of this device in blocks
3150		 */
3151		info->dki_maxtransfer = un->un_max_xfer_size / DEV_BSIZE;
3152
3153		/*
3154		 * We can't get from here to there yet
3155		 */
3156		info->dki_addr = 0;
3157		info->dki_space = 0;
3158		info->dki_prio = 0;
3159		info->dki_vec = 0;
3160
3161		i = sizeof (struct dk_cinfo);
3162		if (ddi_copyout((caddr_t)data, (caddr_t)arg, i, flag))
3163			return (EFAULT);
3164		else
3165			return (0);
3166
3167	case DKIOCGMEDIAINFO:
3168		/*
3169		 * Since the dad target driver is used for IDE disks only,
3170		 * the return value can be kept hardcoded to DK_FIXED_DISK.
3171		 */
3172		media_info.dki_media_type = DK_FIXED_DISK;
3173
3174		mutex_enter(DCD_MUTEX);
3175		media_info.dki_lbsize = un->un_lbasize;
3176		media_info.dki_capacity = un->un_diskcapacity;
3177		mutex_exit(DCD_MUTEX);
3178
3179		if (ddi_copyout(&media_info, (caddr_t)arg,
3180		    sizeof (struct dk_minfo), flag))
3181			return (EFAULT);
3182		else
3183			return (0);
3184
3185	case DKIOCGGEOM:
3186	case DKIOCGVTOC:
3187	case DKIOCGETEFI:
3188
3189		mutex_enter(DCD_MUTEX);
3190		if (un->un_ncmds == 0) {
3191			if ((err = dcd_unit_ready(dev)) != 0) {
3192				mutex_exit(DCD_MUTEX);
3193				return (err);
3194			}
3195		}
3196
3197		mutex_exit(DCD_MUTEX);
3198		err = cmlb_ioctl(un->un_dklbhandle, dev, cmd,
3199		    arg, flag, cred_p, rval_p, 0);
3200		return (err);
3201
3202	case DKIOCGAPART:
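	/*
	 * These label/partition ioctls are passed straight through
	 * to the generic disk-label module (cmlb), without the
	 * unit-ready check used for the get-side ioctls above.
	 */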
3203 case DKIOCSAPART: 3204 case DKIOCSGEOM: 3205 case DKIOCSVTOC: 3206 case DKIOCSETEFI: 3207 case DKIOCPARTITION: 3208 case DKIOCPARTINFO: 3209 case DKIOCGMBOOT: 3210 case DKIOCSMBOOT: 3211 3212 err = cmlb_ioctl(un->un_dklbhandle, dev, cmd, 3213 arg, flag, cred_p, rval_p, 0); 3214 return (err); 3215 3216 case DIOCTL_RWCMD: 3217 if (drv_priv(cred_p) != 0) { 3218 return (EPERM); 3219 } 3220 3221 #ifdef _MULTI_DATAMODEL 3222 switch (ddi_model_convert_from(flag & FMODELS)) { 3223 case DDI_MODEL_NONE: 3224 if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd, 3225 sizeof (struct dadkio_rwcmd), flag)) { 3226 return (EFAULT); 3227 } 3228 rwcmd32.cmd = rwcmd.cmd; 3229 rwcmd32.flags = rwcmd.flags; 3230 rwcmd32.blkaddr = rwcmd.blkaddr; 3231 rwcmd32.buflen = rwcmd.buflen; 3232 rwcmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmd.bufaddr; 3233 break; 3234 case DDI_MODEL_ILP32: 3235 if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd32, 3236 sizeof (struct dadkio_rwcmd32), flag)) { 3237 return (EFAULT); 3238 } 3239 break; 3240 } 3241 #else 3242 if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd32, 3243 sizeof (struct dadkio_rwcmd32), flag)) { 3244 return (EFAULT); 3245 } 3246 #endif 3247 mutex_enter(DCD_MUTEX); 3248 3249 uioseg = UIO_SYSSPACE; 3250 scmd = (struct udcd_cmd *)data; 3251 scmd->udcd_cmd = &dcdcmd; 3252 /* 3253 * Convert the dadkio_rwcmd structure to udcd_cmd so that 3254 * it can take the normal path to get the io done 3255 */ 3256 if (rwcmd32.cmd == DADKIO_RWCMD_READ) { 3257 if ((un->un_dp->options & DMA_SUPPORTTED) == 3258 DMA_SUPPORTTED) 3259 scmd->udcd_cmd->cmd = ATA_READ_DMA; 3260 else 3261 scmd->udcd_cmd->cmd = ATA_READ; 3262 scmd->udcd_cmd->address_mode = ADD_LBA_MODE; 3263 scmd->udcd_cmd->direction = DATA_READ; 3264 scmd->udcd_flags |= UDCD_READ|UDCD_SILENT; 3265 } else if (rwcmd32.cmd == DADKIO_RWCMD_WRITE) { 3266 if ((un->un_dp->options & DMA_SUPPORTTED) == 3267 DMA_SUPPORTTED) 3268 scmd->udcd_cmd->cmd = ATA_WRITE_DMA; 3269 else 3270 scmd->udcd_cmd->cmd = ATA_WRITE; 3271 scmd->udcd_cmd->direction = DATA_WRITE; 3272 scmd->udcd_flags |= UDCD_WRITE|UDCD_SILENT; 3273 } else { 3274 mutex_exit(DCD_MUTEX); 3275 return (EINVAL); 3276 } 3277 3278 scmd->udcd_cmd->address_mode = ADD_LBA_MODE; 3279 scmd->udcd_cmd->features = 0; 3280 scmd->udcd_cmd->size = rwcmd32.buflen; 3281 scmd->udcd_cmd->sector_num.lba_num = rwcmd32.blkaddr; 3282 scmd->udcd_bufaddr = (caddr_t)(uintptr_t)rwcmd32.bufaddr; 3283 scmd->udcd_buflen = rwcmd32.buflen; 3284 scmd->udcd_timeout = (ushort_t)dcd_io_time; 3285 scmd->udcd_resid = 0ULL; 3286 scmd->udcd_status = 0; 3287 scmd->udcd_error_reg = 0; 3288 scmd->udcd_status_reg = 0; 3289 3290 mutex_exit(DCD_MUTEX); 3291 3292 i = dcdioctl_cmd(dev, scmd, UIO_SYSSPACE, UIO_USERSPACE); 3293 mutex_enter(DCD_MUTEX); 3294 /* 3295 * After return convert the status from scmd to 3296 * dadkio_status 3297 */ 3298 (void) dcd_translate(&(rwcmd32.status), scmd); 3299 rwcmd32.status.resid = scmd->udcd_resid; 3300 mutex_exit(DCD_MUTEX); 3301 3302 #ifdef _MULTI_DATAMODEL 3303 switch (ddi_model_convert_from(flag & FMODELS)) { 3304 case DDI_MODEL_NONE: { 3305 int counter; 3306 rwcmd.status.status = rwcmd32.status.status; 3307 rwcmd.status.resid = rwcmd32.status.resid; 3308 rwcmd.status.failed_blk_is_valid = 3309 rwcmd32.status.failed_blk_is_valid; 3310 rwcmd.status.failed_blk = rwcmd32.status.failed_blk; 3311 rwcmd.status.fru_code_is_valid = 3312 rwcmd32.status.fru_code_is_valid; 3313 rwcmd.status.fru_code = rwcmd32.status.fru_code; 3314 for (counter = 0; 3315 counter < DADKIO_ERROR_INFO_LEN; counter++) 3316 
rwcmd.status.add_error_info[counter] = 3317 rwcmd32.status.add_error_info[counter]; 3318 } 3319 /* Copy out the result back to the user program */ 3320 if (ddi_copyout((caddr_t)&rwcmd, (caddr_t)arg, 3321 sizeof (struct dadkio_rwcmd), flag)) { 3322 if (i != 0) { 3323 i = EFAULT; 3324 } 3325 } 3326 break; 3327 case DDI_MODEL_ILP32: 3328 /* Copy out the result back to the user program */ 3329 if (ddi_copyout((caddr_t)&rwcmd32, (caddr_t)arg, 3330 sizeof (struct dadkio_rwcmd32), flag)) { 3331 if (i != 0) { 3332 i = EFAULT; 3333 } 3334 } 3335 break; 3336 } 3337 #else 3338 /* Copy out the result back to the user program */ 3339 if (ddi_copyout((caddr_t)&rwcmd32, (caddr_t)arg, 3340 sizeof (struct dadkio_rwcmd32), flag)) { 3341 if (i != 0) 3342 i = EFAULT; 3343 } 3344 #endif 3345 return (i); 3346 3347 case UDCDCMD: { 3348 #ifdef _MULTI_DATAMODEL 3349 /* 3350 * For use when a 32 bit app makes a call into a 3351 * 64 bit ioctl 3352 */ 3353 struct udcd_cmd32 udcd_cmd_32_for_64; 3354 struct udcd_cmd32 *ucmd32 = &udcd_cmd_32_for_64; 3355 model_t model; 3356 #endif /* _MULTI_DATAMODEL */ 3357 3358 if (drv_priv(cred_p) != 0) { 3359 return (EPERM); 3360 } 3361 3362 scmd = (struct udcd_cmd *)data; 3363 3364 #ifdef _MULTI_DATAMODEL 3365 switch (model = ddi_model_convert_from(flag & FMODELS)) { 3366 case DDI_MODEL_ILP32: 3367 if (ddi_copyin((caddr_t)arg, ucmd32, 3368 sizeof (struct udcd_cmd32), flag)) { 3369 return (EFAULT); 3370 } 3371 /* 3372 * Convert the ILP32 uscsi data from the 3373 * application to LP64 for internal use. 3374 */ 3375 udcd_cmd32toudcd_cmd(ucmd32, scmd); 3376 break; 3377 case DDI_MODEL_NONE: 3378 if (ddi_copyin((caddr_t)arg, scmd, sizeof (*scmd), 3379 flag)) { 3380 return (EFAULT); 3381 } 3382 break; 3383 } 3384 #else /* ! _MULTI_DATAMODEL */ 3385 if (ddi_copyin((caddr_t)arg, (caddr_t)scmd, 3386 sizeof (*scmd), flag)) { 3387 return (EFAULT); 3388 } 3389 #endif /* ! _MULTI_DATAMODEL */ 3390 3391 scmd->udcd_flags &= ~UDCD_NOINTR; 3392 uioseg = (flag & FKIOCTL)? UIO_SYSSPACE: UIO_USERSPACE; 3393 3394 i = dcdioctl_cmd(dev, scmd, uioseg, uioseg); 3395 #ifdef _MULTI_DATAMODEL 3396 switch (model) { 3397 case DDI_MODEL_ILP32: 3398 /* 3399 * Convert back to ILP32 before copyout to the 3400 * application 3401 */ 3402 udcd_cmdtoudcd_cmd32(scmd, ucmd32); 3403 if (ddi_copyout(ucmd32, (caddr_t)arg, 3404 sizeof (*ucmd32), flag)) { 3405 if (i != 0) 3406 i = EFAULT; 3407 } 3408 break; 3409 case DDI_MODEL_NONE: 3410 if (ddi_copyout(scmd, (caddr_t)arg, sizeof (*scmd), 3411 flag)) { 3412 if (i != 0) 3413 i = EFAULT; 3414 } 3415 break; 3416 } 3417 #else /* ! _MULTI_DATAMODE */ 3418 if (ddi_copyout((caddr_t)scmd, (caddr_t)arg, 3419 sizeof (*scmd), flag)) { 3420 if (i != 0) 3421 i = EFAULT; 3422 } 3423 #endif 3424 return (i); 3425 } 3426 case DKIOCFLUSHWRITECACHE: { 3427 struct dk_callback *dkc = (struct dk_callback *)arg; 3428 struct dcd_pkt *pkt; 3429 struct buf *bp; 3430 int is_sync = 1; 3431 3432 mutex_enter(DCD_MUTEX); 3433 if (un->un_flush_not_supported || 3434 ! un->un_write_cache_enabled) { 3435 i = un->un_flush_not_supported ? ENOTSUP : 0; 3436 mutex_exit(DCD_MUTEX); 3437 /* 3438 * If a callback was requested: a callback will 3439 * always be done if the caller saw the 3440 * DKIOCFLUSHWRITECACHE ioctl return 0, and 3441 * never done if the caller saw the ioctl return 3442 * an error. 3443 */ 3444 if ((flag & FKIOCTL) && dkc != NULL && 3445 dkc->dkc_callback != NULL) { 3446 (*dkc->dkc_callback)(dkc->dkc_cookie, i); 3447 /* 3448 * Did callback and reported error. 
3449 * Since we did a callback, ioctl 3450 * should return 0. 3451 */ 3452 i = 0; 3453 } 3454 return (i); 3455 } 3456 3457 /* 3458 * Get the special buffer 3459 */ 3460 while (un->un_sbuf_busy) { 3461 cv_wait(&un->un_sbuf_cv, DCD_MUTEX); 3462 } 3463 un->un_sbuf_busy = 1; 3464 bp = un->un_sbufp; 3465 mutex_exit(DCD_MUTEX); 3466 3467 pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL, 3468 NULL, (uint32_t)sizeof (struct dcd_cmd), 3469 2, PP_LEN, PKT_CONSISTENT, SLEEP_FUNC, (caddr_t)un); 3470 ASSERT(pkt != NULL); 3471 3472 makecommand(pkt, un->un_cmd_flags | FLAG_SILENT, 3473 ATA_FLUSH_CACHE, 0, ADD_LBA_MODE, 0, NO_DATA_XFER, 0); 3474 3475 pkt->pkt_comp = dcdintr; 3476 pkt->pkt_time = DCD_FLUSH_TIME; 3477 PKT_SET_BP(pkt, bp); 3478 3479 bp->av_back = (struct buf *)pkt; 3480 bp->b_forw = NULL; 3481 bp->b_flags = B_BUSY; 3482 bp->b_error = 0; 3483 bp->b_edev = dev; 3484 bp->b_dev = cmpdev(dev); 3485 bp->b_bcount = 0; 3486 bp->b_blkno = 0; 3487 bp->b_un.b_addr = 0; 3488 bp->b_iodone = NULL; 3489 bp->b_list = NULL; 3490 3491 if ((flag & FKIOCTL) && dkc != NULL && 3492 dkc->dkc_callback != NULL) { 3493 struct dk_callback *dkc2 = (struct dk_callback *) 3494 kmem_zalloc(sizeof (*dkc2), KM_SLEEP); 3495 bcopy(dkc, dkc2, sizeof (*dkc2)); 3496 3497 bp->b_list = (struct buf *)dkc2; 3498 bp->b_iodone = dcdflushdone; 3499 is_sync = 0; 3500 } 3501 3502 (void) dcdstrategy(bp); 3503 3504 i = 0; 3505 if (is_sync) { 3506 i = biowait(bp); 3507 (void) dcdflushdone(bp); 3508 } 3509 3510 return (i); 3511 } 3512 default: 3513 break; 3514 } 3515 return (ENOTTY); 3516 } 3517 3518 3519 static int 3520 dcdflushdone(struct buf *bp) 3521 { 3522 struct dcd_disk *un = ddi_get_soft_state(dcd_state, 3523 DCDUNIT(bp->b_edev)); 3524 struct dcd_pkt *pkt = BP_PKT(bp); 3525 struct dk_callback *dkc = (struct dk_callback *)bp->b_list; 3526 3527 ASSERT(un != NULL); 3528 ASSERT(bp == un->un_sbufp); 3529 ASSERT(pkt != NULL); 3530 3531 dcd_destroy_pkt(pkt); 3532 bp->av_back = NO_PKT_ALLOCATED; 3533 3534 if (dkc != NULL) { 3535 ASSERT(bp->b_iodone != NULL); 3536 (*dkc->dkc_callback)(dkc->dkc_cookie, geterror(bp)); 3537 kmem_free(dkc, sizeof (*dkc)); 3538 bp->b_iodone = NULL; 3539 bp->b_list = NULL; 3540 } 3541 3542 /* 3543 * Tell anybody who cares that the buffer is now free 3544 */ 3545 mutex_enter(DCD_MUTEX); 3546 un->un_sbuf_busy = 0; 3547 cv_signal(&un->un_sbuf_cv); 3548 mutex_exit(DCD_MUTEX); 3549 return (0); 3550 } 3551 3552 /* 3553 * dcdrunout: 3554 * the callback function for resource allocation 3555 * 3556 * XXX it would be preferable that dcdrunout() scans the whole 3557 * list for possible candidates for dcdstart(); this avoids 3558 * that a bp at the head of the list whose request cannot be 3559 * satisfied is retried again and again 3560 */ 3561 /*ARGSUSED*/ 3562 static int 3563 dcdrunout(caddr_t arg) 3564 { 3565 int serviced; 3566 struct dcd_disk *un; 3567 struct diskhd *dp; 3568 3569 TRACE_1(TR_FAC_DADA, TR_DCDRUNOUT_START, "dcdrunout_start: arg 0x%p", 3570 arg); 3571 serviced = 1; 3572 3573 un = (struct dcd_disk *)arg; 3574 dp = &un->un_utab; 3575 3576 /* 3577 * We now support passing a structure to the callback 3578 * routine. 
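 *
 * Return convention (per the resource-callback framework this is
 * registered with, an assumption worth verifying): return 1 if the
 * shortage was serviced, 0 to be scheduled for another callback when
 * more resources free up.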
3579 */ 3580 ASSERT(un != NULL); 3581 mutex_enter(DCD_MUTEX); 3582 if ((un->un_ncmds < un->un_throttle) && (dp->b_forw == NULL)) { 3583 dcdstart(un); 3584 } 3585 if (un->un_state == DCD_STATE_RWAIT) { 3586 serviced = 0; 3587 } 3588 mutex_exit(DCD_MUTEX); 3589 TRACE_1(TR_FAC_DADA, TR_DCDRUNOUT_END, 3590 "dcdrunout_end: serviced %d", serviced); 3591 return (serviced); 3592 } 3593 3594 3595 /* 3596 * This routine called to see whether unit is (still) there. Must not 3597 * be called when un->un_sbufp is in use, and must not be called with 3598 * an unattached disk. Soft state of disk is restored to what it was 3599 * upon entry- up to caller to set the correct state. 3600 * 3601 * We enter with the disk mutex held. 3602 */ 3603 3604 /* ARGSUSED0 */ 3605 static int 3606 dcd_unit_ready(dev_t dev) 3607 { 3608 #ifndef lint 3609 auto struct udcd_cmd dcmd, *com = &dcmd; 3610 auto struct dcd_cmd cmdblk; 3611 #endif 3612 int error; 3613 #ifndef lint 3614 GET_SOFT_STATE(dev); 3615 #endif 3616 3617 /* 3618 * Now that we protect the special buffer with 3619 * a mutex, we could probably do a mutex_tryenter 3620 * on it here and return failure if it were held... 3621 */ 3622 3623 error = 0; 3624 return (error); 3625 } 3626 3627 /* ARGSUSED0 */ 3628 int 3629 dcdioctl_cmd(dev_t devp, struct udcd_cmd *in, enum uio_seg cdbspace, 3630 enum uio_seg dataspace) 3631 { 3632 3633 struct buf *bp; 3634 struct udcd_cmd *scmd; 3635 struct dcd_pkt *pkt; 3636 int err, rw; 3637 caddr_t cdb; 3638 int flags = 0; 3639 3640 GET_SOFT_STATE(devp); 3641 3642 #ifdef lint 3643 part = part; 3644 #endif 3645 3646 /* 3647 * Is this a request to reset the bus? 3648 * if so, we need to do reseting. 3649 */ 3650 3651 if (in->udcd_flags & UDCD_RESET) { 3652 int flag = RESET_TARGET; 3653 err = dcd_reset(ROUTE, flag) ? 0: EIO; 3654 return (err); 3655 } 3656 3657 scmd = in; 3658 3659 3660 /* Do some sanity checks */ 3661 if (scmd->udcd_buflen <= 0) { 3662 if (scmd->udcd_flags & (UDCD_READ | UDCD_WRITE)) { 3663 return (EINVAL); 3664 } else { 3665 scmd->udcd_buflen = 0; 3666 } 3667 } 3668 3669 /* Make a copy of the dcd_cmd passed */ 3670 cdb = kmem_zalloc(sizeof (struct dcd_cmd), KM_SLEEP); 3671 if (cdbspace == UIO_SYSSPACE) { 3672 flags |= FKIOCTL; 3673 } 3674 3675 if (ddi_copyin((void *)scmd->udcd_cmd, cdb, sizeof (struct dcd_cmd), 3676 flags)) { 3677 kmem_free(cdb, sizeof (struct dcd_cmd)); 3678 return (EFAULT); 3679 } 3680 scmd = (struct udcd_cmd *)kmem_alloc(sizeof (*scmd), KM_SLEEP); 3681 bcopy((caddr_t)in, (caddr_t)scmd, sizeof (*scmd)); 3682 scmd->udcd_cmd = (struct dcd_cmd *)cdb; 3683 rw = (scmd->udcd_flags & UDCD_READ) ? 
B_READ: B_WRITE; 3684 3685 3686 /* 3687 * Get the special buffer 3688 */ 3689 3690 mutex_enter(DCD_MUTEX); 3691 while (un->un_sbuf_busy) { 3692 if (cv_wait_sig(&un->un_sbuf_cv, DCD_MUTEX) == 0) { 3693 kmem_free(scmd->udcd_cmd, sizeof (struct dcd_cmd)); 3694 kmem_free((caddr_t)scmd, sizeof (*scmd)); 3695 mutex_exit(DCD_MUTEX); 3696 return (EINTR); 3697 } 3698 } 3699 3700 un->un_sbuf_busy = 1; 3701 bp = un->un_sbufp; 3702 mutex_exit(DCD_MUTEX); 3703 3704 3705 /* 3706 * If we are going to do actual I/O, let physio do all the 3707 * things 3708 */ 3709 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, 3710 "dcdioctl_cmd : buflen %x\n", scmd->udcd_buflen); 3711 3712 if (scmd->udcd_buflen) { 3713 auto struct iovec aiov; 3714 auto struct uio auio; 3715 struct uio *uio = &auio; 3716 3717 bzero((caddr_t)&auio, sizeof (struct uio)); 3718 bzero((caddr_t)&aiov, sizeof (struct iovec)); 3719 3720 aiov.iov_base = scmd->udcd_bufaddr; 3721 aiov.iov_len = scmd->udcd_buflen; 3722 3723 uio->uio_iov = &aiov; 3724 uio->uio_iovcnt = 1; 3725 uio->uio_resid = scmd->udcd_buflen; 3726 uio->uio_segflg = dataspace; 3727 3728 /* 3729 * Let physio do the rest... 3730 */ 3731 bp->av_back = NO_PKT_ALLOCATED; 3732 bp->b_forw = (struct buf *)scmd; 3733 err = physio(dcdstrategy, bp, devp, rw, dcdudcdmin, uio); 3734 } else { 3735 /* 3736 * We have to mimic what physio would do here. 3737 */ 3738 bp->av_back = NO_PKT_ALLOCATED; 3739 bp->b_forw = (struct buf *)scmd; 3740 bp->b_flags = B_BUSY | rw; 3741 bp->b_edev = devp; 3742 bp->b_dev = cmpdev(devp); 3743 bp->b_bcount = bp->b_blkno = 0; 3744 (void) dcdstrategy(bp); 3745 err = biowait(bp); 3746 } 3747 3748 done: 3749 if ((pkt = BP_PKT(bp)) != NULL) { 3750 bp->av_back = NO_PKT_ALLOCATED; 3751 /* we need to update the completion status of udcd command */ 3752 in->udcd_resid = bp->b_resid; 3753 in->udcd_status_reg = SCBP_C(pkt); 3754 /* XXX: we need to give error_reg also */ 3755 dcd_destroy_pkt(pkt); 3756 } 3757 /* 3758 * Tell anybody who cares that the buffer is now free 3759 */ 3760 mutex_enter(DCD_MUTEX); 3761 un->un_sbuf_busy = 0; 3762 cv_signal(&un->un_sbuf_cv); 3763 mutex_exit(DCD_MUTEX); 3764 3765 kmem_free(scmd->udcd_cmd, sizeof (struct dcd_cmd)); 3766 kmem_free((caddr_t)scmd, sizeof (*scmd)); 3767 return (err); 3768 } 3769 3770 static void 3771 dcdudcdmin(struct buf *bp) 3772 { 3773 3774 #ifdef lint 3775 bp = bp; 3776 #endif 3777 3778 } 3779 3780 /* 3781 * restart a cmd from timeout() context 3782 * 3783 * the cmd is expected to be in un_utab.b_forw. If this pointer is non-zero 3784 * a restart timeout request has been issued and no new timeouts should 3785 * be requested. 
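 * A typical arming sequence, as done in dcd_handle_tran_busy():
 *
 *	if (!un->un_reissued_timeid)
 *		un->un_reissued_timeid = timeout(dcdrestart,
 *		    (caddr_t)un, DCD_BSY_TIMEOUT / 500);
 *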
 * b_forw is reset when the cmd eventually completes in
3786  * dcddone_and_mutex_exit().
3787  */
3788 void
3789 dcdrestart(void *arg)
3790 {
3791	struct dcd_disk *un = (struct dcd_disk *)arg;
3792	struct buf *bp;
3793	int status;
3794
3795	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdrestart\n");
3796
3797	mutex_enter(DCD_MUTEX);
3798	bp = un->un_utab.b_forw;
3799	if (bp) {
3800		un->un_ncmds++;
3801		DCD_DO_KSTATS(un, kstat_waitq_to_runq, bp);
3802	}
3803
3804
3805	if (bp) {
3806		struct dcd_pkt *pkt = BP_PKT(bp);
3807
3808		mutex_exit(DCD_MUTEX);
3809
3810		pkt->pkt_flags = 0;
3811
3812		if ((status = dcd_transport(pkt)) != TRAN_ACCEPT) {
3813			mutex_enter(DCD_MUTEX);
3814			DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
3815			un->un_ncmds--;
3816			if (status == TRAN_BUSY) {
3817				/* XXX : To be checked */
3818				/*
3819				 * if (un->un_throttle > 1) {
3820				 *	ASSERT(un->un_ncmds >= 0);
3821				 *	un->un_throttle = un->un_ncmds;
3822				 * }
3823				 */
3824				un->un_reissued_timeid =
3825				    timeout(dcdrestart, (caddr_t)un,
3826				    DCD_BSY_TIMEOUT/500);
3827				mutex_exit(DCD_MUTEX);
3828				return;
3829			}
3830			DCD_DO_ERRSTATS(un, dcd_transerrs);
3831			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
3832			    "dcdrestart transport failed (%x)\n", status);
3833			bp->b_resid = bp->b_bcount;
3834			SET_BP_ERROR(bp, EIO);
3835
3836			DCD_DO_KSTATS(un, kstat_waitq_exit, bp);
3837			un->un_reissued_timeid = 0L;
3838			dcddone_and_mutex_exit(un, bp);
3839			return;
3840		}
3841		mutex_enter(DCD_MUTEX);
3842	}
3843	un->un_reissued_timeid = 0L;
3844	mutex_exit(DCD_MUTEX);
3845	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdrestart done\n");
3846 }
3847
3848 /*
3849  * This routine gets called to reset the throttle to its saved
3850  * value whenever we lower the throttle.
3851  */
3852 void
3853 dcd_reset_throttle(caddr_t arg)
3854 {
3855	struct dcd_disk *un = (struct dcd_disk *)arg;
3856	struct diskhd *dp;
3857
3858	mutex_enter(DCD_MUTEX);
3859	dp = &un->un_utab;
3860
3861	/*
3862	 * Start any commands that didn't start while throttling.
3863	 */
3864	if (dp->b_actf && (un->un_ncmds < un->un_throttle) &&
3865	    (dp->b_forw == NULL)) {
3866		dcdstart(un);
3867	}
3868	mutex_exit(DCD_MUTEX);
3869 }
3870
3871
3872 /*
3873  * This routine handles the case when TRAN_BUSY is
3874  * returned by the HBA.
3875  *
3876  * If there are commands already in the transport, the
3877  * bp can be put back on the queue and it will
3878  * be retried when the queue is emptied after a command
3879  * completes. But if there is no command in the transport
3880  * and it still returns busy, we have to retry the command
3881  * after some delay, on the order of 10ms.
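 *
 * In sketch form (illustrative): if bp is already the restart
 * command (dp->b_forw), nothing changes; otherwise bp is pushed back
 * on the head of the wait queue, and in either case a single
 * dcdrestart() timeout is armed if one is not already pending.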
3882 */ 3883 /* ARGSUSED0 */ 3884 static void 3885 dcd_handle_tran_busy(struct buf *bp, struct diskhd *dp, struct dcd_disk *un) 3886 { 3887 ASSERT(mutex_owned(DCD_MUTEX)); 3888 3889 3890 if (dp->b_forw == NULL || dp->b_forw == bp) { 3891 dp->b_forw = bp; 3892 } else if (dp->b_forw != bp) { 3893 bp->b_actf = dp->b_actf; 3894 dp->b_actf = bp; 3895 3896 } 3897 if (!un->un_reissued_timeid) { 3898 un->un_reissued_timeid = 3899 timeout(dcdrestart, (caddr_t)un, DCD_BSY_TIMEOUT/500); 3900 } 3901 } 3902 3903 static int 3904 dcd_write_deviceid(struct dcd_disk *un) 3905 { 3906 3907 int status; 3908 diskaddr_t blk; 3909 struct udcd_cmd ucmd; 3910 struct dcd_cmd cdb; 3911 struct dk_devid *dkdevid; 3912 uint_t *ip, chksum; 3913 int i; 3914 dev_t dev; 3915 3916 mutex_exit(DCD_MUTEX); 3917 if (cmlb_get_devid_block(un->un_dklbhandle, &blk, 0)) { 3918 mutex_enter(DCD_MUTEX); 3919 return (EINVAL); 3920 } 3921 mutex_enter(DCD_MUTEX); 3922 3923 /* Allocate the buffer */ 3924 dkdevid = kmem_zalloc(un->un_secsize, KM_SLEEP); 3925 3926 /* Fill in the revision */ 3927 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB; 3928 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB; 3929 3930 /* Copy in the device id */ 3931 bcopy(un->un_devid, &dkdevid->dkd_devid, 3932 ddi_devid_sizeof(un->un_devid)); 3933 3934 /* Calculate the chksum */ 3935 chksum = 0; 3936 ip = (uint_t *)dkdevid; 3937 for (i = 0; i < ((un->un_secsize - sizeof (int))/sizeof (int)); i++) 3938 chksum ^= ip[i]; 3939 3940 /* Fill in the checksum */ 3941 DKD_FORMCHKSUM(chksum, dkdevid); 3942 3943 (void) bzero((caddr_t)&ucmd, sizeof (ucmd)); 3944 (void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd)); 3945 3946 if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) { 3947 cdb.cmd = ATA_WRITE_DMA; 3948 } else { 3949 if (un->un_dp->options & BLOCK_MODE) 3950 cdb.cmd = ATA_WRITE_MULTIPLE; 3951 else 3952 cdb.cmd = ATA_WRITE; 3953 } 3954 cdb.size = un->un_secsize; 3955 cdb.sector_num.lba_num = blk; 3956 cdb.address_mode = ADD_LBA_MODE; 3957 cdb.direction = DATA_WRITE; 3958 3959 ucmd.udcd_flags = UDCD_WRITE; 3960 ucmd.udcd_cmd = &cdb; 3961 ucmd.udcd_bufaddr = (caddr_t)dkdevid; 3962 ucmd.udcd_buflen = un->un_secsize; 3963 ucmd.udcd_flags |= UDCD_SILENT; 3964 dev = makedevice(ddi_driver_major(DCD_DEVINFO), 3965 ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT); 3966 mutex_exit(DCD_MUTEX); 3967 status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE); 3968 mutex_enter(DCD_MUTEX); 3969 3970 kmem_free(dkdevid, un->un_secsize); 3971 return (status); 3972 } 3973 3974 static int 3975 dcd_read_deviceid(struct dcd_disk *un) 3976 { 3977 int status; 3978 diskaddr_t blk; 3979 struct udcd_cmd ucmd; 3980 struct dcd_cmd cdb; 3981 struct dk_devid *dkdevid; 3982 uint_t *ip; 3983 int chksum; 3984 int i, sz; 3985 dev_t dev; 3986 3987 mutex_exit(DCD_MUTEX); 3988 if (cmlb_get_devid_block(un->un_dklbhandle, &blk, 0)) { 3989 mutex_enter(DCD_MUTEX); 3990 return (EINVAL); 3991 } 3992 mutex_enter(DCD_MUTEX); 3993 3994 dkdevid = kmem_alloc(un->un_secsize, KM_SLEEP); 3995 3996 (void) bzero((caddr_t)&ucmd, sizeof (ucmd)); 3997 (void) bzero((caddr_t)&cdb, sizeof (cdb)); 3998 3999 if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) { 4000 cdb.cmd = ATA_READ_DMA; 4001 } else { 4002 if (un->un_dp->options & BLOCK_MODE) 4003 cdb.cmd = ATA_READ_MULTIPLE; 4004 else 4005 cdb.cmd = ATA_READ; 4006 } 4007 cdb.size = un->un_secsize; 4008 cdb.sector_num.lba_num = blk; 4009 cdb.address_mode = ADD_LBA_MODE; 4010 cdb.direction = DATA_READ; 4011 4012 ucmd.udcd_flags = UDCD_READ; 4013 ucmd.udcd_cmd = &cdb; 4014 
ucmd.udcd_bufaddr = (caddr_t)dkdevid;
4015	ucmd.udcd_buflen = un->un_secsize;
4016	ucmd.udcd_flags |= UDCD_SILENT;
4017	dev = makedevice(ddi_driver_major(DCD_DEVINFO),
4018	    ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);
4019	mutex_exit(DCD_MUTEX);
4020	status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);
4021	mutex_enter(DCD_MUTEX);
4022
4023	if (status != 0) {
4024		kmem_free((caddr_t)dkdevid, un->un_secsize);
4025		return (status);
4026	}
4027
4028	/* Validate the revision */
4029
4030	if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
4031	    (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
4032		kmem_free((caddr_t)dkdevid, un->un_secsize);
4033		return (EINVAL);
4034	}
4035
4036	/* Calculate the checksum */
4037	chksum = 0;
4038	ip = (uint_t *)dkdevid;
4039	for (i = 0; i < ((un->un_secsize - sizeof (int))/sizeof (int)); i++)
4040		chksum ^= ip[i];
4041
4042	/* Compare the checksums */
4043
4044	if (DKD_GETCHKSUM(dkdevid) != chksum) {
4045		kmem_free((caddr_t)dkdevid, un->un_secsize);
4046		return (EINVAL);
4047	}
4048
4049	/* Validate the device id */
4050	if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
4051		kmem_free((caddr_t)dkdevid, un->un_secsize);
4052		return (EINVAL);
4053	}
4054
4055	/* return a copy of the device id */
4056	sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
4057	un->un_devid = (ddi_devid_t)kmem_alloc(sz, KM_SLEEP);
4058	bcopy(&dkdevid->dkd_devid, un->un_devid, sz);
4059	kmem_free((caddr_t)dkdevid, un->un_secsize);
4060
4061	return (0);
4062 }
4063
4064 /*
4065  * Return the device id for the device.
4066  * 1. If the device ID exists, just return it; nothing to do in that case.
4067  * 2. Otherwise build one from the drive's model number and serial number.
4068  * 3. If there is a problem building it from the serial/model number, try
4069  *    to read it from the acyl region of the disk.
4070  * Note: If this function is unable to return a valid ID, the calling
4071  * point will invoke the routine to create a fabricated ID and store it
4072  * in the acyl region of the disk.
4073  */
4074 static ddi_devid_t
4075 dcd_get_devid(struct dcd_disk *un)
4076 {
4077	int rc;
4078
4079	/* If already registered, return that value */
4080	if (un->un_devid != NULL)
4081		return (un->un_devid);
4082
4083	/* Build a devid from model and serial number, if present */
4084	rc = dcd_make_devid_from_serial(un);
4085
4086	if (rc != DDI_SUCCESS) {
4087		/* Read the devid from the disk. */
4088		if (dcd_read_deviceid(un))
4089			return (NULL);
4090	}
4091
4092	(void) ddi_devid_register(DCD_DEVINFO, un->un_devid);
4093	return (un->un_devid);
4094 }
4095
4096
4097 static ddi_devid_t
4098 dcd_create_devid(struct dcd_disk *un)
4099 {
4100	if (ddi_devid_init(DCD_DEVINFO, DEVID_FAB, 0, NULL, (ddi_devid_t *)
4101	    &un->un_devid) == DDI_FAILURE)
4102		return (NULL);
4103
4104	if (dcd_write_deviceid(un)) {
4105		ddi_devid_free(un->un_devid);
4106		un->un_devid = NULL;
4107		return (NULL);
4108	}
4109
4110	(void) ddi_devid_register(DCD_DEVINFO, un->un_devid);
4111	return (un->un_devid);
4112 }
4113
4114 /*
4115  * Build a devid from the model and serial number, if present.
4116  * Return DDI_SUCCESS or DDI_FAILURE.
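  *
  * Example (illustrative values only): model "ST34342A" and serial
  * "QN123456" yield the 17-byte devid payload "ST34342A=QN123456",
  * with no trailing NUL included.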
4117  */
4118 static int
4119 dcd_make_devid_from_serial(struct dcd_disk *un)
4120 {
4121	int rc = DDI_SUCCESS;
4122	char *hwid;
4123	char *model;
4124	int model_len;
4125	char *serno;
4126	int serno_len;
4127	int total_len;
4128
4129	/* initialize the model and serial number information */
4130	model = un->un_dcd->dcd_ident->dcd_model;
4131	model_len = DCD_MODEL_NUMBER_LENGTH;
4132	serno = un->un_dcd->dcd_ident->dcd_drvser;
4133	serno_len = DCD_SERIAL_NUMBER_LENGTH;
4134
4135	/* Verify the model and serial number */
4136	dcd_validate_model_serial(model, &model_len, model_len);
4137	if (model_len == 0) {
4138		rc = DDI_FAILURE;
4139		goto out;
4140	}
4141	dcd_validate_model_serial(serno, &serno_len, serno_len);
4142	if (serno_len == 0) {
4143		rc = DDI_FAILURE;
4144		goto out;
4145	}
4146
4147	/*
4148	 * The device ID will be the concatenation of the model number,
4149	 * the '=' separator, and the serial number. Allocate
4150	 * the string and concatenate the components.
4151	 */
4152	total_len = model_len + 1 + serno_len;
4153	hwid = kmem_alloc(total_len, KM_SLEEP);
4154	bcopy((caddr_t)model, (caddr_t)hwid, model_len);
4155	bcopy((caddr_t)"=", (caddr_t)&hwid[model_len], 1);
4156	bcopy((caddr_t)serno, (caddr_t)&hwid[model_len + 1], serno_len);
4157
4158	/* Initialize the device ID; the trailing NUL is not included */
4159	rc = ddi_devid_init(DCD_DEVINFO, DEVID_ATA_SERIAL, total_len,
4160	    hwid, (ddi_devid_t *)&un->un_devid);
4161
4162	/* Free the allocated string */
4163	kmem_free(hwid, total_len);
4164
4165 out:	return (rc);
4166 }
4167
4168 /*
4169  * Test for a valid model or serial number. Assume that a valid
4170  * representation contains at least one character that is neither a
4171  * space, a '0' digit, nor a NUL. Trailing blanks and NULs are trimmed
4172  * from the returned length.
4173  */
4174 static void
4175 dcd_validate_model_serial(char *str, int *retlen, int totallen)
4176 {
4177	char ch;
4178	boolean_t ret = B_FALSE;
4179	int i;
4180	int tb;
4181
4182	for (i = 0, tb = 0; i < totallen; i++) {
4183		ch = *str++;
4184		if ((ch != ' ') && (ch != '\0') && (ch != '0'))
4185			ret = B_TRUE;
4186		if ((ch == ' ') || (ch == '\0'))
4187			tb++;
4188		else
4189			tb = 0;
4190	}
4191
4192	if (ret == B_TRUE) {
		/* At least one non-'0', non-blank character.
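		 * Example (illustrative): for a 12-byte field
		 * containing "ABC123" followed by two blanks and four
		 * NULs, tb ends up 6 and *retlen = 12 - 6 = 6; an
		 * all-'0'/blank field yields *retlen = 0.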
#ifndef lint
void
clean_print(dev_info_t *dev, char *label, uint_t level,
    char *title, char *data, int len)
{
	int i;
	char buf[256];

	(void) sprintf(buf, "%s:", title);
	for (i = 0; i < len; i++) {
		(void) sprintf(&buf[strlen(buf)], "0x%x ", (data[i] & 0xff));
	}
	(void) sprintf(&buf[strlen(buf)], "\n");

	dcd_log(dev, label, level, "%s", buf);
}
#endif /* Not lint */

#ifndef lint
/*
 * Print a piece of inquiry data, cleaned up for non-printable
 * characters, stopping at the first space character after the
 * beginning of the passed string.
 */
void
inq_fill(char *p, int l, char *s)
{
	unsigned i = 0;
	char c;

	while (i++ < l) {
		if ((c = *p++) < ' ' || c >= 0177) {
			c = '*';
		} else if (i != 1 && c == ' ') {
			break;
		}
		*s++ = c;
	}
	*s++ = 0;
}
#endif /* Not lint */
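/*
 * Illustration only, excluded from the build: a user-level run of
 * inq_fill() above on a made-up identify string, assuming a copy of
 * inq_fill() is in scope. Bytes outside the printable range become
 * '*', and copying stops at the first space after the first character,
 * so only the leading word is returned.
 */
#if 0
#include <stdio.h>

/* inq_fill() as defined above */

int
main(void)
{
	char src[] = "WDC WD307AA";	/* made-up model string */
	char out[sizeof (src)];

	inq_fill(src, sizeof (src) - 1, out);
	(void) printf("%s\n", out);	/* prints "WDC" */
	return (0);
}
#endif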
char *
dcd_sname(uchar_t status)
{
	switch (status & STATUS_ATA_MASK) {
	case STATUS_GOOD:
		return ("good status");

	case STATUS_ATA_BUSY:
		return ("busy");

	default:
		return ("<unknown status>");
	}
}

/* ARGSUSED0 */
char *
dcd_rname(int reason)
{
	static char *rnames[] = {
		"cmplt",
		"incomplete",
		"dma_derr",
		"tran_err",
		"reset",
		"aborted",
		"timeout",
		"data_ovr",
	};
	if (reason > CMD_DATA_OVR) {
		return ("<unknown reason>");
	} else {
		return (rnames[reason]);
	}
}


/* ARGSUSED0 */
int
dcd_check_wp(dev_t dev)
{
	return (0);
}

/*
 * Create device error kstats
 */
static int
dcd_create_errstats(struct dcd_disk *un, int instance)
{
	char kstatname[KSTAT_STRLEN];

	if (un->un_errstats == (kstat_t *)0) {
		(void) sprintf(kstatname, "dad%d,error", instance);
		un->un_errstats = kstat_create("daderror", instance, kstatname,
		    "device_error", KSTAT_TYPE_NAMED,
		    sizeof (struct dcd_errstats) / sizeof (kstat_named_t),
		    KSTAT_FLAG_PERSISTENT);

		if (un->un_errstats) {
			struct dcd_errstats *dtp;

			dtp = (struct dcd_errstats *)un->un_errstats->ks_data;
			kstat_named_init(&dtp->dcd_softerrs, "Soft Errors",
			    KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_harderrs, "Hard Errors",
			    KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_transerrs,
			    "Transport Errors", KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_model, "Model",
			    KSTAT_DATA_CHAR);
			kstat_named_init(&dtp->dcd_revision, "Revision",
			    KSTAT_DATA_CHAR);
			kstat_named_init(&dtp->dcd_serial, "Serial No",
			    KSTAT_DATA_CHAR);
			kstat_named_init(&dtp->dcd_capacity, "Size",
			    KSTAT_DATA_ULONGLONG);
			kstat_named_init(&dtp->dcd_rq_media_err, "Media Error",
			    KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_rq_ntrdy_err,
			    "Device Not Ready", KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_rq_nodev_err, " No Device",
			    KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_rq_recov_err, "Recoverable",
			    KSTAT_DATA_UINT32);
			kstat_named_init(&dtp->dcd_rq_illrq_err,
			    "Illegal Request", KSTAT_DATA_UINT32);

			un->un_errstats->ks_private = un;
			un->un_errstats->ks_update = nulldev;
			kstat_install(un->un_errstats);

			(void) strncpy(&dtp->dcd_model.value.c[0],
			    un->un_dcd->dcd_ident->dcd_model, 16);
			(void) strncpy(&dtp->dcd_serial.value.c[0],
			    un->un_dcd->dcd_ident->dcd_drvser, 16);
			(void) strncpy(&dtp->dcd_revision.value.c[0],
			    un->un_dcd->dcd_ident->dcd_fw, 8);
			dtp->dcd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_diskcapacity *
			    (uint64_t)un->un_lbasize);
		}
	}
	return (0);
}
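/*
 * Illustration only, excluded from the build: the error kstat created
 * above can be read from user level with libkstat (compile with
 * -lkstat). The module and name strings follow the kstat_create() call
 * above; instance 0 ("dad0,error") is an assumption for the example.
 */
#if 0
#include <kstat.h>
#include <stdio.h>

int
main(void)
{
	kstat_ctl_t *kc = kstat_open();
	kstat_t *ksp;
	kstat_named_t *kn;

	if (kc == NULL)
		return (1);
	ksp = kstat_lookup(kc, "daderror", 0, "dad0,error");
	if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1) {
		kn = kstat_data_lookup(ksp, "Soft Errors");
		if (kn != NULL)
			(void) printf("soft errors: %u\n", kn->value.ui32);
	}
	(void) kstat_close(kc);
	return (0);
}
#endif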
/*
 * This was moved here from the DADA layer, since all it does is retry
 * the command when the device is busy or the command fails to complete.
 */
int
dcd_poll(struct dcd_pkt *pkt)
{
	int busy_count, rval = -1, savef;
	clock_t savet;
	void (*savec)();

	/*
	 * Save the old flags
	 */
	savef = pkt->pkt_flags;
	savec = pkt->pkt_comp;
	savet = pkt->pkt_time;

	pkt->pkt_flags |= FLAG_NOINTR;

	/*
	 * Set pkt_comp to NULL
	 */
	pkt->pkt_comp = 0;

	/*
	 * Set the packet time for the polled command
	 */
	if (pkt->pkt_time == 0) {
		pkt->pkt_time = DCD_POLL_TIMEOUT;
	}

	/* Now transport the command */
	for (busy_count = 0; busy_count < dcd_poll_busycnt; busy_count++) {
		if ((rval = dcd_transport(pkt)) == TRAN_ACCEPT) {
			if (pkt->pkt_reason == CMD_INCOMPLETE &&
			    pkt->pkt_state == 0) {
				delay(100);
			} else if (pkt->pkt_reason == CMD_CMPLT) {
				rval = 0;
				break;
			}
		}
		if (rval == TRAN_BUSY) {
			delay(100);
			continue;
		}
	}

	pkt->pkt_flags = savef;
	pkt->pkt_comp = savec;
	pkt->pkt_time = savet;
	return (rval);
}


void
dcd_translate(struct dadkio_status32 *statp, struct udcd_cmd *cmdp)
{
	if (cmdp->udcd_status_reg & STATUS_ATA_BUSY)
		statp->status = DADKIO_STAT_NOT_READY;
	else if (cmdp->udcd_status_reg & STATUS_ATA_DWF)
		statp->status = DADKIO_STAT_HARDWARE_ERROR;
	else if (cmdp->udcd_status_reg & STATUS_ATA_CORR)
		statp->status = DADKIO_STAT_SOFT_ERROR;
	else if (cmdp->udcd_status_reg & STATUS_ATA_ERR) {
		/*
		 * The error register is valid only when BSY and DRQ are
		 * not set. It is assumed that the HBA has checked this
		 * before handing over the data.
		 */
		if (cmdp->udcd_error_reg & ERR_AMNF)
			statp->status = DADKIO_STAT_NOT_FORMATTED;
		else if (cmdp->udcd_error_reg & ERR_TKONF)
			statp->status = DADKIO_STAT_NOT_FORMATTED;
		else if (cmdp->udcd_error_reg & ERR_ABORT)
			statp->status = DADKIO_STAT_ILLEGAL_REQUEST;
		else if (cmdp->udcd_error_reg & ERR_IDNF)
			statp->status = DADKIO_STAT_NOT_FORMATTED;
		else if (cmdp->udcd_error_reg & ERR_UNC)
			statp->status = DADKIO_STAT_BUS_ERROR;
		else if (cmdp->udcd_error_reg & ERR_BBK)
			statp->status = DADKIO_STAT_MEDIUM_ERROR;
	} else
		statp->status = DADKIO_STAT_NO_ERROR;
}
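/*
 * Illustration only, excluded from the build: the precedence that
 * dcd_translate() above applies when mapping ATA status bits to DADKIO
 * status codes - BSY first, then DWF, then CORR, and the error register
 * is decoded only when ERR is set. The bit masks below are restated
 * locally from the standard ATA status register layout; the register
 * value in main() is made up.
 */
#if 0
#include <stdio.h>

#define	ATA_BUSY	0x80	/* BSY */
#define	ATA_DWF		0x20	/* device write fault */
#define	ATA_CORR	0x04	/* corrected data */
#define	ATA_ERR		0x01	/* error register valid */

static const char *
ata_status_class(unsigned char status)
{
	if (status & ATA_BUSY)
		return ("not ready");
	if (status & ATA_DWF)
		return ("hardware error");
	if (status & ATA_CORR)
		return ("soft error");
	if (status & ATA_ERR)
		return ("decode error register");
	return ("no error");
}

int
main(void)
{
	/* BSY set together with ERR: BSY wins, error reg is not decoded */
	(void) printf("%s\n", ata_status_class(0x81));	/* "not ready" */
	return (0);
}
#endif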
static void
dcd_flush_cache(struct dcd_disk *un)
{
	struct dcd_pkt *pkt;
	int retry_count;

	if ((pkt = dcd_init_pkt(ROUTE, NULL, NULL,
	    (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
	    PKT_CONSISTENT, NULL_FUNC, NULL)) == NULL) {
		return;
	}

	makecommand(pkt, 0, ATA_FLUSH_CACHE, 0, ADD_LBA_MODE, 0,
	    NO_DATA_XFER, 0);

	/*
	 * Send the command. It may fail on some disks, since FLUSH CACHE
	 * is not a mandatory command per ATA-4. Retry up to 3 times on
	 * failure; the retry count was chosen arbitrarily. The retry is
	 * needed because, per the spec, FLUSH CACHE can fail when an
	 * unrecoverable error is encountered while writing data, and a
	 * subsequent command should continue flushing the cache.
	 */
	for (retry_count = 0; retry_count < 3; retry_count++) {
		/*
		 * Set the packet fields.
		 */
		pkt->pkt_comp = 0;
		pkt->pkt_time = DCD_POLL_TIMEOUT;
		pkt->pkt_flags |= FLAG_FORCENOINTR;
		pkt->pkt_flags |= FLAG_NOINTR;
		if (dcd_transport(pkt) == TRAN_ACCEPT) {
			if (pkt->pkt_reason == CMD_CMPLT) {
				break;
			}
		}
		/*
		 * Wait one second before retrying; this matches the
		 * delay(100) used in the dcd_poll routine.
		 */
		drv_usecwait(1000000);
	}
	(void) dcd_destroy_pkt(pkt);
}

static int
dcd_send_lb_rw_cmd(dev_info_t *devi, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, uchar_t cmd)
{
	struct dcd_pkt *pkt;
	struct buf *bp;
	diskaddr_t real_addr = start_block;
	size_t buffer_size = reqlength;
	uchar_t command, tmp;
	int i, rval = 0;
	struct dcd_disk *un;

	un = ddi_get_soft_state(dcd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	bp = dcd_alloc_consistent_buf(ROUTE, (struct buf *)NULL,
	    buffer_size, B_READ, NULL_FUNC, NULL);
	if (!bp) {
		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
		    "no bp for disk label\n");
		return (ENOMEM);
	}

	pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
	    bp, (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
	    PKT_CONSISTENT, NULL_FUNC, NULL);

	if (!pkt) {
		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
		    "no memory for disk label\n");
		dcd_free_consistent_buf(bp);
		return (ENOMEM);
	}

	if (cmd == TG_READ) {
		bzero(bp->b_un.b_addr, buffer_size);
		tmp = DATA_READ;
	} else {
		bcopy((caddr_t)bufaddr, bp->b_un.b_addr, buffer_size);
		tmp = DATA_WRITE;
	}

	mutex_enter(DCD_MUTEX);
	if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
		if (cmd == TG_READ) {
			command = ATA_READ_DMA;
		} else {
			command = ATA_WRITE_DMA;
		}
	} else {
		if (cmd == TG_READ) {
			if (un->un_dp->options & BLOCK_MODE)
				command = ATA_READ_MULTIPLE;
			else
				command = ATA_READ;
		} else {
			/* Use the write opcodes on the write path */
			if (un->un_dp->options & BLOCK_MODE)
				command = ATA_WRITE_MULTIPLE;
			else
				command = ATA_WRITE;
		}
	}
	mutex_exit(DCD_MUTEX);
	(void) makecommand(pkt, 0, command, real_addr, ADD_LBA_MODE,
	    buffer_size, tmp, 0);

	for (i = 0; i < 3; i++) {
		if (dcd_poll(pkt) || SCBP_C(pkt) != STATUS_GOOD ||
		    (pkt->pkt_state & STATE_XFERRED_DATA) == 0 ||
		    (pkt->pkt_resid != 0)) {
			DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
			    "Status %x, state %x, resid %lx\n",
			    SCBP_C(pkt), pkt->pkt_state, pkt->pkt_resid);
			rval = EIO;
		} else {
			/* Clear any error left by an earlier attempt */
			rval = 0;
			break;
		}
	}

	if (rval != 0) {
		dcd_destroy_pkt(pkt);
		dcd_free_consistent_buf(bp);
		return (EIO);
	}

	if (cmd == TG_READ) {
		bcopy(bp->b_un.b_addr, bufaddr, reqlength);
		rval = 0;
	}

	dcd_destroy_pkt(pkt);
	dcd_free_consistent_buf(bp);
	return (rval);
}

static int
dcd_compute_dk_capacity(struct dcd_device *devp, diskaddr_t *capacity)
{
	diskaddr_t cap;
	diskaddr_t no_of_lbasec;

	cap = devp->dcd_ident->dcd_fixcyls *
	    devp->dcd_ident->dcd_heads *
	    devp->dcd_ident->dcd_sectors;
	no_of_lbasec = devp->dcd_ident->dcd_addrsec[1];
	no_of_lbasec = no_of_lbasec << 16;
	no_of_lbasec = no_of_lbasec | devp->dcd_ident->dcd_addrsec[0];

	if (no_of_lbasec > cap) {
		cap = no_of_lbasec;
	}

	if (cap != ((uint32_t)-1))
		*capacity = cap;
	else
		return (EINVAL);
	return (0);
}
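/*
 * Illustration only, excluded from the build: dcd_compute_dk_capacity()
 * above takes the larger of the CHS product and the 28-bit LBA count
 * assembled from the two identify address-sector words (low word
 * first). A user-level sketch with made-up identify values:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint16_t cyls = 16383, heads = 16, sectors = 63;
	uint16_t addrsec[2] = { 0x5678, 0x0123 };	/* low, high words */
	uint64_t chs = (uint64_t)cyls * heads * sectors;
	uint64_t lba = ((uint64_t)addrsec[1] << 16) | addrsec[0];
	uint64_t cap = (lba > chs) ? lba : chs;

	(void) printf("capacity: %llu sectors\n",
	    (unsigned long long)cap);
	return (0);
}
#endif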
4661 */ 4662 4663 calculated_cylinders = (capacity) / 4664 (phygeomp->g_nhead * phygeomp->g_nsect); 4665 if (calculated_cylinders >= USHRT_MAX) { 4666 phygeomp->g_ncyl = USHRT_MAX - 2; 4667 } else { 4668 phygeomp->g_ncyl = 4669 calculated_cylinders - 2; 4670 } 4671 } 4672 4673 phygeomp->g_capacity = capacity; 4674 phygeomp->g_intrlv = 0; 4675 phygeomp->g_rpm = 5400; 4676 phygeomp->g_secsize = devp->dcd_ident->dcd_secsiz; 4677 4678 return (0); 4679 } else 4680 return (ENOTSUP); 4681 } else { 4682 return (EINVAL); 4683 } 4684 } 4685 4686 4687 /*ARGSUSED3*/ 4688 static int 4689 dcd_lb_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie) 4690 { 4691 struct dcd_disk *un; 4692 4693 un = ddi_get_soft_state(dcd_state, ddi_get_instance(devi)); 4694 4695 if (un == NULL) 4696 return (ENXIO); 4697 4698 switch (cmd) { 4699 case TG_GETPHYGEOM: 4700 return (dcd_lb_getphygeom(devi, (cmlb_geom_t *)arg)); 4701 4702 case TG_GETVIRTGEOM: 4703 return (-1); 4704 4705 case TG_GETCAPACITY: 4706 case TG_GETBLOCKSIZE: 4707 mutex_enter(DCD_MUTEX); 4708 if (un->un_diskcapacity <= 0) { 4709 mutex_exit(DCD_MUTEX); 4710 dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, 4711 "invalid disk capacity\n"); 4712 return (EIO); 4713 } 4714 if (cmd == TG_GETCAPACITY) 4715 *(diskaddr_t *)arg = un->un_diskcapacity; 4716 else 4717 *(uint32_t *)arg = DEV_BSIZE; 4718 4719 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "capacity %x\n", 4720 un->un_diskcapacity); 4721 mutex_exit(DCD_MUTEX); 4722 return (0); 4723 4724 case TG_GETATTR: 4725 mutex_enter(DCD_MUTEX); 4726 *(tg_attribute_t *)arg = un->un_tgattribute; 4727 DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, 4728 "media_is_writable %x\n", 4729 un->un_tgattribute.media_is_writable); 4730 mutex_exit(DCD_MUTEX); 4731 return (0); 4732 default: 4733 return (ENOTTY); 4734 } 4735 } 4736