Lines matching refs:un (cross-reference hits for the per-instance struct dcd_disk soft-state pointer, un, in the dad/dcd direct-attached disk driver)

83 static void dcd_free_softstate(struct dcd_disk *un, dev_info_t *devi);
85 static int dcd_validate_geometry(struct dcd_disk *un);
86 static ddi_devid_t dcd_get_devid(struct dcd_disk *un);
87 static ddi_devid_t dcd_create_devid(struct dcd_disk *un);
88 static int dcd_make_devid_from_serial(struct dcd_disk *un);
90 static int dcd_read_deviceid(struct dcd_disk *un);
91 static int dcd_write_deviceid(struct dcd_disk *un);
94 static void dcd_flush_cache(struct dcd_disk *un);
106 static void dcdstart(struct dcd_disk *un);
107 static void dcddone_and_mutex_exit(struct dcd_disk *un, struct buf *bp);
108 static void make_dcd_cmd(struct dcd_disk *un, struct buf *bp, int (*f)());
115 struct dcd_disk *un);
117 static int dcd_handle_incomplete(struct dcd_disk *un, struct buf *bp);
118 static void dcd_offline(struct dcd_disk *un, int bechatty);
119 static int dcd_ready_and_valid(dev_t dev, struct dcd_disk *un);
120 static void dcd_reset_disk(struct dcd_disk *un, struct dcd_pkt *pkt);
149 static int dcd_check_error(struct dcd_disk *un, struct buf *bp);
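
Every routine declared above takes the per-instance struct dcd_disk pointer, which the driver keeps in a DDI soft-state table and fetches by instance number, as the ddi_get_soft_state() hits further down show. A minimal sketch of that lookup idiom; ddi_get_instance() and ddi_get_soft_state() are the real DDI calls, everything else is an invented name for illustration:

    #include <sys/ddi.h>
    #include <sys/sunddi.h>

    /* Illustrative soft-state table; set up with ddi_soft_state_init(). */
    static void *my_state;

    struct my_unit {
            int     mu_ncmds;       /* invented per-instance fields */
            int     mu_throttle;
    };

    static struct my_unit *
    lookup_unit(dev_info_t *devi)
    {
            int instance = ddi_get_instance(devi);

            /* Returns NULL if attach never allocated this instance. */
            return (ddi_get_soft_state(my_state, instance));
    }
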
252 #define IOSP KSTAT_IO_PTR(un->un_stats)
253 #define IO_PARTITION_STATS un->un_pstats[DCDPART(bp->b_edev)]
256 #define DCD_DO_KSTATS(un, kstat_function, bp) \ argument
258 if (bp != un->un_sbufp) { \
259 if (un->un_stats) { \
267 #define DCD_DO_ERRSTATS(un, x) \ argument
268 if (un->un_errstats) { \
270 dtp = (struct dcd_errstats *)un->un_errstats->ks_data; \
275 struct dcd_disk *un; \
281 if ((un = ddi_get_soft_state(dcd_state, instance)) == NULL) \
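
The statistics macros above only touch the kstat structures when the pointers are non-NULL, so nothing is counted before kstat_create() has run or for the special sbuf. A hedged, userland-compilable sketch of the same guard pattern, with invented struct and field names:

    #include <stdint.h>

    struct err_counters {
            uint32_t softerrs;
            uint32_t harderrs;
            uint32_t transerrs;
    };

    struct unit {
            struct err_counters *errstats;  /* NULL until stats are created */
    };

    /* Bump a counter only when the stats block exists. */
    #define UNIT_DO_ERRSTATS(un, field)                             \
            do {                                                    \
                    if ((un)->errstats != NULL)                     \
                            (un)->errstats->field++;                \
            } while (0)

    static void
    note_transport_error(struct unit *un)
    {
            UNIT_DO_ERRSTATS(un, transerrs);
    }
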
475 struct dcd_disk *un; in dcdattach() local
493 if (!(un = ddi_get_soft_state(dcd_state, instance))) in dcdattach()
496 Restore_state(un); in dcdattach()
501 un->un_last_state = un->un_save_state; in dcdattach()
502 un->un_throttle = 2; in dcdattach()
503 cv_broadcast(&un->un_suspend_cv); in dcdattach()
523 dp = &un->un_utab; in dcdattach()
525 dcdstart(un); in dcdattach()
539 if (!(un = (struct dcd_disk *) in dcdattach()
543 devp->dcd_private = (ataopaque_t)un; in dcdattach()
584 cmlb_alloc_handle(&un->un_dklbhandle); in dcdattach()
593 un->un_dklbhandle, in dcdattach()
595 cmlb_free_handle(&un->un_dklbhandle); in dcdattach()
596 dcd_free_softstate(un, devi); in dcdattach()
601 (void) dcd_validate_geometry(un); in dcdattach()
604 if (dcd_get_devid(un) == NULL) { in dcdattach()
606 (void) dcd_create_devid(un); in dcdattach()
614 dcd_free_softstate(struct dcd_disk *un, dev_info_t *devi) in dcd_free_softstate() argument
621 if (un) { in dcd_free_softstate()
622 sema_destroy(&un->un_semoclose); in dcd_free_softstate()
623 cv_destroy(&un->un_sbuf_cv); in dcd_free_softstate()
624 cv_destroy(&un->un_state_cv); in dcd_free_softstate()
625 cv_destroy(&un->un_disk_busy_cv); in dcd_free_softstate()
626 cv_destroy(&un->un_suspend_cv); in dcd_free_softstate()
631 if (un->un_sbufp) in dcd_free_softstate()
632 freerbuf(un->un_sbufp); in dcd_free_softstate()
633 if (un->un_dp) { in dcd_free_softstate()
634 kmem_free((caddr_t)un->un_dp, sizeof (*un->un_dp)); in dcd_free_softstate()
640 if (un->un_devid) { in dcd_free_softstate()
641 ddi_devid_free(un->un_devid); in dcd_free_softstate()
642 un->un_devid = NULL; in dcd_free_softstate()
649 if (un->un_stats) { in dcd_free_softstate()
650 kstat_delete(un->un_stats); in dcd_free_softstate()
671 struct dcd_disk *un; in dcddetach() local
675 if (!(un = ddi_get_soft_state(dcd_state, instance))) in dcddetach()
684 if (un->un_state == DCD_STATE_SUSPENDED) { in dcddetach()
688 un->un_throttle = 0; in dcddetach()
692 un->un_save_state = un->un_last_state; in dcddetach()
694 New_state(un, DCD_STATE_SUSPENDED); in dcddetach()
708 while (un->un_ncmds) { in dcddetach()
709 if (cv_timedwait(&un->un_disk_busy_cv, in dcddetach()
718 Restore_state(un); in dcddetach()
739 struct dcd_disk *un; in dcdreset() local
744 if (!(un = ddi_get_soft_state(dcd_state, instance))) in dcdreset()
747 dcd_flush_cache(un); in dcdreset()
757 struct dcd_disk *un; in dcd_dr_detach() local
768 un = (struct dcd_disk *)devp->dcd_private; in dcd_dr_detach()
780 if (un->un_ncmds) { in dcd_dr_detach()
788 cmlb_detach(un->un_dklbhandle, 0); in dcd_dr_detach()
789 cmlb_free_handle(&un->un_dklbhandle); in dcd_dr_detach()
804 dcd_free_softstate(un, devi); in dcd_dr_detach()
813 struct dcd_disk *un; in dcdpower() local
820 if (!(un = ddi_get_soft_state(dcd_state, instance)) || in dcdpower()
832 if ((un->un_ncmds) && (level != DCD_DEVICE_ACTIVE)) { in dcdpower()
837 if ((un->un_state == DCD_STATE_OFFLINE) || in dcdpower()
838 (un->un_state == DCD_STATE_FATAL)) { in dcdpower()
848 un->un_power_level = DCD_DEVICE_ACTIVE; in dcdpower()
849 if (un->un_state == DCD_STATE_PM_SUSPENDED) in dcdpower()
850 Restore_state(un); in dcdpower()
883 if (un->un_state != DCD_STATE_PM_SUSPENDED) in dcdpower()
884 New_state(un, DCD_STATE_PM_SUSPENDED); in dcdpower()
885 un->un_power_level = level; in dcdpower()
896 struct dcd_disk *un = (struct dcd_disk *)0; in dcd_doattach() local
941 un = ddi_get_soft_state(dcd_state, instance); in dcd_doattach()
943 un->un_sbufp = getrbuf(km_flags); in dcd_doattach()
944 if (un->un_sbufp == (struct buf *)NULL) { in dcd_doattach()
950 un->un_dcd = devp; in dcd_doattach()
951 un->un_power_level = -1; in dcd_doattach()
952 un->un_tgattribute.media_is_writable = 1; in dcd_doattach()
954 sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL); in dcd_doattach()
955 cv_init(&un->un_sbuf_cv, NULL, CV_DRIVER, NULL); in dcd_doattach()
956 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL); in dcd_doattach()
958 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL); in dcd_doattach()
959 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL); in dcd_doattach()
961 if (un->un_dp == 0) { in dcd_doattach()
966 un->un_dp = (struct dcd_drivetype *) in dcd_doattach()
968 if (!un->un_dp) { in dcd_doattach()
974 un->un_dp->ctype = CTYPE_DISK; in dcd_doattach()
980 un->un_dp->name = "CCS"; in dcd_doattach()
981 un->un_dp->options = 0; in dcd_doattach()
987 un->un_secsize = DEV_BSIZE; in dcd_doattach()
995 un->un_diskcapacity = capacity; in dcd_doattach()
996 un->un_lbasize = DEV_BSIZE; in dcd_doattach()
1025 un->un_dp->options |= DMA_SUPPORTTED; in dcd_doattach()
1026 un->un_dp->dma_mode = (options >> 3) & 0x03; in dcd_doattach()
1028 "mode %x\n", un->un_dp->dma_mode); in dcd_doattach()
1030 un->un_dp->options &= ~DMA_SUPPORTTED; in dcd_doattach()
1031 un->un_dp->pio_mode = options & 0x7; in dcd_doattach()
1033 un->un_dp->options |= BLOCK_MODE; in dcd_doattach()
1035 "mode %x\n", un->un_dp->pio_mode); in dcd_doattach()
1038 "options %x,", un->un_dp->options); in dcd_doattach()
1041 un->un_throttle = 2; in dcd_doattach()
1046 un->un_max_xfer_size = MAX_ATA_XFER_SIZE; in dcd_doattach()
1056 un->un_write_cache_enabled = (devp->dcd_ident->dcd_majvers == 0xffff) || in dcd_doattach()
1068 dcd_free_softstate(un, devi); in dcd_doattach()
1077 dcd_set_multiple(struct dcd_disk *un) in dcd_set_multiple() argument
1097 cdb.size = un->un_lbasize * un->un_dp->block_factor; in dcd_set_multiple()
1121 dcd_set_features(struct dcd_disk *un, uchar_t mode) in dcd_set_features() argument
1143 cdb.size = un->un_lbasize * mode; in dcd_set_features()
1173 dcd_validate_geometry(struct dcd_disk *un) in dcd_validate_geometry() argument
1184 if (un->un_lbasize < 0) { in dcd_validate_geometry()
1188 if (un->un_state == DCD_STATE_PM_SUSPENDED) { in dcd_validate_geometry()
1198 secsize = un->un_secsize; in dcd_validate_geometry()
1205 un->un_secdiv = secdiv; in dcd_validate_geometry()
1212 devp = un->un_dcd; in dcd_validate_geometry()
1217 rval = cmlb_validate(un->un_dklbhandle, 0, 0); in dcd_validate_geometry()
1231 secsize = un->un_lbasize; in dcd_validate_geometry()
1234 un->un_lbadiv = secdiv; in dcd_validate_geometry()
1240 secsize = un->un_lbasize >> DEV_BSHIFT; in dcd_validate_geometry()
1243 un->un_blknoshift = secdiv; in dcd_validate_geometry()
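
dcd_validate_geometry() caches shift counts (un_secdiv, un_lbadiv, un_blknoshift) derived from the sector and LBA sizes so later code can shift instead of divide. A small sketch of deriving such a shift, assuming power-of-two sizes as the listing implies; the function name and error convention are illustrative:

    /* Return log2(secsize) for a power of two, -1 otherwise. */
    static int
    size_to_shift(unsigned int secsize)
    {
            int shift = 0;
            unsigned int s = secsize;

            if (s == 0)
                    return (-1);
            while ((s & 1) == 0) {
                    s >>= 1;
                    shift++;
            }
            return (s == 1 ? shift : -1);   /* 512 -> 9, 1024 -> 10 */
    }
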
1279 sema_p(&un->un_semoclose); in dcdopen()
1283 if ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL) { in dcdopen()
1288 while (un->un_state == DCD_STATE_SUSPENDED) { in dcdopen()
1289 cv_wait(&un->un_suspend_cv, DCD_MUTEX); in dcdopen()
1292 if ((un->un_state == DCD_STATE_PM_SUSPENDED) && (!nodelay)) { in dcdopen()
1307 un->un_cmd_flags = 0; in dcdopen()
1309 un->un_cmd_stat_size = 2; in dcdopen()
1312 (void *)un); in dcdopen()
1318 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]); in dcdopen()
1321 un->un_exclopen, partmask); in dcdopen()
1323 if (un->un_exclopen & (partmask)) { in dcdopen()
1333 if (un->un_ocmap.lyropen[part]) { in dcdopen()
1337 if (un->un_ocmap.regopen[i] & (partmask)) { in dcdopen()
1345 sema_v(&un->un_semoclose); in dcdopen()
1356 if ((rval = dcd_ready_and_valid(dev, un)) != 0) { in dcdopen()
1364 if (rval || cmlb_partinfo(un->un_dklbhandle, in dcdopen()
1375 un->un_ocmap.lyropen[part]++; in dcdopen()
1377 un->un_ocmap.regopen[otyp] |= partmask; in dcdopen()
1384 un->un_exclopen |= (partmask); in dcdopen()
1399 if (un->un_stats == (kstat_t *)0) { in dcdopen()
1400 un->un_stats = kstat_create("dad", instance, in dcdopen()
1403 if (un->un_stats) { in dcdopen()
1404 un->un_stats->ks_lock = DCD_MUTEX; in dcdopen()
1405 kstat_install(un->un_stats); in dcdopen()
1414 if ((un->un_pstats[i] == (kstat_t *)0) && in dcdopen()
1415 (cmlb_partinfo(un->un_dklbhandle, in dcdopen()
1420 un->un_pstats[i] = kstat_create("dad", in dcdopen()
1427 if (un->un_pstats[i]) { in dcdopen()
1428 un->un_pstats[i]->ks_lock = in dcdopen()
1430 kstat_install(un->un_pstats[i]); in dcdopen()
1438 (void) dcd_create_errstats(un, instance); in dcdopen()
1444 sema_v(&un->un_semoclose); in dcdopen()
1450 sema_v(&un->un_semoclose); in dcdopen()
1459 dcd_ready_and_valid(dev_t dev, struct dcd_disk *un) in dcd_ready_and_valid() argument
1468 if (un->un_ncmds == 0) { in dcd_ready_and_valid()
1475 if (un->un_state == DCD_STATE_NORMAL) { in dcd_ready_and_valid()
1478 dcd_offline(un, 1); in dcd_ready_and_valid()
1483 if (un->un_format_in_progress == 0) { in dcd_ready_and_valid()
1484 g_error = dcd_validate_geometry(un); in dcd_ready_and_valid()
1501 un->un_mediastate = DKIO_INSERTED; in dcd_ready_and_valid()
1502 cv_broadcast(&un->un_state_cv); in dcd_ready_and_valid()
1527 sema_p(&un->un_semoclose); in dcdclose()
1531 if (un->un_exclopen & (1<<part)) { in dcdclose()
1532 un->un_exclopen &= ~(1<<part); in dcdclose()
1536 un->un_ocmap.lyropen[part] -= 1; in dcdclose()
1538 un->un_ocmap.regopen[otyp] &= ~(1<<part); in dcdclose()
1541 cp = &un->un_ocmap.chkd[0]; in dcdclose()
1542 while (cp < &un->un_ocmap.chkd[OCSIZE]) { in dcdclose()
1549 if (cp == &un->un_ocmap.chkd[OCSIZE]) { in dcdclose()
1551 if (un->un_state == DCD_STATE_OFFLINE) { in dcdclose()
1552 dcd_offline(un, 1); in dcdclose()
1556 (void) cmlb_close(un->un_dklbhandle, 0); in dcdclose()
1559 if (un->un_stats) { in dcdclose()
1560 kstat_delete(un->un_stats); in dcdclose()
1561 un->un_stats = 0; in dcdclose()
1564 if (un->un_pstats[i]) { in dcdclose()
1565 kstat_delete(un->un_pstats[i]); in dcdclose()
1566 un->un_pstats[i] = (kstat_t *)0; in dcdclose()
1570 if (un->un_errstats) { in dcdclose()
1571 kstat_delete(un->un_errstats); in dcdclose()
1572 un->un_errstats = (kstat_t *)0; in dcdclose()
1582 sema_v(&un->un_semoclose); in dcdclose()
1587 dcd_offline(struct dcd_disk *un, int bechatty) in dcd_offline() argument
1593 cmlb_invalidate(un->un_dklbhandle, 0); in dcd_offline()
1606 struct dcd_disk *un; in dcdinfo() local
1614 if ((un = ddi_get_soft_state(dcd_state, instance)) == NULL) in dcdinfo()
1639 struct dcd_disk *un; in dcd_prop_op() local
1641 if ((un = ddi_get_soft_state(dcd_state, ddi_get_instance(dip))) == NULL) in dcd_prop_op()
1645 return (cmlb_prop_op(un->un_dklbhandle, in dcd_prop_op()
1664 struct dcd_disk *un; in dcdmin() local
1668 un = ddi_get_soft_state(dcd_state, instance); in dcdmin()
1670 if (bp->b_bcount > un->un_max_xfer_size) in dcdmin()
1671 bp->b_bcount = un->un_max_xfer_size; in dcdmin()
1684 secmask = un->un_secsize - 1; in dcdread()
1689 un->un_secsize); in dcdread()
1693 "transfer length not modulo %d\n", un->un_secsize); in dcdread()
1709 secmask = un->un_secsize - 1; in dcdaread()
1714 un->un_secsize); in dcdaread()
1718 "transfer length not modulo %d\n", un->un_secsize); in dcdaread()
1733 secmask = un->un_secsize - 1; in dcdwrite()
1738 un->un_secsize); in dcdwrite()
1742 "transfer length not modulo %d\n", un->un_secsize); in dcdwrite()
1759 secmask = un->un_secsize - 1; in dcdawrite()
1764 un->un_secsize); in dcdawrite()
1768 "transfer length not modulo %d\n", un->un_secsize); in dcdawrite()
1780 struct dcd_disk *un; in dcdstrategy() local
1788 if ((un = ddi_get_soft_state(dcd_state, in dcdstrategy()
1790 un->un_state == DCD_STATE_DUMPING || in dcdstrategy()
1791 ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL)) { in dcdstrategy()
1792 SET_BP_ERROR(bp, ((un) ? ENXIO : EIO)); in dcdstrategy()
1813 if (bp->b_bcount > un->un_max_xfer_size) { in dcdstrategy()
1819 "dcdstrategy_start: bp 0x%p un 0x%p", bp, un); in dcdstrategy()
1826 while (un->un_state == DCD_STATE_SUSPENDED) { in dcdstrategy()
1827 cv_wait(&un->un_suspend_cv, DCD_MUTEX); in dcdstrategy()
1830 if (un->un_state == DCD_STATE_PM_SUSPENDED) { in dcdstrategy()
1856 if (bp != un->un_sbufp) { in dcdstrategy()
1857 validated: if (cmlb_partinfo(un->un_dklbhandle, in dcdstrategy()
1892 } else if (bp->b_bcount & (un->un_secsize-1)) { in dcdstrategy()
1933 if (dcd_ready_and_valid(bp->b_edev, un) == 0) { in dcdstrategy()
1974 DCD_DO_KSTATS(un, kstat_waitq_enter, bp); in dcdstrategy()
1976 dp = &un->un_utab; in dcdstrategy()
1981 } else if ((un->un_state == DCD_STATE_SUSPENDED) && in dcdstrategy()
1982 bp == un->un_sbufp) { in dcdstrategy()
1988 dp, bp, un); in dcdstrategy()
1996 un->un_ncmds, un->un_throttle, (void *)dp->b_forw); in dcdstrategy()
1997 ASSERT(un->un_ncmds >= 0); in dcdstrategy()
1998 ASSERT(un->un_throttle >= 0); in dcdstrategy()
1999 if ((un->un_ncmds < un->un_throttle) && (dp->b_forw == NULL)) { in dcdstrategy()
2000 dcdstart(un); in dcdstrategy()
2013 make_dcd_cmd(un, cmd_bp, NULL_FUNC); in dcdstrategy()
2025 if ((un->un_ncmds < un->un_throttle) && in dcdstrategy()
2027 dcdstart(un); in dcdstrategy()
2046 dcdstart(struct dcd_disk *un) in dcdstart() argument
2051 uchar_t state = un->un_last_state; in dcdstart()
2053 TRACE_1(TR_FAC_DADA, TR_DCDSTART_START, "dcdstart_start: un 0x%p", un); in dcdstart()
2058 dp = &un->un_utab; in dcdstart()
2076 un->un_ncmds++; in dcdstart()
2084 DCD_DO_KSTATS(un, kstat_waitq_to_runq, bp); in dcdstart()
2089 make_dcd_cmd(un, bp, dcdrunout); in dcdstart()
2092 DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp); in dcdstart()
2096 New_state(un, DCD_STATE_RWAIT); in dcdstart()
2097 un->un_ncmds--; in dcdstart()
2104 DCD_DO_KSTATS(un, kstat_runq_exit, bp); in dcdstart()
2106 un->un_ncmds--; in dcdstart()
2115 un->un_state = un->un_last_state; in dcdstart()
2116 un->un_last_state = state; in dcdstart()
2122 if (un->un_state == DCD_STATE_SUSPENDED) { in dcdstart()
2123 cv_broadcast(&un->un_disk_busy_cv); in dcdstart()
2126 if ((un->un_ncmds < un->un_throttle) && in dcdstart()
2155 un->un_ncmds--; in dcdstart()
2159 DCD_DO_ERRSTATS(un, dcd_transerrs); in dcdstart()
2160 DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp); in dcdstart()
2161 dcd_handle_tran_busy(bp, dp, un); in dcdstart()
2162 if (un->un_ncmds > 0) { in dcdstart()
2166 DCD_DO_KSTATS(un, kstat_runq_exit, bp); in dcdstart()
2174 if (bp != un->un_sbufp) { in dcdstart()
2180 if (un->un_state == DCD_STATE_SUSPENDED) { in dcdstart()
2181 cv_broadcast(&un->un_disk_busy_cv); in dcdstart()
2183 if ((un->un_ncmds < un->un_throttle) && in dcdstart()
2203 make_dcd_cmd(un, cmd_bp, NULL_FUNC); in dcdstart()
2214 if ((un->un_ncmds < un->un_throttle) && in dcdstart()
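
dcdstrategy() and dcdstart() gate command submission on un_ncmds staying below un_throttle and on nothing being parked at dp->b_forw for retry; dcdstart() bumps un_ncmds before building the packet and backs it out on allocation or transport failure. A minimal sketch of that gate; all names are invented, and the real driver does this under the unit mutex with struct diskhd and struct buf:

    struct queue_gate {
            int     ncmds;          /* commands outstanding at the HBA */
            int     throttle;       /* maximum allowed outstanding     */
            void    *parked;        /* command waiting for resources   */
    };

    static int
    try_issue(struct queue_gate *q)
    {
            if (q->ncmds >= q->throttle || q->parked != NULL)
                    return (0);
            q->ncmds++;             /* counted before the packet is built */
            /* ... build and transport the command; on failure q->ncmds-- ... */
            return (1);
    }
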
2230 make_dcd_cmd(struct dcd_disk *un, struct buf *bp, int (*func)()) in make_dcd_cmd() argument
2238 "make_dcd_cmd_start: un 0x%p bp 0x%p un 0x%p", un, bp, un); in make_dcd_cmd()
2241 flags = un->un_cmd_flags; in make_dcd_cmd()
2243 if (bp != un->un_sbufp) { in make_dcd_cmd()
2252 if (cmlb_partinfo(un->un_dklbhandle, in make_dcd_cmd()
2277 secnt = (bp->b_bcount + (un->un_secsize - 1)) >> un->un_secdiv; in make_dcd_cmd()
2283 resid = (secnt - count) << un->un_secdiv; in make_dcd_cmd()
2303 if (un->un_lbasize != un->un_secsize) { in make_dcd_cmd()
2304 blkno >>= un->un_blknoshift; in make_dcd_cmd()
2305 count >>= un->un_blknoshift; in make_dcd_cmd()
2313 un->un_cmd_stat_size, PP_LEN, PKT_CONSISTENT, in make_dcd_cmd()
2314 func, (caddr_t)un); in make_dcd_cmd()
2326 if ((un->un_dp->options & DMA_SUPPORTTED) == in make_dcd_cmd()
2330 if (un->un_dp->options & BLOCK_MODE) in make_dcd_cmd()
2337 if ((un->un_dp->options & DMA_SUPPORTTED) == in make_dcd_cmd()
2341 if (un->un_dp->options & BLOCK_MODE) in make_dcd_cmd()
2377 2, PP_LEN, PKT_CONSISTENT, func, (caddr_t)un); in make_dcd_cmd()
2414 struct dcd_disk *un; in dcdintr() local
2420 un = ddi_get_soft_state(dcd_state, DCDUNIT(bp->b_edev)); in dcdintr()
2422 TRACE_1(TR_FAC_DADA, TR_DCDINTR_START, "dcdintr_start: un 0x%p", un); in dcdintr()
2426 un->un_ncmds--; in dcdintr()
2427 DCD_DO_KSTATS(un, kstat_runq_exit, bp); in dcdintr()
2428 ASSERT(un->un_ncmds >= 0); in dcdintr()
2439 if (un->un_state == DCD_STATE_OFFLINE) { in dcdintr()
2440 un->un_state = un->un_last_state; in dcdintr()
2451 DCD_DO_ERRSTATS(un, dcd_harderrs); in dcdintr()
2482 action = dcd_handle_incomplete(un, bp); in dcdintr()
2502 if ((un->un_last_pkt_reason != pkt->pkt_reason) && in dcdintr()
2505 un->un_last_pkt_reason = pkt->pkt_reason; in dcdintr()
2520 un->un_flush_not_supported = 1; in dcdintr()
2528 dcddone_and_mutex_exit(un, bp); in dcdintr()
2535 if (un->un_ncmds >= un->un_throttle) { in dcdintr()
2536 struct diskhd *dp = &un->un_utab; in dcdintr()
2541 DCD_DO_KSTATS(un, kstat_waitq_enter, bp); in dcdintr()
2547 un->un_ncmds++; in dcdintr()
2550 DCD_DO_KSTATS(un, kstat_runq_enter, bp); in dcdintr()
2553 struct diskhd *dp = &un->un_utab; in dcdintr()
2556 un->un_ncmds--; in dcdintr()
2558 DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp); in dcdintr()
2559 dcd_handle_tran_busy(bp, dp, un); in dcdintr()
2563 DCD_DO_ERRSTATS(un, dcd_transerrs); in dcdintr()
2564 DCD_DO_KSTATS(un, kstat_runq_exit, bp); in dcdintr()
2571 dcddone_and_mutex_exit(un, bp); in dcdintr()
2578 DCD_DO_KSTATS(un, kstat_waitq_enter, bp); in dcdintr()
2592 dcddone_and_mutex_exit(struct dcd_disk *un, register struct buf *bp) in dcddone_and_mutex_exit() argument
2596 TRACE_1(TR_FAC_DADA, TR_DCDONE_START, "dcddone_start: un 0x%p", un); in dcddone_and_mutex_exit()
2598 _NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(&un->un_dcd->dcd_mutex)); in dcddone_and_mutex_exit()
2600 dp = &un->un_utab; in dcddone_and_mutex_exit()
2605 if (un->un_stats) { in dcddone_and_mutex_exit()
2629 if (un->un_state == DCD_STATE_SUSPENDED) { in dcddone_and_mutex_exit()
2630 cv_broadcast(&un->un_disk_busy_cv); in dcddone_and_mutex_exit()
2631 } else if (dp->b_actf && (un->un_ncmds < un->un_throttle) && in dcddone_and_mutex_exit()
2632 (dp->b_forw == NULL && un->un_state != DCD_STATE_SUSPENDED)) { in dcddone_and_mutex_exit()
2633 dcdstart(un); in dcddone_and_mutex_exit()
2638 if (bp != un->un_sbufp) { in dcddone_and_mutex_exit()
2643 ASSERT(un->un_sbuf_busy); in dcddone_and_mutex_exit()
2661 dcd_reset_disk(struct dcd_disk *un, struct dcd_pkt *pkt) in dcd_reset_disk() argument
2675 dcd_handle_incomplete(struct dcd_disk *un, struct buf *bp) in dcd_handle_incomplete() argument
2682 int be_chatty = (un->un_state != DCD_STATE_SUSPENDED) && in dcd_handle_incomplete()
2683 (bp != un->un_sbufp || !(pkt->pkt_flags & FLAG_SILENT)); in dcd_handle_incomplete()
2697 action = dcd_check_error(un, bp); in dcd_handle_incomplete()
2698 DCD_DO_ERRSTATS(un, dcd_transerrs); in dcd_handle_incomplete()
2700 (void) dcd_reset_disk(un, pkt); in dcd_handle_incomplete()
2718 DCD_DO_ERRSTATS(un, dcd_transerrs); in dcd_handle_incomplete()
2721 (void) dcd_reset_disk(un, pkt); in dcd_handle_incomplete()
2738 if (bp == un->un_sbufp && (pkt->pkt_flags & FLAG_DIAGNOSE)) { in dcd_handle_incomplete()
2753 if (un->un_state != DCD_STATE_OFFLINE) { in dcd_handle_incomplete()
2756 New_state(un, DCD_STATE_OFFLINE); in dcd_handle_incomplete()
2766 New_state(un, DCD_STATE_FATAL); in dcd_handle_incomplete()
2770 if (((pkt->pkt_reason != un->un_last_pkt_reason) && in dcd_handle_incomplete()
2789 dcd_check_error(struct dcd_disk *un, struct buf *bp) in dcd_check_error() argument
2791 struct diskhd *dp = &un->un_utab; in dcd_check_error()
2898 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un)) in dcddump()
2900 if ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL) in dcddump()
2903 if (cmlb_partinfo(un->un_dklbhandle, DCDPART(dev), in dcddump()
2912 if ((un->un_state == DCD_STATE_SUSPENDED) || in dcddump()
2913 (un->un_state == DCD_STATE_PM_SUSPENDED)) { in dcddump()
2924 un->un_throttle = 0; in dcddump()
2926 if ((un->un_state != DCD_STATE_SUSPENDED) && in dcddump()
2927 (un->un_state != DCD_STATE_DUMPING)) { in dcddump()
2929 New_state(un, DCD_STATE_DUMPING); in dcddump()
2998 if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) { in dcddump()
3001 if (un->un_dp->options & BLOCK_MODE) in dcddump()
3008 (int)nblk*un->un_secsize, DATA_WRITE, 0); in dcddump()
3024 (void) dcd_reset_disk(un, pkt); in dcddump()
3107 switch (un->un_dp->ctype) { in dcdioctl()
3128 info->dki_maxtransfer = un->un_max_xfer_size / DEV_BSIZE; in dcdioctl()
3152 media_info.dki_lbsize = un->un_lbasize; in dcdioctl()
3153 media_info.dki_capacity = un->un_diskcapacity; in dcdioctl()
3167 if (un->un_ncmds == 0) { in dcdioctl()
3175 err = cmlb_ioctl(un->un_dklbhandle, dev, cmd, in dcdioctl()
3189 err = cmlb_ioctl(un->un_dklbhandle, dev, cmd, in dcdioctl()
3234 if ((un->un_dp->options & DMA_SUPPORTTED) == in dcdioctl()
3243 if ((un->un_dp->options & DMA_SUPPORTTED) == in dcdioctl()
3410 if (un->un_flush_not_supported || in dcdioctl()
3411 ! un->un_write_cache_enabled) { in dcdioctl()
3412 i = un->un_flush_not_supported ? ENOTSUP : 0; in dcdioctl()
3437 while (un->un_sbuf_busy) { in dcdioctl()
3438 cv_wait(&un->un_sbuf_cv, DCD_MUTEX); in dcdioctl()
3440 un->un_sbuf_busy = 1; in dcdioctl()
3441 bp = un->un_sbufp; in dcdioctl()
3446 2, PP_LEN, PKT_CONSISTENT, SLEEP_FUNC, (caddr_t)un); in dcdioctl()
3449 makecommand(pkt, un->un_cmd_flags | FLAG_SILENT, in dcdioctl()
3500 struct dcd_disk *un = ddi_get_soft_state(dcd_state, in dcdflushdone() local
3505 ASSERT(un != NULL); in dcdflushdone()
3506 ASSERT(bp == un->un_sbufp); in dcdflushdone()
3524 un->un_sbuf_busy = 0; in dcdflushdone()
3525 cv_signal(&un->un_sbuf_cv); in dcdflushdone()
3544 struct dcd_disk *un; in dcdrunout() local
3551 un = (struct dcd_disk *)arg; in dcdrunout()
3552 dp = &un->un_utab; in dcdrunout()
3558 ASSERT(un != NULL); in dcdrunout()
3560 if ((un->un_ncmds < un->un_throttle) && (dp->b_forw == NULL)) { in dcdrunout()
3561 dcdstart(un); in dcdrunout()
3563 if (un->un_state == DCD_STATE_RWAIT) { in dcdrunout()
3669 while (un->un_sbuf_busy) { in dcdioctl_cmd()
3670 if (cv_wait_sig(&un->un_sbuf_cv, DCD_MUTEX) == 0) { in dcdioctl_cmd()
3678 un->un_sbuf_busy = 1; in dcdioctl_cmd()
3679 bp = un->un_sbufp; in dcdioctl_cmd()
3739 un->un_sbuf_busy = 0; in dcdioctl_cmd()
3740 cv_signal(&un->un_sbuf_cv); in dcdioctl_cmd()
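
dcdioctl(), dcdioctl_cmd(), and dcdflushdone() serialize use of the single special buffer with the un_sbuf_busy flag and un_sbuf_cv, waited on under the unit mutex and signalled on completion. A sketch of that gate using the kernel synchronization primitives; the structure and function names here are illustrative:

    #include <sys/ksynch.h>

    struct sbuf_gate {
            kmutex_t        lock;
            kcondvar_t      cv;
            int             busy;
    };

    static void
    sbuf_acquire(struct sbuf_gate *g)
    {
            mutex_enter(&g->lock);
            while (g->busy)
                    cv_wait(&g->cv, &g->lock);
            g->busy = 1;
            mutex_exit(&g->lock);
    }

    static void
    sbuf_release(struct sbuf_gate *g)
    {
            mutex_enter(&g->lock);
            g->busy = 0;
            cv_signal(&g->cv);
            mutex_exit(&g->lock);
    }
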
3769 struct dcd_disk *un = (struct dcd_disk *)arg; in dcdrestart() local
3776 bp = un->un_utab.b_forw; in dcdrestart()
3778 un->un_ncmds++; in dcdrestart()
3779 DCD_DO_KSTATS(un, kstat_waitq_to_runq, bp); in dcdrestart()
3792 DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp); in dcdrestart()
3793 un->un_ncmds--; in dcdrestart()
3802 un->un_reissued_timeid = in dcdrestart()
3803 timeout(dcdrestart, (caddr_t)un, in dcdrestart()
3808 DCD_DO_ERRSTATS(un, dcd_transerrs); in dcdrestart()
3814 DCD_DO_KSTATS(un, kstat_waitq_exit, bp); in dcdrestart()
3815 un->un_reissued_timeid = 0L; in dcdrestart()
3816 dcddone_and_mutex_exit(un, bp); in dcdrestart()
3821 un->un_reissued_timeid = 0L; in dcdrestart()
3833 struct dcd_disk *un = (struct dcd_disk *)arg; in dcd_reset_throttle() local
3837 dp = &un->un_utab; in dcd_reset_throttle()
3842 if (dp->b_actf && (un->un_ncmds < un->un_throttle) && in dcd_reset_throttle()
3844 dcdstart(un); in dcd_reset_throttle()
3863 dcd_handle_tran_busy(struct buf *bp, struct diskhd *dp, struct dcd_disk *un) in dcd_handle_tran_busy() argument
3875 if (!un->un_reissued_timeid) { in dcd_handle_tran_busy()
3876 un->un_reissued_timeid = in dcd_handle_tran_busy()
3877 timeout(dcdrestart, (caddr_t)un, DCD_BSY_TIMEOUT/500); in dcd_handle_tran_busy()
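
dcd_handle_tran_busy() arms a single timeout-driven retry through dcdrestart() only when un_reissued_timeid shows none is pending. A sketch of that arming step; timeout() and timeout_id_t are the real DDI interfaces, the remaining names are invented:

    #include <sys/types.h>
    #include <sys/conf.h>
    #include <sys/ddi.h>
    #include <sys/sunddi.h>

    struct retry_unit {
            timeout_id_t    pending;        /* 0 when no retry is armed */
    };

    static void retry_func(void *arg);      /* would re-drive the queue */

    static void
    arm_retry(struct retry_unit *ru, clock_t ticks)
    {
            if (ru->pending == 0)
                    ru->pending = timeout(retry_func, ru, ticks);
    }
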
3882 dcd_write_deviceid(struct dcd_disk *un) in dcd_write_deviceid() argument
3895 if (cmlb_get_devid_block(un->un_dklbhandle, &blk, 0)) { in dcd_write_deviceid()
3902 dkdevid = kmem_zalloc(un->un_secsize, KM_SLEEP); in dcd_write_deviceid()
3909 bcopy(un->un_devid, &dkdevid->dkd_devid, in dcd_write_deviceid()
3910 ddi_devid_sizeof(un->un_devid)); in dcd_write_deviceid()
3915 for (i = 0; i < ((un->un_secsize - sizeof (int))/sizeof (int)); i++) in dcd_write_deviceid()
3924 if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) { in dcd_write_deviceid()
3927 if (un->un_dp->options & BLOCK_MODE) in dcd_write_deviceid()
3932 cdb.size = un->un_secsize; in dcd_write_deviceid()
3940 ucmd.udcd_buflen = un->un_secsize; in dcd_write_deviceid()
3948 kmem_free(dkdevid, un->un_secsize); in dcd_write_deviceid()
3953 dcd_read_deviceid(struct dcd_disk *un) in dcd_read_deviceid() argument
3966 if (cmlb_get_devid_block(un->un_dklbhandle, &blk, 0)) { in dcd_read_deviceid()
3972 dkdevid = kmem_alloc(un->un_secsize, KM_SLEEP); in dcd_read_deviceid()
3977 if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) { in dcd_read_deviceid()
3980 if (un->un_dp->options & BLOCK_MODE) in dcd_read_deviceid()
3985 cdb.size = un->un_secsize; in dcd_read_deviceid()
3993 ucmd.udcd_buflen = un->un_secsize; in dcd_read_deviceid()
4002 kmem_free((caddr_t)dkdevid, un->un_secsize); in dcd_read_deviceid()
4010 kmem_free((caddr_t)dkdevid, un->un_secsize); in dcd_read_deviceid()
4017 for (i = 0; i < ((un->un_secsize - sizeof (int))/sizeof (int)); i++) in dcd_read_deviceid()
4023 kmem_free((caddr_t)dkdevid, un->un_secsize); in dcd_read_deviceid()
4029 kmem_free((caddr_t)dkdevid, un->un_secsize); in dcd_read_deviceid()
4035 un->un_devid = (ddi_devid_t)kmem_alloc(sz, KM_SLEEP); in dcd_read_deviceid()
4036 bcopy(&dkdevid->dkd_devid, un->un_devid, sz); in dcd_read_deviceid()
4037 kmem_free((caddr_t)dkdevid, un->un_secsize); in dcd_read_deviceid()
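
Both deviceid routines walk the on-disk devid block over its first (un_secsize - sizeof (int)) / sizeof (int) words, as the loop bounds above show, to compute and verify a checksum stored in the final word. A sketch of such a checksum; the XOR accumulation is an assumption, since the listing shows only the loop bound, not the operator:

    #include <stddef.h>

    static unsigned int
    devid_block_chksum(const void *block, size_t secsize)
    {
            const unsigned int *ip = block;
            unsigned int chksum = 0;
            size_t nwords = (secsize - sizeof (int)) / sizeof (int);
            size_t i;

            for (i = 0; i < nwords; i++)
                    chksum ^= ip[i];        /* assumed accumulation */
            return (chksum);
    }
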
4053 dcd_get_devid(struct dcd_disk *un) in dcd_get_devid() argument
4058 if (un->un_devid != NULL) in dcd_get_devid()
4059 return (un->un_devid); in dcd_get_devid()
4062 rc = dcd_make_devid_from_serial(un); in dcd_get_devid()
4066 if (dcd_read_deviceid(un)) in dcd_get_devid()
4070 (void) ddi_devid_register(DCD_DEVINFO, un->un_devid); in dcd_get_devid()
4071 return (un->un_devid); in dcd_get_devid()
4076 dcd_create_devid(struct dcd_disk *un) in dcd_create_devid() argument
4079 &un->un_devid) == DDI_FAILURE) in dcd_create_devid()
4082 if (dcd_write_deviceid(un)) { in dcd_create_devid()
4083 ddi_devid_free(un->un_devid); in dcd_create_devid()
4084 un->un_devid = NULL; in dcd_create_devid()
4088 (void) ddi_devid_register(DCD_DEVINFO, un->un_devid); in dcd_create_devid()
4089 return (un->un_devid); in dcd_create_devid()
4097 dcd_make_devid_from_serial(struct dcd_disk *un) in dcd_make_devid_from_serial() argument
4108 model = un->un_dcd->dcd_ident->dcd_model; in dcd_make_devid_from_serial()
4110 serno = un->un_dcd->dcd_ident->dcd_drvser; in dcd_make_devid_from_serial()
4138 hwid, (ddi_devid_t *)&un->un_devid); in dcd_make_devid_from_serial()
4270 dcd_create_errstats(struct dcd_disk *un, int instance) in dcd_create_errstats() argument
4275 if (un->un_errstats == (kstat_t *)0) { in dcd_create_errstats()
4277 un->un_errstats = kstat_create("daderror", instance, kstatname, in dcd_create_errstats()
4282 if (un->un_errstats) { in dcd_create_errstats()
4285 dtp = (struct dcd_errstats *)un->un_errstats->ks_data; in dcd_create_errstats()
4311 un->un_errstats->ks_private = un; in dcd_create_errstats()
4312 un->un_errstats->ks_update = nulldev; in dcd_create_errstats()
4313 kstat_install(un->un_errstats); in dcd_create_errstats()
4316 un->un_dcd->dcd_ident->dcd_model, 16); in dcd_create_errstats()
4318 un->un_dcd->dcd_ident->dcd_drvser, 16); in dcd_create_errstats()
4320 un->un_dcd->dcd_ident->dcd_fw, 8); in dcd_create_errstats()
4322 (uint64_t)((uint64_t)un->un_diskcapacity * in dcd_create_errstats()
4323 (uint64_t)un->un_lbasize); in dcd_create_errstats()
4421 dcd_flush_cache(struct dcd_disk *un) in dcd_flush_cache() argument
4477 struct dcd_disk *un; in dcd_send_lb_rw_cmd() local
4479 un = ddi_get_soft_state(dcd_state, ddi_get_instance(devi)); in dcd_send_lb_rw_cmd()
4480 if (un == NULL) in dcd_send_lb_rw_cmd()
4511 if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) { in dcd_send_lb_rw_cmd()
4519 if (un->un_dp->options & BLOCK_MODE) in dcd_send_lb_rw_cmd()
4524 if (un->un_dp->options & BLOCK_MODE) in dcd_send_lb_rw_cmd()
4669 struct dcd_disk *un; in dcd_lb_getinfo() local
4671 un = ddi_get_soft_state(dcd_state, ddi_get_instance(devi)); in dcd_lb_getinfo()
4673 if (un == NULL) in dcd_lb_getinfo()
4686 if (un->un_diskcapacity <= 0) { in dcd_lb_getinfo()
4693 *(diskaddr_t *)arg = un->un_diskcapacity; in dcd_lb_getinfo()
4698 un->un_diskcapacity); in dcd_lb_getinfo()
4704 *(tg_attribute_t *)arg = un->un_tgattribute; in dcd_lb_getinfo()
4707 un->un_tgattribute.media_is_writable); in dcd_lb_getinfo()
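
dcd_lb_getinfo() is the cmlb callback that reports capacity and target attributes back through the caller-supplied pointer, failing when un_diskcapacity is not yet known. A rough sketch of that shape; the command constants, error choices, and names here are stand-ins, not cmlb's actual TG_* requests:

    #include <stdint.h>
    #include <errno.h>

    #define GI_CAPACITY     1       /* invented command values */
    #define GI_WRITABLE     2

    struct lb_unit {
            uint64_t        diskcapacity;   /* 0 until identify succeeds */
            int             media_is_writable;
    };

    static int
    lb_getinfo(struct lb_unit *un, int cmd, void *arg)
    {
            switch (cmd) {
            case GI_CAPACITY:
                    if (un->diskcapacity == 0)
                            return (EIO);   /* illustrative error choice */
                    *(uint64_t *)arg = un->diskcapacity;
                    return (0);
            case GI_WRITABLE:
                    *(int *)arg = un->media_is_writable;
                    return (0);
            default:
                    return (ENOTTY);
            }
    }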