Lines Matching defs:bd
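
The matches below appear to come from an illumos-style blkdev framework: bd_t is the per-instance soft state threaded through attach/detach, the transfer queues, the kstats, and the media state machine. After each group of matches, a short sketch reconstructs the shape of the surrounding function. The sketches assume the file's own declarations (bd_t and its d_* fields, bd_state, bd_xfer_impl_t, the bd_ops vector) and are hedged readings of the fragments, not verbatim source.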

60 typedef struct bd bd_t;
63 struct bd {
237 rv = ddi_soft_state_init(&bd_state, sizeof (struct bd), 2);
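
Lines 60 and 63 declare the per-instance soft state; line 237 reserves slots for it, presumably in the module's _init(9E). A minimal sketch of the pattern, assuming the module-global bd_state handle used throughout the rest of the listing:

    #include <sys/ddi.h>
    #include <sys/sunddi.h>

    static void *bd_state;    /* soft-state anchor; see line 237 */

    int
    _init(void)
    {
        int rv;

        /*
         * Reserve two soft-state slots up front; the count is only a
         * hint, and the framework grows the array on demand.
         */
        rv = ddi_soft_state_init(&bd_state, sizeof (struct bd), 2);
        if (rv != 0)
            return (rv);

        /* mod_install() of the modlinkage would follow here. */
        return (0);
    }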
272 bd_t *bd;
281 bd = ddi_get_soft_state(bd_state, inst);
282 if (bd == NULL) {
285 *resultp = (void *)bd->d_dip;
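
Lines 272-285 are the DDI_INFO_DEVT2DEVINFO half of getinfo(9E): map the minor number back to an instance, then to the cached dev_info pointer. A sketch, borrowing the BDINST() minor-to-instance macro that appears at line 1129; the surrounding switch is assumed:

    static int
    bd_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
    {
        bd_t *bd;
        int inst = BDINST((dev_t)(uintptr_t)arg);

        switch (cmd) {
        case DDI_INFO_DEVT2DEVINFO:
            bd = ddi_get_soft_state(bd_state, inst);
            if (bd == NULL)
                return (DDI_FAILURE);    /* no such instance attached */
            *resultp = (void *)bd->d_dip;
            break;
        case DDI_INFO_DEVT2INSTANCE:
            *resultp = (void *)(uintptr_t)inst;
            break;
        default:
            return (DDI_FAILURE);
        }
        return (DDI_SUCCESS);
    }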
336 bd_create_errstats(bd_t *bd, int inst, bd_drive_t *drive)
342 if (bd->d_errstats != NULL)
346 ddi_driver_name(bd->d_dip));
348 ddi_driver_name(bd->d_dip), inst);
350 bd->d_errstats = kstat_create(ks_module, inst, ks_name, "device_error",
353 if (bd->d_errstats == NULL) {
360 bd->d_kerr = kmem_zalloc(sizeof (struct bd_errstats),
362 bd->d_errmutex = kmem_zalloc(sizeof (kmutex_t), KM_SLEEP);
363 mutex_init(bd->d_errmutex, NULL, MUTEX_DRIVER, NULL);
365 if (bd->d_errstats->ks_lock == NULL) {
366 bd->d_errstats->ks_lock = kmem_zalloc(sizeof (kmutex_t),
368 mutex_init(bd->d_errstats->ks_lock, NULL, MUTEX_DRIVER,
372 bd->d_errmutex = bd->d_errstats->ks_lock;
373 bd->d_kerr = (struct bd_errstats *)bd->d_errstats->ks_data;
376 kstat_named_init(&bd->d_kerr->bd_softerrs, "Soft Errors",
378 kstat_named_init(&bd->d_kerr->bd_harderrs, "Hard Errors",
380 kstat_named_init(&bd->d_kerr->bd_transerrs, "Transport Errors",
384 kstat_named_init(&bd->d_kerr->bd_model, "Model",
387 kstat_named_init(&bd->d_kerr->bd_vid, "Vendor",
389 kstat_named_init(&bd->d_kerr->bd_pid, "Product",
393 kstat_named_init(&bd->d_kerr->bd_revision, "Revision",
395 kstat_named_init(&bd->d_kerr->bd_serial, "Serial No",
397 kstat_named_init(&bd->d_kerr->bd_capacity, "Size",
399 kstat_named_init(&bd->d_kerr->bd_rq_media_err, "Media Error",
401 kstat_named_init(&bd->d_kerr->bd_rq_ntrdy_err, "Device Not Ready",
403 kstat_named_init(&bd->d_kerr->bd_rq_nodev_err, "No Device",
405 kstat_named_init(&bd->d_kerr->bd_rq_recov_err, "Recoverable",
407 kstat_named_init(&bd->d_kerr->bd_rq_illrq_err, "Illegal Request",
409 kstat_named_init(&bd->d_kerr->bd_rq_pfa_err,
412 bd->d_errstats->ks_private = bd;
414 kstat_install(bd->d_errstats);
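
bd_create_errstats() (336-414) names the kstat after the driver, sizes it for struct bd_errstats, and, when kstat_create() fails, falls back to a private zero-filled copy plus its own mutex, so the hot-path counters never need a NULL check. A condensed sketch (format strings and ndata are assumed since lines 345/347/351-352 are elided, the full run of kstat_named_init() calls is abbreviated, and the final install is guarded here because the scratch-kstat path has nothing to install):

    static void
    bd_create_errstats(bd_t *bd, int inst, bd_drive_t *drive)
    {
        char ks_module[KSTAT_STRLEN];
        char ks_name[KSTAT_STRLEN];
        int ndata = sizeof (struct bd_errstats) / sizeof (kstat_named_t);

        if (bd->d_errstats != NULL)
            return;    /* already created for this instance */

        (void) snprintf(ks_module, sizeof (ks_module), "%serr",
            ddi_driver_name(bd->d_dip));
        (void) snprintf(ks_name, sizeof (ks_name), "%s%d,err",
            ddi_driver_name(bd->d_dip), inst);

        bd->d_errstats = kstat_create(ks_module, inst, ks_name,
            "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT);

        if (bd->d_errstats == NULL) {
            /*
             * No kstat: keep a private, zero-filled copy (with its own
             * mutex) so counters can be bumped without NULL checks.
             */
            bd->d_kerr = kmem_zalloc(sizeof (struct bd_errstats), KM_SLEEP);
            bd->d_errmutex = kmem_zalloc(sizeof (kmutex_t), KM_SLEEP);
            mutex_init(bd->d_errmutex, NULL, MUTEX_DRIVER, NULL);
        } else {
            if (bd->d_errstats->ks_lock == NULL) {
                bd->d_errstats->ks_lock = kmem_zalloc(sizeof (kmutex_t),
                    KM_SLEEP);
                mutex_init(bd->d_errstats->ks_lock, NULL, MUTEX_DRIVER, NULL);
            }
            bd->d_errmutex = bd->d_errstats->ks_lock;
            bd->d_kerr = (struct bd_errstats *)bd->d_errstats->ks_data;
        }

        kstat_named_init(&bd->d_kerr->bd_softerrs, "Soft Errors",
            KSTAT_DATA_UINT32);
        kstat_named_init(&bd->d_kerr->bd_harderrs, "Hard Errors",
            KSTAT_DATA_UINT32);
        /* ... remaining counters and identity strings, per 380-409 ... */

        if (bd->d_errstats != NULL) {
            bd->d_errstats->ks_private = bd;
            kstat_install(bd->d_errstats);
        }
    }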
435 bd_init_errstats(bd_t *bd, bd_drive_t *drive)
437 struct bd_errstats *est = bd->d_kerr;
439 mutex_enter(bd->d_errmutex);
457 mutex_exit(bd->d_errmutex);
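
bd_init_errstats() (435-457) then fills the identity and capacity fields from the drive's inquiry data, holding d_errmutex so kstat readers never see a half-written set. The body (lines 440-456) is elided from the listing; a minimal sketch, with the bd_drive_t field names and the string setter treated as assumptions:

    static void
    bd_init_errstats(bd_t *bd, bd_drive_t *drive)
    {
        struct bd_errstats *est = bd->d_kerr;

        mutex_enter(bd->d_errmutex);

        /* Hypothetical field names; strings must outlive the kstat. */
        if (drive->d_model != NULL)
            kstat_named_setstr(&est->bd_model, drive->d_model);
        if (drive->d_serial != NULL)
            kstat_named_setstr(&est->bd_serial, drive->d_serial);

        mutex_exit(bd->d_errmutex);
    }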
465 bd_t *bd;
497 bd = ddi_get_soft_state(bd_state, inst);
500 bd->d_dma = *(hdl->h_dma);
501 bd->d_dma.dma_attr_granular =
502 max(DEV_BSIZE, bd->d_dma.dma_attr_granular);
503 bd->d_use_dma = B_TRUE;
505 if (bd->d_maxxfer &&
506 (bd->d_maxxfer != bd->d_dma.dma_attr_maxxfer)) {
511 bd->d_maxxfer = bd->d_dma.dma_attr_maxxfer;
513 bd->d_maxxfer = bd->d_dma.dma_attr_maxxfer;
516 bd->d_use_dma = B_FALSE;
517 if (bd->d_maxxfer == 0) {
518 bd->d_maxxfer = 1024 * 1024;
521 bd->d_ops = hdl->h_ops;
522 bd->d_private = hdl->h_private;
523 bd->d_blkshift = 9; /* 512 bytes, to start */
525 if (bd->d_maxxfer % DEV_BSIZE) {
527 bd->d_maxxfer &= ~(DEV_BSIZE - 1);
529 if (bd->d_maxxfer < DEV_BSIZE) {
535 bd->d_dip = dip;
536 bd->d_handle = hdl;
537 hdl->h_bd = bd;
538 ddi_set_driver_private(dip, bd);
540 mutex_init(&bd->d_iomutex, NULL, MUTEX_DRIVER, NULL);
541 mutex_init(&bd->d_ocmutex, NULL, MUTEX_DRIVER, NULL);
542 mutex_init(&bd->d_statemutex, NULL, MUTEX_DRIVER, NULL);
543 cv_init(&bd->d_statecv, NULL, CV_DRIVER, NULL);
545 list_create(&bd->d_waitq, sizeof (bd_xfer_impl_t),
547 list_create(&bd->d_runq, sizeof (bd_xfer_impl_t),
550 bd->d_cache = kmem_cache_create(kcache, sizeof (bd_xfer_impl_t), 8,
551 bd_xfer_ctor, bd_xfer_dtor, NULL, bd, NULL, 0);
553 bd->d_ksp = kstat_create(ddi_driver_name(dip), inst, NULL, "disk",
555 if (bd->d_ksp != NULL) {
556 bd->d_ksp->ks_lock = &bd->d_iomutex;
557 kstat_install(bd->d_ksp);
558 bd->d_kiop = bd->d_ksp->ks_data;
566 bd->d_kiop = kmem_zalloc(sizeof (kstat_io_t), KM_SLEEP);
569 cmlb_alloc_handle(&bd->d_cmlbh);
571 bd->d_state = DKIO_NONE;
574 bd->d_ops.o_drive_info(bd->d_private, &drive);
575 bd->d_qsize = drive.d_qsize;
576 bd->d_removable = drive.d_removable;
577 bd->d_hotpluggable = drive.d_hotpluggable;
579 if (drive.d_maxxfer && drive.d_maxxfer < bd->d_maxxfer)
580 bd->d_maxxfer = drive.d_maxxfer;
584 bd_create_errstats(bd, inst, &drive);
585 bd_init_errstats(bd, &drive);
586 bd_update_state(bd);
589 bd->d_removable, bd->d_hotpluggable,
593 CMLB_FAKE_LABEL_ONE_PARTITION, bd->d_cmlbh, 0);
595 cmlb_free_handle(&bd->d_cmlbh);
596 kmem_cache_destroy(bd->d_cache);
597 mutex_destroy(&bd->d_iomutex);
598 mutex_destroy(&bd->d_ocmutex);
599 mutex_destroy(&bd->d_statemutex);
600 cv_destroy(&bd->d_statecv);
601 list_destroy(&bd->d_waitq);
602 list_destroy(&bd->d_runq);
603 if (bd->d_ksp != NULL) {
604 kstat_delete(bd->d_ksp);
605 bd->d_ksp = NULL;
607 kmem_free(bd->d_kiop, sizeof (kstat_io_t));
613 if (bd->d_ops.o_devid_init != NULL) {
614 rv = bd->d_ops.o_devid_init(bd->d_private, dip, &bd->d_devid);
616 if (ddi_devid_register(dip, bd->d_devid) !=
631 if (bd->d_removable) {
635 if (bd->d_hotpluggable) {
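
The attach path (465-635) does the bulk of the setup: lines 497-518 pick the transfer regime, 521-529 sanitize d_maxxfer, 535-543 wire up the handle and locks, 545-566 create the queues and I/O kstat, and 569-593 probe the drive and attach cmlb, with 595-607 unwinding on failure. A sketch of the transfer-regime decision, the most intricate part; the branch on the elided line 499 is assumed to test hdl->h_dma:

    bd = ddi_get_soft_state(bd_state, inst);

    if (hdl->h_dma != NULL) {
        bd->d_dma = *(hdl->h_dma);
        /* DMA granularity may never fall below the 512-byte unit. */
        bd->d_dma.dma_attr_granular =
            max(DEV_BSIZE, bd->d_dma.dma_attr_granular);
        bd->d_use_dma = B_TRUE;

        if (bd->d_maxxfer &&
            (bd->d_maxxfer != bd->d_dma.dma_attr_maxxfer)) {
            /* Conflicting limits: warn (elided) and trust the DMA attr. */
            bd->d_maxxfer = bd->d_dma.dma_attr_maxxfer;
        } else {
            bd->d_maxxfer = bd->d_dma.dma_attr_maxxfer;
        }
    } else {
        bd->d_use_dma = B_FALSE;
        if (bd->d_maxxfer == 0)
            bd->d_maxxfer = 1024 * 1024;    /* PIO default: 1 MiB */
    }

    bd->d_ops = hdl->h_ops;
    bd->d_private = hdl->h_private;
    bd->d_blkshift = 9;    /* 512 bytes, to start */

    /* d_maxxfer must end up a positive multiple of DEV_BSIZE. */
    if (bd->d_maxxfer % DEV_BSIZE)
        bd->d_maxxfer &= ~(DEV_BSIZE - 1);
    if (bd->d_maxxfer < DEV_BSIZE)
        return (DDI_FAILURE);    /* failure handling assumed */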
648 bd_t *bd;
650 bd = ddi_get_driver_private(dip);
661 if (bd->d_ksp != NULL) {
662 kstat_delete(bd->d_ksp);
663 bd->d_ksp = NULL;
665 kmem_free(bd->d_kiop, sizeof (kstat_io_t));
668 if (bd->d_errstats != NULL) {
669 kstat_delete(bd->d_errstats);
670 bd->d_errstats = NULL;
672 kmem_free(bd->d_kerr, sizeof (struct bd_errstats));
673 mutex_destroy(bd->d_errmutex);
676 cmlb_detach(bd->d_cmlbh, 0);
677 cmlb_free_handle(&bd->d_cmlbh);
678 if (bd->d_devid)
679 ddi_devid_free(bd->d_devid);
680 kmem_cache_destroy(bd->d_cache);
681 mutex_destroy(&bd->d_iomutex);
682 mutex_destroy(&bd->d_ocmutex);
683 mutex_destroy(&bd->d_statemutex);
684 cv_destroy(&bd->d_statecv);
685 list_destroy(&bd->d_waitq);
686 list_destroy(&bd->d_runq);
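
bd_detach() (648-686) unwinds attach in reverse, with the same either/or as creation: a real kstat is kstat_delete()d, a scratch one is kmem_free()d together with its private mutex. A condensed sketch; the else pairings (lines 664 and 671 are elided) and the final soft-state free are inferred rather than shown:

    static int
    bd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
    {
        bd_t *bd = ddi_get_driver_private(dip);

        /* DDI_DETACH vs. DDI_SUSPEND handling elided. */

        if (bd->d_ksp != NULL) {
            kstat_delete(bd->d_ksp);
            bd->d_ksp = NULL;
        } else {
            kmem_free(bd->d_kiop, sizeof (kstat_io_t));
        }

        if (bd->d_errstats != NULL) {
            kstat_delete(bd->d_errstats);
            bd->d_errstats = NULL;
        } else {
            kmem_free(bd->d_kerr, sizeof (struct bd_errstats));
            mutex_destroy(bd->d_errmutex);
        }

        cmlb_detach(bd->d_cmlbh, 0);
        cmlb_free_handle(&bd->d_cmlbh);
        if (bd->d_devid)
            ddi_devid_free(bd->d_devid);
        kmem_cache_destroy(bd->d_cache);
        mutex_destroy(&bd->d_iomutex);
        mutex_destroy(&bd->d_ocmutex);
        mutex_destroy(&bd->d_statemutex);
        cv_destroy(&bd->d_statecv);
        list_destroy(&bd->d_waitq);
        list_destroy(&bd->d_runq);
        ddi_soft_state_free(bd_state, ddi_get_instance(dip));
        return (DDI_SUCCESS);
    }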
695 bd_t *bd = arg;
706 xi->i_bd = bd;
708 if (bd->d_use_dma) {
709 if (ddi_dma_alloc_handle(bd->d_dip, &bd->d_dma, dcb, NULL,
731 bd_xfer_alloc(bd_t *bd, struct buf *bp, int (*func)(void *, bd_xfer_t *),
748 xi = kmem_cache_alloc(bd->d_cache, kmflag);
758 xi->i_blkno = bp->b_lblkno >> (bd->d_blkshift - DEV_BSHIFT);
771 xi->i_func = bd->d_ops.o_read;
774 xi->i_func = bd->d_ops.o_write;
777 shift = bd->d_blkshift;
780 if (!bd->d_use_dma) {
785 (bp->b_bcount + (bd->d_maxxfer - 1)) / bd->d_maxxfer;
787 xi->i_len = min(bp->b_bcount, bd->d_maxxfer);
849 kmem_cache_free(bd->d_cache, xi);
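
Lines 695-849 cover the transfer-object cache: the constructor (695-709) points each bd_xfer_impl_t at its bd and, on DMA-capable devices, allocates a DMA handle up front, so none of that work sits on the I/O path; bd_xfer_alloc() (731-849) then shifts the buf's 512-byte b_lblkno into device blocks and, for PIO, carves the request into d_maxxfer-sized windows. A sketch of the allocation arithmetic; i_num_win and the failure handling are assumptions:

    xi = kmem_cache_alloc(bd->d_cache, kmflag);
    if (xi == NULL)
        return (NULL);

    /*
     * b_lblkno counts DEV_BSIZE (512-byte) sectors; drop the difference
     * between that and the device's block shift.
     */
    xi->i_blkno = bp->b_lblkno >> (bd->d_blkshift - DEV_BSHIFT);

    /* Direction picks the driver entry point (lines 771/774). */
    xi->i_func = (bp->b_flags & B_READ) ?
        bd->d_ops.o_read : bd->d_ops.o_write;
    xi->i_bd = bd;

    if (!bd->d_use_dma) {
        /* PIO: one window per d_maxxfer-sized chunk of the request. */
        xi->i_num_win =
            (bp->b_bcount + (bd->d_maxxfer - 1)) / bd->d_maxxfer;
        xi->i_len = min(bp->b_bcount, bd->d_maxxfer);
    }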
870 bd_t *bd;
894 if ((bd = ddi_get_soft_state(bd_state, inst)) == NULL) {
899 mutex_enter(&bd->d_ocmutex);
904 bd_update_state(bd);
906 if (cmlb_validate(bd->d_cmlbh, 0, 0) != 0) {
913 } else if (cmlb_partinfo(bd->d_cmlbh, part, &nblks, &lba,
936 if ((flag & FWRITE) && bd->d_rdonly) {
941 if ((bd->d_open_excl) & (mask)) {
946 if (bd->d_open_lyr[part]) {
951 if (bd->d_open_reg[i] & mask) {
959 bd->d_open_lyr[part]++;
961 bd->d_open_reg[otyp] |= mask;
964 bd->d_open_excl |= mask;
969 mutex_exit(&bd->d_ocmutex);
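
bd_open() (870-969) keeps three books under d_ocmutex: d_open_excl, a bitmask of exclusively opened partitions; d_open_lyr[], per-partition layered-open counts; and d_open_reg[], one partition mask per open type. Line 936 also refuses FWRITE opens of read-only media. Reassembled; the errno choices and the exact mask encoding are assumptions:

    uint64_t mask = 1ULL << part;    /* one bit per partition; width assumed */
    int i, rv = 0;

    mutex_enter(&bd->d_ocmutex);

    if ((flag & FWRITE) && bd->d_rdonly) {
        rv = EROFS;                  /* read-only media */
    } else if (bd->d_open_excl & mask) {
        rv = EBUSY;                  /* held exclusively by someone else */
    } else if (flag & FEXCL) {
        /* FEXCL fails if any layered or regular opener exists. */
        if (bd->d_open_lyr[part])
            rv = EBUSY;
        for (i = 0; (rv == 0) && (i < OTYPCNT); i++) {
            if (bd->d_open_reg[i] & mask)
                rv = EBUSY;
        }
    }

    if (rv == 0) {
        if (otyp == OTYP_LYR)
            bd->d_open_lyr[part]++;
        else
            bd->d_open_reg[otyp] |= mask;
        if (flag & FEXCL)
            bd->d_open_excl |= mask;
    }

    mutex_exit(&bd->d_ocmutex);
    return (rv);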
978 bd_t *bd;
995 if ((bd = ddi_get_soft_state(bd_state, inst)) == NULL) {
1000 mutex_enter(&bd->d_ocmutex);
1001 if (bd->d_open_excl & mask) {
1002 bd->d_open_excl &= ~mask;
1005 bd->d_open_lyr[part]--;
1007 bd->d_open_reg[otyp] &= ~mask;
1010 if (bd->d_open_lyr[part]) {
1015 if (bd->d_open_reg[i]) {
1019 mutex_exit(&bd->d_ocmutex);
1022 cmlb_invalidate(bd->d_cmlbh, 0);
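
bd_close() (978-1022) reverses that bookkeeping and, once the last opener of any kind is gone, invalidates the cmlb label (1022) so the next open revalidates the media. A sketch; the partition-count bound is hypothetical:

    #define BD_MAXPART 64    /* hypothetical bound, for the sketch only */

    boolean_t last = B_TRUE;

    mutex_enter(&bd->d_ocmutex);
    if (bd->d_open_excl & mask)
        bd->d_open_excl &= ~mask;
    if (otyp == OTYP_LYR)
        bd->d_open_lyr[part]--;
    else
        bd->d_open_reg[otyp] &= ~mask;

    /* Any opener left, layered or regular, keeps the label alive. */
    for (i = 0; i < BD_MAXPART; i++) {
        if (bd->d_open_lyr[i])
            last = B_FALSE;
    }
    for (i = 0; last && (i < OTYPCNT); i++) {
        if (bd->d_open_reg[i])
            last = B_FALSE;
    }
    mutex_exit(&bd->d_ocmutex);

    if (last)
        cmlb_invalidate(bd->d_cmlbh, 0);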
1036 bd_t *bd;
1049 if ((bd = ddi_get_soft_state(bd_state, inst)) == NULL) {
1053 shift = bd->d_blkshift;
1060 if (cmlb_partinfo(bd->d_cmlbh, part, &psize, &pstart, NULL, NULL,
1081 xi = bd_xfer_alloc(bd, bp, bd->d_ops.o_write, KM_NOSLEEP);
1089 bd_submit(bd, xi);
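
bd_dump() (1036-1089) is the dump(9E) path used for crash dumps: it must not sleep, so the transfer is allocated KM_NOSLEEP against o_write (1081) and submitted directly. A sketch of the core; the trailing cmlb_partinfo() argument, bounds checks, and errno values are assumptions:

    if (cmlb_partinfo(bd->d_cmlbh, part, &psize, &pstart, NULL, NULL,
        0) != 0)
        return (ENXIO);

    /* Range checks of blkno/nblk against psize elided (1062-1079). */

    xi = bd_xfer_alloc(bd, bp, bd->d_ops.o_write, KM_NOSLEEP);
    if (xi == NULL)
        return (EAGAIN);    /* cannot sleep at dump time */

    bd_submit(bd, xi);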
1108 bd_t *bd;
1111 bd = ddi_get_soft_state(bd_state, inst);
1114 * In a non-debug kernel, bd_strategy will catch !bd as
1117 ASSERT(bd);
1119 if (bp->b_bcount > bd->d_maxxfer)
1120 bp->b_bcount = bd->d_maxxfer;
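
bd_minphys() (1108-1120) is the minphys(9F) hook: clamp b_bcount to the device limit before physio() chops up the request. Nearly the whole function is visible in the listing; reassembled, with the b_edev lookup assumed:

    static void
    bd_minphys(struct buf *bp)
    {
        int inst = BDINST(bp->b_edev);
        bd_t *bd = ddi_get_soft_state(bd_state, inst);

        /*
         * In a non-debug kernel, bd_strategy will catch !bd as a NULL
         * pointer reference (per the comment at line 1114).
         */
        ASSERT(bd);

        if (bp->b_bcount > bd->d_maxxfer)
            bp->b_bcount = bd->d_maxxfer;
    }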
1126 bd_t *bd;
1129 if ((bd = ddi_get_soft_state(bd_state, BDINST(dev))) == NULL) {
1133 shift = bd->d_blkshift;
1191 bd_t *bd;
1207 if ((bd = ddi_get_soft_state(bd_state, inst)) == NULL) {
1213 if (cmlb_partinfo(bd->d_cmlbh, part, &p_nblks, &p_lba,
1220 shift = bd->d_blkshift;
1241 func = (bp->b_flags & B_READ) ? bd->d_ops.o_read : bd->d_ops.o_write;
1243 xi = bd_xfer_alloc(bd, bp, func, KM_NOSLEEP);
1245 xi = bd_xfer_alloc(bd, bp, func, KM_PUSHPAGE);
1254 bd_submit(bd, xi);
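
bd_strategy() (1191-1254) validates the partition with cmlb_partinfo() (1213), picks the read or write entry point (1241), allocates the transfer with KM_NOSLEEP or KM_PUSHPAGE (1243/1245; the selecting condition sits on an elided line and is shown here as a panic-context check, which is an assumption), and queues it. A sketch of the tail of the function:

    func = (bp->b_flags & B_READ) ? bd->d_ops.o_read : bd->d_ops.o_write;

    if (ddi_in_panic())    /* guard assumed */
        xi = bd_xfer_alloc(bd, bp, func, KM_NOSLEEP);
    else
        xi = bd_xfer_alloc(bd, bp, func, KM_PUSHPAGE);
    if (xi == NULL) {
        bioerror(bp, ENOMEM);    /* errno assumed */
        biodone(bp);
        return (0);
    }

    xi->i_blkno += p_lba;    /* partition-relative -> absolute; placement assumed */
    bd_submit(bd, xi);
    return (0);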
1264 bd_t *bd;
1271 if ((bd = ddi_get_soft_state(bd_state, inst)) == NULL) {
1275 rv = cmlb_ioctl(bd->d_cmlbh, dev, cmd, arg, flag, credp, rvalp, 0);
1289 bd_update_state(bd);
1292 minfo.dki_lbsize = (1U << bd->d_blkshift);
1293 minfo.dki_capacity = bd->d_numblks;
1303 bd_update_state(bd);
1306 miext.dki_lbsize = (1U << bd->d_blkshift);
1307 miext.dki_pbsize = (1U << bd->d_pblkshift);
1308 miext.dki_capacity = bd->d_numblks;
1318 cinfo.dki_cnum = ddi_get_instance(ddi_get_parent(bd->d_dip));
1320 "%s", ddi_driver_name(ddi_get_parent(bd->d_dip)));
1322 "%s", ddi_driver_name(bd->d_dip));
1326 cinfo.dki_maxtransfer = bd->d_maxxfer / DEV_BSIZE;
1339 i = bd->d_removable ? 1 : 0;
1347 i = bd->d_hotpluggable ? 1 : 0;
1355 i = bd->d_rdonly ? 1 : 0;
1363 i = bd->d_ssd ? 1 : 0;
1374 if ((rv = bd_check_state(bd, &state)) != 0) {
1388 rv = bd_flush_write_cache(bd, dkc);
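
bd_ioctl() (1264-1388) hands label ioctls to cmlb first (1275) and answers the rest itself; note that every size-reporting case refreshes the state first (bd_update_state at 1289/1303) so the geometry is current. A sketch of the DKIOCGMEDIAINFO case, with the media type and copyout details assumed:

    case DKIOCGMEDIAINFO: {
        struct dk_minfo minfo;

        bd_update_state(bd);    /* refresh geometry before reporting */

        bzero(&minfo, sizeof (minfo));
        minfo.dki_media_type = DK_FIXED_DISK;    /* assumed */
        minfo.dki_lbsize = (1U << bd->d_blkshift);
        minfo.dki_capacity = bd->d_numblks;
        if (ddi_copyout(&minfo, (void *)arg, sizeof (minfo), flag) != 0)
            return (EFAULT);
        return (0);
    }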
1403 bd_t *bd;
1405 bd = ddi_get_soft_state(bd_state, ddi_get_instance(dip));
1406 if (bd == NULL)
1410 return (cmlb_prop_op(bd->d_cmlbh, dev, dip, prop_op, mod_flags, name,
1419 bd_t *bd;
1432 bd = ddi_get_soft_state(bd_state, ddi_get_instance(dip));
1434 if (P2PHASE(length, (1U << bd->d_blkshift)) != 0) {
1446 func = bd->d_ops.o_read;
1450 func = bd->d_ops.o_write;
1459 xi = bd_xfer_alloc(bd, bp, func, kmflag);
1467 bd_submit(bd, xi);
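
bd_tg_rdwr() (1419-1467) is the cmlb tg_rdwr backend, i.e. how the label code reads and writes the raw disk: reject lengths that are not block-aligned (the P2PHASE test at 1434), map TG_READ/TG_WRITE onto o_read/o_write, and run the transfer through the normal queue. A sketch; the command mapping and errno values are assumptions:

    if (P2PHASE(length, (1U << bd->d_blkshift)) != 0)
        return (EINVAL);    /* whole device blocks only */

    switch (cmd) {
    case TG_READ:
        func = bd->d_ops.o_read;
        break;
    case TG_WRITE:
        func = bd->d_ops.o_write;
        break;
    default:
        return (EINVAL);
    }

    /* buf setup for the label I/O elided (1436-1458). */
    xi = bd_xfer_alloc(bd, bp, func, kmflag);
    if (xi == NULL)
        return (ENOMEM);    /* errno assumed */
    bd_submit(bd, xi);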
1478 bd_t *bd;
1481 bd = ddi_get_soft_state(bd_state, ddi_get_instance(dip));
1493 bd_update_state(bd);
1494 *(diskaddr_t *)arg = bd->d_numblks;
1498 *(uint32_t *)arg = (1U << bd->d_blkshift);
1509 bd_update_state(bd);
1511 bd->d_rdonly ? B_FALSE : B_TRUE;
1512 ((tg_attribute_t *)arg)->media_is_solid_state = bd->d_ssd;
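
bd_tg_getinfo() (1478-1512) answers cmlb's geometry queries from the cached state, refreshing it first for capacity and attributes. A sketch of three cases; the TG_* selectors are cmlb's and their pairing with these lines is assumed:

    case TG_GETCAPACITY:
        bd_update_state(bd);
        *(diskaddr_t *)arg = bd->d_numblks;
        return (0);

    case TG_GETBLOCKSIZE:
        *(uint32_t *)arg = (1U << bd->d_blkshift);
        return (0);

    case TG_GETATTR:
        bd_update_state(bd);
        ((tg_attribute_t *)arg)->media_is_writable =
            bd->d_rdonly ? B_FALSE : B_TRUE;
        ((tg_attribute_t *)arg)->media_is_solid_state = bd->d_ssd;
        return (0);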
1523 bd_sched(bd_t *bd)
1529 mutex_enter(&bd->d_iomutex);
1531 while ((bd->d_qactive < bd->d_qsize) &&
1532 ((xi = list_remove_head(&bd->d_waitq)) != NULL)) {
1533 bd->d_qactive++;
1534 kstat_waitq_to_runq(bd->d_kiop);
1535 list_insert_tail(&bd->d_runq, xi);
1543 mutex_exit(&bd->d_iomutex);
1545 rv = xi->i_func(bd->d_private, &xi->i_public);
1551 atomic_inc_32(&bd->d_kerr->bd_transerrs.value.ui32);
1553 mutex_enter(&bd->d_iomutex);
1554 bd->d_qactive--;
1555 kstat_runq_exit(bd->d_kiop);
1556 list_remove(&bd->d_runq, xi);
1559 mutex_enter(&bd->d_iomutex);
1563 mutex_exit(&bd->d_iomutex);
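
bd_sched() (1523-1563) is the dispatcher: while the device has queue slots free (d_qactive < d_qsize), it moves transfers from the wait queue to the run queue, drops d_iomutex, and calls the driver's start function; a failed start is counted as a transport error and the queueing undone (1553-1556), with the buf completed outside the lock before it is retaken (1559). Reassembled; the failed-start teardown is simplified:

    static void
    bd_sched(bd_t *bd)
    {
        bd_xfer_impl_t *xi;
        int rv;

        mutex_enter(&bd->d_iomutex);

        while ((bd->d_qactive < bd->d_qsize) &&
            ((xi = list_remove_head(&bd->d_waitq)) != NULL)) {
            bd->d_qactive++;
            kstat_waitq_to_runq(bd->d_kiop);
            list_insert_tail(&bd->d_runq, xi);

            /*
             * Drop the lock across the driver's start routine, which
             * may sleep or complete the transfer synchronously.
             */
            mutex_exit(&bd->d_iomutex);

            rv = xi->i_func(bd->d_private, &xi->i_public);
            if (rv != 0) {
                struct buf *bp = xi->i_bp;    /* field name assumed */

                /* Failed to start: count it and undo the queueing. */
                atomic_inc_32(&bd->d_kerr->bd_transerrs.value.ui32);

                mutex_enter(&bd->d_iomutex);
                bd->d_qactive--;
                kstat_runq_exit(bd->d_kiop);
                list_remove(&bd->d_runq, xi);
                mutex_exit(&bd->d_iomutex);

                /* Simplified: the real path unbinds DMA first. */
                kmem_cache_free(bd->d_cache, xi);
                bioerror(bp, rv);
                biodone(bp);
            }

            mutex_enter(&bd->d_iomutex);
        }

        mutex_exit(&bd->d_iomutex);
    }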
1567 bd_submit(bd_t *bd, bd_xfer_impl_t *xi)
1569 mutex_enter(&bd->d_iomutex);
1570 list_insert_tail(&bd->d_waitq, xi);
1571 kstat_waitq_enter(bd->d_kiop);
1572 mutex_exit(&bd->d_iomutex);
1574 bd_sched(bd);
1580 bd_t *bd = xi->i_bd;
1583 mutex_enter(&bd->d_iomutex);
1584 bd->d_qactive--;
1585 kstat_runq_exit(bd->d_kiop);
1586 list_remove(&bd->d_runq, xi);
1587 mutex_exit(&bd->d_iomutex);
1591 bd->d_kiop->reads++;
1592 bd->d_kiop->nread += (bp->b_bcount - xi->i_resid);
1594 bd->d_kiop->writes++;
1595 bd->d_kiop->nwritten += (bp->b_bcount - xi->i_resid);
1598 bd_sched(bd);
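
The completion side (1580-1598) undoes that bookkeeping: pull the transfer off the run queue under d_iomutex, credit reads or writes by the bytes actually moved (b_bcount minus the residual), and kick the scheduler again for anything still waiting. Reassembled; the function name and err parameter are assumptions:

    static void
    bd_runq_exit(bd_xfer_impl_t *xi, int err)
    {
        bd_t *bd = xi->i_bd;
        struct buf *bp = xi->i_bp;    /* field name assumed */

        mutex_enter(&bd->d_iomutex);
        bd->d_qactive--;
        kstat_runq_exit(bd->d_kiop);
        list_remove(&bd->d_runq, xi);
        mutex_exit(&bd->d_iomutex);

        if (err == 0) {
            if (bp->b_flags & B_READ) {
                bd->d_kiop->reads++;
                bd->d_kiop->nread += (bp->b_bcount - xi->i_resid);
            } else {
                bd->d_kiop->writes++;
                bd->d_kiop->nwritten += (bp->b_bcount - xi->i_resid);
            }
        }
        bd_sched(bd);
    }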
1602 bd_update_state(bd_t *bd)
1610 mutex_enter(&bd->d_statemutex);
1611 if (bd->d_ops.o_media_info(bd->d_private, &media) != 0) {
1612 bd->d_numblks = 0;
1619 (P2PHASE(bd->d_maxxfer, media.m_blksize))) {
1621 ddi_driver_name(bd->d_dip), ddi_get_instance(bd->d_dip),
1627 bd->d_numblks = 0;
1631 if (((1U << bd->d_blkshift) != media.m_blksize) ||
1632 (bd->d_numblks != media.m_nblks)) {
1637 bd->d_blkshift = ddi_ffs(media.m_blksize) - 1;
1638 bd->d_pblkshift = bd->d_blkshift;
1639 bd->d_numblks = media.m_nblks;
1640 bd->d_rdonly = media.m_readonly;
1641 bd->d_ssd = media.m_solidstate;
1651 bd->d_pblkshift = ddi_ffs(media.m_pblksize) - 1;
1654 if (state != bd->d_state) {
1655 bd->d_state = state;
1656 cv_broadcast(&bd->d_statecv);
1659 mutex_exit(&bd->d_statemutex);
1661 bd->d_kerr->bd_capacity.value.ui64 = bd->d_numblks << bd->d_blkshift;
1665 (void) cmlb_validate(bd->d_cmlbh, 0, 0);
1667 cmlb_invalidate(bd->d_cmlbh, 0);
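
bd_update_state() (1602-1667) is the media state machine: query o_media_info(), reject block sizes that fail the sanity tests (the P2PHASE check against d_maxxfer at 1619 is the visible one), recompute the shift counts with ddi_ffs(), and broadcast on d_statecv whenever the DKIO state changes. Capacity is republished to the error kstat outside d_statemutex (1661), and cmlb is revalidated or invalidated only when the geometry actually moved. A sketch; the exact compound condition, message text, state constants, and docmlb plumbing are assumptions:

    static void
    bd_update_state(bd_t *bd)
    {
        enum dkio_state state;
        bd_media_t media;
        boolean_t docmlb = B_FALSE;

        bzero(&media, sizeof (media));

        mutex_enter(&bd->d_statemutex);
        if (bd->d_ops.o_media_info(bd->d_private, &media) != 0) {
            bd->d_numblks = 0;
            state = DKIO_EJECTED;    /* state value assumed */
            docmlb = B_TRUE;
        } else if ((media.m_blksize < DEV_BSIZE) ||
            (!ISP2(media.m_blksize)) ||
            (P2PHASE(bd->d_maxxfer, media.m_blksize))) {
            /* Unusable block size: complain and report no media. */
            cmn_err(CE_WARN, "%s%d: unsupported block size %u",
                ddi_driver_name(bd->d_dip), ddi_get_instance(bd->d_dip),
                (uint32_t)media.m_blksize);
            bd->d_numblks = 0;
            state = DKIO_NONE;       /* state value assumed */
            docmlb = B_TRUE;
        } else {
            if (((1U << bd->d_blkshift) != media.m_blksize) ||
                (bd->d_numblks != media.m_nblks)) {
                /* Geometry changed: the label needs revalidation. */
                docmlb = B_TRUE;
            }
            bd->d_blkshift = ddi_ffs(media.m_blksize) - 1;
            bd->d_pblkshift = bd->d_blkshift;
            bd->d_numblks = media.m_nblks;
            bd->d_rdonly = media.m_readonly;
            bd->d_ssd = media.m_solidstate;

            if (ISP2(media.m_pblksize))    /* guard assumed */
                bd->d_pblkshift = ddi_ffs(media.m_pblksize) - 1;

            state = DKIO_INSERTED;
        }

        if (state != bd->d_state) {
            bd->d_state = state;
            cv_broadcast(&bd->d_statecv);
        }
        mutex_exit(&bd->d_statemutex);

        /* Republish capacity in bytes; done outside the state lock. */
        bd->d_kerr->bd_capacity.value.ui64 =
            bd->d_numblks << bd->d_blkshift;

        if (docmlb) {
            if (state == DKIO_INSERTED)
                (void) cmlb_validate(bd->d_cmlbh, 0, 0);
            else
                cmlb_invalidate(bd->d_cmlbh, 0);
        }
    }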
1673 bd_check_state(bd_t *bd, enum dkio_state *state)
1679 bd_update_state(bd);
1681 mutex_enter(&bd->d_statemutex);
1683 if (bd->d_state != *state) {
1684 *state = bd->d_state;
1685 mutex_exit(&bd->d_statemutex);
1690 if (cv_reltimedwait_sig(&bd->d_statecv, &bd->d_statemutex,
1692 mutex_exit(&bd->d_statemutex);
1696 mutex_exit(&bd->d_statemutex);
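
bd_check_state() (1673-1696) implements DKIOCSTATE-style blocking: refresh, compare against the caller's last-seen state, and otherwise sleep on d_statecv with cv_reltimedwait_sig() (which returns 0 when interrupted by a signal) so the wait is abortable. A sketch, with the poll interval and errno assumed:

    static int
    bd_check_state(bd_t *bd, enum dkio_state *state)
    {
        clock_t when;

        for (;;) {
            bd_update_state(bd);

            mutex_enter(&bd->d_statemutex);

            /* Report as soon as the state differs from the caller's. */
            if (bd->d_state != *state) {
                *state = bd->d_state;
                mutex_exit(&bd->d_statemutex);
                return (0);
            }

            when = drv_usectohz(1000000);    /* interval assumed */
            if (cv_reltimedwait_sig(&bd->d_statecv, &bd->d_statemutex,
                when, TR_CLOCK_TICK) == 0) {
                mutex_exit(&bd->d_statemutex);
                return (EINTR);              /* errno assumed */
            }
            mutex_exit(&bd->d_statemutex);
        }
    }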
1714 bd_flush_write_cache(bd_t *bd, struct dk_callback *dkc)
1721 if (bd->d_ops.o_sync_cache == NULL) {
1730 xi = bd_xfer_alloc(bd, bp, bd->d_ops.o_sync_cache, KM_SLEEP);
1745 bd_submit(bd, xi);
1750 bd_submit(bd, xi);
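
bd_flush_write_cache() (1714-1750) funnels a cache flush through the same queue as regular I/O: drivers without an o_sync_cache entry point are rejected outright, otherwise a zero-length transfer is built against it and either completed via the caller's dk_callback (the 1745 submit) or waited on synchronously (the 1750 submit). A sketch; the buf plumbing, callback wiring, and errno are assumptions:

    static int
    bd_flush_write_cache(bd_t *bd, struct dk_callback *dkc)
    {
        struct buf *bp;
        bd_xfer_impl_t *xi;
        int rv;

        /* Nothing to flush through if the driver has no sync entry. */
        if (bd->d_ops.o_sync_cache == NULL)
            return (ENOTSUP);    /* errno assumed */

        /* A zero-length buf carries the flush through the queue. */
        bp = getrbuf(KM_SLEEP);
        bp->b_resid = 0;
        bp->b_bcount = 0;

        xi = bd_xfer_alloc(bd, bp, bd->d_ops.o_sync_cache, KM_SLEEP);

        if ((dkc != NULL) && (dkc->dkc_callback != NULL)) {
            /* Async: complete via the caller's callback (wiring of
             * dkc into xi elided in the listing). */
            bd_submit(bd, xi);
            return (0);
        }

        /* Sync: submit and wait on the buf. */
        bd_submit(bd, xi);
        rv = biowait(bp);
        freerbuf(bp);
        return (rv);
    }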
1929 bd_t *bd = xi->i_bd;
1934 atomic_inc_32(&bd->d_kerr->bd_harderrs.value.ui32);
1957 if (bd->d_use_dma) {
1965 len = min(bp->b_bcount - xi->i_offset, bd->d_maxxfer);
1983 rv = xi->i_func(bd->d_private, &xi->i_public);
1987 atomic_inc_32(&bd->d_kerr->bd_transerrs.value.ui32);
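
Lines 1929-1987 are inside the transfer-completion path: a failed transfer bumps the hard-error counter (1934); an unfinished transfer instead advances to its next window, sized for PIO by the min() at 1965, and is restarted through i_func, with a failed restart counted as a transport error (1987). A fragment-level sketch; the window test, offset bookkeeping, and buf field names are assumptions:

    bd_t *bd = xi->i_bd;
    struct buf *bp = xi->i_bp;    /* field name assumed */
    size_t len;
    int rv;

    if (err != 0) {
        /* The device failed the transfer outright. */
        atomic_inc_32(&bd->d_kerr->bd_harderrs.value.ui32);
        /* ... complete the buf with the error (elided) ... */
    } else if (xi->i_offset + xi->i_len < bp->b_bcount) {
        if (bd->d_use_dma) {
            /* Advance the DMA window (ddi_dma_getwin; elided, 1957+). */
        } else {
            /* PIO: next chunk is what remains, capped at d_maxxfer. */
            xi->i_offset += xi->i_len;
            len = min(bp->b_bcount - xi->i_offset, bd->d_maxxfer);
            xi->i_len = len;
        }

        /* Restart the driver on the new window. */
        rv = xi->i_func(bd->d_private, &xi->i_public);
        if (rv != 0)
            atomic_inc_32(&bd->d_kerr->bd_transerrs.value.ui32);
    }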
2000 bd_t *bd = xi->i_bd;
2004 atomic_inc_32(&bd->d_kerr->bd_rq_media_err.value.ui32);
2007 atomic_inc_32(&bd->d_kerr->bd_rq_ntrdy_err.value.ui32);
2010 atomic_inc_32(&bd->d_kerr->bd_rq_nodev_err.value.ui32);
2013 atomic_inc_32(&bd->d_kerr->bd_rq_recov_err.value.ui32);
2016 atomic_inc_32(&bd->d_kerr->bd_rq_illrq_err.value.ui32);
2019 atomic_inc_32(&bd->d_kerr->bd_rq_pfa_err.value.ui32);
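
Lines 2000-2019 classify a failed request for the error kstat, one atomic counter per error class. Reassembled as a switch; the BD_ERR_* names are the blkdev framework's error codes and are assumed to be the values tested here:

    bd_t *bd = xi->i_bd;

    switch (err) {
    case BD_ERR_MEDIA:
        atomic_inc_32(&bd->d_kerr->bd_rq_media_err.value.ui32);
        break;
    case BD_ERR_NTRDY:
        atomic_inc_32(&bd->d_kerr->bd_rq_ntrdy_err.value.ui32);
        break;
    case BD_ERR_NODEV:
        atomic_inc_32(&bd->d_kerr->bd_rq_nodev_err.value.ui32);
        break;
    case BD_ERR_RECOV:
        atomic_inc_32(&bd->d_kerr->bd_rq_recov_err.value.ui32);
        break;
    case BD_ERR_ILLRQ:
        atomic_inc_32(&bd->d_kerr->bd_rq_illrq_err.value.ui32);
        break;
    case BD_ERR_PFA:
        atomic_inc_32(&bd->d_kerr->bd_rq_pfa_err.value.ui32);
        break;
    default:
        break;
    }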
2030 bd_t *bd;
2032 if ((bd = hdl->h_bd) != NULL) {
2033 bd_update_state(bd);