Lines Matching +full:slice +full:- +full:per +full:- +full:line
177 caddr_t addr, size_t nbytes, int slice, diskaddr_t offset,
181 caddr_t addr, size_t nbytes, int slice, diskaddr_t offset,
184 size_t nbytes, int slice, diskaddr_t offset,
187 int slice, diskaddr_t offset, struct buf *bufp,
268 * 50 seconds x max(number of servers, vdc->hattr_min)
297 /* values for dumping - need to run in a tighter loop */
317 * vdc_msglevel - controls level of messages
318 * vdc_matchinst - 64-bit variable where each bit corresponds
379 /* -------------------------------------------------------------------------- */
428 *resultp = vdc->dip; in vdc_getinfo()
453 /* nothing to do for this non-device */ in vdc_detach()
473 if (vdc->dkio_flush_pending) { in vdc_detach()
476 instance, vdc->dkio_flush_pending); in vdc_detach()
480 if (vdc->validate_pending) { in vdc_detach()
483 instance, vdc->validate_pending); in vdc_detach()
490 mutex_enter(&vdc->ownership_lock); in vdc_detach()
491 if (vdc->ownership & VDC_OWNERSHIP_GRANTED) { in vdc_detach()
497 mutex_exit(&vdc->ownership_lock); in vdc_detach()
500 mutex_enter(&vdc->lock); in vdc_detach()
501 vdc->lifecycle = VDC_LC_DETACHING; in vdc_detach()
502 mutex_exit(&vdc->lock); in vdc_detach()
508 for (srvr = vdc->server_list; srvr != NULL; srvr = srvr->next) { in vdc_detach()
509 rv = ldc_set_cb_mode(srvr->ldc_handle, LDC_CB_DISABLE); in vdc_detach()
511 srvr->ldc_id, rv); in vdc_detach()
514 if (vdc->initialized & VDC_THREAD) { in vdc_detach()
515 mutex_enter(&vdc->read_lock); in vdc_detach()
516 if ((vdc->read_state == VDC_READ_WAITING) || in vdc_detach()
517 (vdc->read_state == VDC_READ_RESET)) { in vdc_detach()
518 vdc->read_state = VDC_READ_RESET; in vdc_detach()
519 cv_signal(&vdc->read_cv); in vdc_detach()
522 mutex_exit(&vdc->read_lock); in vdc_detach()
525 mutex_enter(&vdc->lock); in vdc_detach()
526 if (vdc->state == VDC_STATE_INIT_WAITING) { in vdc_detach()
528 "[%d] write reset - move to resetting state...\n", in vdc_detach()
530 vdc->state = VDC_STATE_RESETTING; in vdc_detach()
531 cv_signal(&vdc->initwait_cv); in vdc_detach()
532 } else if (vdc->state == VDC_STATE_FAILED) { in vdc_detach()
533 vdc->io_pending = B_TRUE; in vdc_detach()
534 cv_signal(&vdc->io_pending_cv); in vdc_detach()
536 mutex_exit(&vdc->lock); in vdc_detach()
539 thread_join(vdc->msg_proc_thr->t_did); in vdc_detach()
540 ASSERT(vdc->state == VDC_STATE_DETACH); in vdc_detach()
542 vdc->instance); in vdc_detach()
545 mutex_enter(&vdc->lock); in vdc_detach()
547 if (vdc->initialized & VDC_DRING) in vdc_detach()
552 if (vdc->eio_thread) { in vdc_detach()
553 eio_tid = vdc->eio_thread->t_did; in vdc_detach()
554 vdc->failfast_interval = 0; in vdc_detach()
555 ASSERT(vdc->num_servers == 0); in vdc_detach()
556 cv_signal(&vdc->eio_cv); in vdc_detach()
561 if (vdc->ownership & VDC_OWNERSHIP_WANTED) { in vdc_detach()
562 ownership_tid = vdc->ownership_thread->t_did; in vdc_detach()
563 vdc->ownership = VDC_OWNERSHIP_NONE; in vdc_detach()
564 cv_signal(&vdc->ownership_cv); in vdc_detach()
569 mutex_exit(&vdc->lock); in vdc_detach()
577 if (vdc->initialized & VDC_MINOR) in vdc_detach()
580 if (vdc->io_stats) { in vdc_detach()
581 kstat_delete(vdc->io_stats); in vdc_detach()
582 vdc->io_stats = NULL; in vdc_detach()
585 if (vdc->err_stats) { in vdc_detach()
586 kstat_delete(vdc->err_stats); in vdc_detach()
587 vdc->err_stats = NULL; in vdc_detach()
590 if (vdc->initialized & VDC_LOCKS) { in vdc_detach()
591 mutex_destroy(&vdc->lock); in vdc_detach()
592 mutex_destroy(&vdc->read_lock); in vdc_detach()
593 mutex_destroy(&vdc->ownership_lock); in vdc_detach()
594 cv_destroy(&vdc->initwait_cv); in vdc_detach()
595 cv_destroy(&vdc->dring_free_cv); in vdc_detach()
596 cv_destroy(&vdc->membind_cv); in vdc_detach()
597 cv_destroy(&vdc->sync_blocked_cv); in vdc_detach()
598 cv_destroy(&vdc->read_cv); in vdc_detach()
599 cv_destroy(&vdc->running_cv); in vdc_detach()
600 cv_destroy(&vdc->io_pending_cv); in vdc_detach()
601 cv_destroy(&vdc->ownership_cv); in vdc_detach()
602 cv_destroy(&vdc->eio_cv); in vdc_detach()
605 if (vdc->minfo) in vdc_detach()
606 kmem_free(vdc->minfo, sizeof (struct dk_minfo)); in vdc_detach()
608 if (vdc->cinfo) in vdc_detach()
609 kmem_free(vdc->cinfo, sizeof (struct dk_cinfo)); in vdc_detach()
611 if (vdc->vtoc) in vdc_detach()
612 kmem_free(vdc->vtoc, sizeof (struct extvtoc)); in vdc_detach()
614 if (vdc->geom) in vdc_detach()
615 kmem_free(vdc->geom, sizeof (struct dk_geom)); in vdc_detach()
617 if (vdc->devid) { in vdc_detach()
619 ddi_devid_free(vdc->devid); in vdc_detach()
622 if (vdc->initialized & VDC_SOFT_STATE) in vdc_detach()
658 vdc->initialized = VDC_SOFT_STATE; in vdc_do_attach()
663 vdc->dip = dip; in vdc_do_attach()
664 vdc->instance = instance; in vdc_do_attach()
665 vdc->vdisk_type = VD_DISK_TYPE_UNK; in vdc_do_attach()
666 vdc->vdisk_label = VD_DISK_LABEL_UNK; in vdc_do_attach()
667 vdc->state = VDC_STATE_INIT; in vdc_do_attach()
668 vdc->lifecycle = VDC_LC_ATTACHING; in vdc_do_attach()
669 vdc->session_id = 0; in vdc_do_attach()
670 vdc->vdisk_bsize = DEV_BSIZE; in vdc_do_attach()
671 vdc->vio_bmask = 0; in vdc_do_attach()
672 vdc->vio_bshift = 0; in vdc_do_attach()
673 vdc->max_xfer_sz = maxphys / vdc->vdisk_bsize; in vdc_do_attach()
683 vdc->operations = VD_OP_MASK_READ; in vdc_do_attach()
685 vdc->vtoc = NULL; in vdc_do_attach()
686 vdc->geom = NULL; in vdc_do_attach()
687 vdc->cinfo = NULL; in vdc_do_attach()
688 vdc->minfo = NULL; in vdc_do_attach()
690 mutex_init(&vdc->lock, NULL, MUTEX_DRIVER, NULL); in vdc_do_attach()
691 cv_init(&vdc->initwait_cv, NULL, CV_DRIVER, NULL); in vdc_do_attach()
692 cv_init(&vdc->dring_free_cv, NULL, CV_DRIVER, NULL); in vdc_do_attach()
693 cv_init(&vdc->membind_cv, NULL, CV_DRIVER, NULL); in vdc_do_attach()
694 cv_init(&vdc->running_cv, NULL, CV_DRIVER, NULL); in vdc_do_attach()
695 cv_init(&vdc->io_pending_cv, NULL, CV_DRIVER, NULL); in vdc_do_attach()
697 vdc->io_pending = B_FALSE; in vdc_do_attach()
698 vdc->threads_pending = 0; in vdc_do_attach()
699 vdc->sync_op_blocked = B_FALSE; in vdc_do_attach()
700 cv_init(&vdc->sync_blocked_cv, NULL, CV_DRIVER, NULL); in vdc_do_attach()
702 mutex_init(&vdc->ownership_lock, NULL, MUTEX_DRIVER, NULL); in vdc_do_attach()
703 cv_init(&vdc->ownership_cv, NULL, CV_DRIVER, NULL); in vdc_do_attach()
704 cv_init(&vdc->eio_cv, NULL, CV_DRIVER, NULL); in vdc_do_attach()
707 mutex_init(&vdc->read_lock, NULL, MUTEX_DRIVER, NULL); in vdc_do_attach()
708 cv_init(&vdc->read_cv, NULL, CV_DRIVER, NULL); in vdc_do_attach()
709 vdc->read_state = VDC_READ_IDLE; in vdc_do_attach()
711 vdc->initialized |= VDC_LOCKS; in vdc_do_attach()
732 vdc->vdisk_label = VD_DISK_LABEL_UNK; in vdc_do_attach()
733 vdc->vtoc = kmem_zalloc(sizeof (struct extvtoc), KM_SLEEP); in vdc_do_attach()
734 vdc->geom = kmem_zalloc(sizeof (struct dk_geom), KM_SLEEP); in vdc_do_attach()
735 vdc->minfo = kmem_zalloc(sizeof (struct dk_minfo), KM_SLEEP); in vdc_do_attach()
738 vdc->msg_proc_thr = thread_create(NULL, 0, vdc_process_msg_thread, in vdc_do_attach()
740 if (vdc->msg_proc_thr == NULL) { in vdc_do_attach()
749 if (vdc->num_servers > 1) { in vdc_do_attach()
750 vdc->eio_thread = thread_create(NULL, 0, vdc_eio_thread, vdc, 0, in vdc_do_attach()
751 &p0, TS_RUN, v.v_maxsyspri - 2); in vdc_do_attach()
752 if (vdc->eio_thread == NULL) { in vdc_do_attach()
759 vdc->initialized |= VDC_THREAD; in vdc_do_attach()
766 * the handshake do be done so that we know the type of the disk (slice in vdc_do_attach()
770 mutex_enter(&vdc->lock); in vdc_do_attach()
772 mutex_exit(&vdc->lock); in vdc_do_attach()
790 ASSERT(vdc->lifecycle == VDC_LC_ONLINE || in vdc_do_attach()
791 vdc->lifecycle == VDC_LC_ONLINE_PENDING); in vdc_do_attach()
810 /* nothing to do for this non-device */ in vdc_attach()
828 ldc_attr.instance = vdc->instance; in vdc_do_ldc_init()
832 if ((srvr->state & VDC_LDC_INIT) == 0) { in vdc_do_ldc_init()
833 status = ldc_init(srvr->ldc_id, &ldc_attr, in vdc_do_ldc_init()
834 &srvr->ldc_handle); in vdc_do_ldc_init()
837 vdc->instance, srvr->ldc_id, status); in vdc_do_ldc_init()
840 srvr->state |= VDC_LDC_INIT; in vdc_do_ldc_init()
842 status = ldc_status(srvr->ldc_handle, &ldc_state); in vdc_do_ldc_init()
845 vdc->instance, status); in vdc_do_ldc_init()
848 srvr->ldc_state = ldc_state; in vdc_do_ldc_init()
850 if ((srvr->state & VDC_LDC_CB) == 0) { in vdc_do_ldc_init()
851 status = ldc_reg_callback(srvr->ldc_handle, vdc_handle_cb, in vdc_do_ldc_init()
855 vdc->instance, status); in vdc_do_ldc_init()
858 srvr->state |= VDC_LDC_CB; in vdc_do_ldc_init()
865 if (srvr->ldc_state == LDC_INIT) { in vdc_do_ldc_init()
866 status = ldc_open(srvr->ldc_handle); in vdc_do_ldc_init()
869 vdc->instance, srvr->ldc_id, status); in vdc_do_ldc_init()
872 srvr->state |= VDC_LDC_OPEN; in vdc_do_ldc_init()
890 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_start_ldc_connection()
894 DMSG(vdc, 0, "[%d] Finished bringing up LDC\n", vdc->instance); in vdc_start_ldc_connection()
906 ASSERT(MUTEX_HELD(&vdcp->lock)); in vdc_stop_ldc_connection()
909 vdcp->state); in vdc_stop_ldc_connection()
911 status = ldc_down(vdcp->curr_server->ldc_handle); in vdc_stop_ldc_connection()
914 vdcp->initialized &= ~VDC_HANDSHAKE; in vdc_stop_ldc_connection()
915 DMSG(vdcp, 0, "initialized=%x\n", vdcp->initialized); in vdc_stop_ldc_connection()
923 if (vdc->io_stats != NULL) { in vdc_create_io_kstats()
924 DMSG(vdc, 0, "[%d] I/O kstat already exists\n", vdc->instance); in vdc_create_io_kstats()
928 vdc->io_stats = kstat_create(VDC_DRIVER_NAME, vdc->instance, NULL, in vdc_create_io_kstats()
930 if (vdc->io_stats != NULL) { in vdc_create_io_kstats()
931 vdc->io_stats->ks_lock = &vdc->lock; in vdc_create_io_kstats()
932 kstat_install(vdc->io_stats); in vdc_create_io_kstats()
935 " will not be gathered", vdc->instance); in vdc_create_io_kstats()
946 int instance = vdc->instance; in vdc_create_err_kstats()
948 if (vdc->err_stats != NULL) { in vdc_create_err_kstats()
949 DMSG(vdc, 0, "[%d] ERR kstat already exists\n", vdc->instance); in vdc_create_err_kstats()
958 vdc->err_stats = kstat_create(kstatmodule_err, instance, kstatname, in vdc_create_err_kstats()
961 if (vdc->err_stats == NULL) { in vdc_create_err_kstats()
967 stp = (vd_err_stats_t *)vdc->err_stats->ks_data; in vdc_create_err_kstats()
968 kstat_named_init(&stp->vd_softerrs, "Soft Errors", in vdc_create_err_kstats()
970 kstat_named_init(&stp->vd_transerrs, "Transport Errors", in vdc_create_err_kstats()
972 kstat_named_init(&stp->vd_protoerrs, "Protocol Errors", in vdc_create_err_kstats()
974 kstat_named_init(&stp->vd_vid, "Vendor", in vdc_create_err_kstats()
976 kstat_named_init(&stp->vd_pid, "Product", in vdc_create_err_kstats()
978 kstat_named_init(&stp->vd_capacity, "Size", in vdc_create_err_kstats()
981 vdc->err_stats->ks_update = nulldev; in vdc_create_err_kstats()
983 kstat_install(vdc->err_stats); in vdc_create_err_kstats()
991 if (vdc->err_stats == NULL) in vdc_set_err_kstats()
994 mutex_enter(&vdc->lock); in vdc_set_err_kstats()
996 stp = (vd_err_stats_t *)vdc->err_stats->ks_data; in vdc_set_err_kstats()
999 stp->vd_capacity.value.ui64 = vdc->vdisk_size * vdc->vdisk_bsize; in vdc_set_err_kstats()
1000 (void) strcpy(stp->vd_vid.value.c, "SUN"); in vdc_set_err_kstats()
1001 (void) strcpy(stp->vd_pid.value.c, "VDSK"); in vdc_set_err_kstats()
1003 mutex_exit(&vdc->lock); in vdc_set_err_kstats()
1009 ddi_remove_minor_node(vdc->dip, "h"); in vdc_create_device_nodes_efi()
1010 ddi_remove_minor_node(vdc->dip, "h,raw"); in vdc_create_device_nodes_efi()
1012 if (ddi_create_minor_node(vdc->dip, "wd", S_IFBLK, in vdc_create_device_nodes_efi()
1013 VD_MAKE_DEV(vdc->instance, VD_EFI_WD_SLICE), in vdc_create_device_nodes_efi()
1016 vdc->instance); in vdc_create_device_nodes_efi()
1021 vdc->initialized |= VDC_MINOR; in vdc_create_device_nodes_efi()
1023 if (ddi_create_minor_node(vdc->dip, "wd,raw", S_IFCHR, in vdc_create_device_nodes_efi()
1024 VD_MAKE_DEV(vdc->instance, VD_EFI_WD_SLICE), in vdc_create_device_nodes_efi()
1027 vdc->instance); in vdc_create_device_nodes_efi()
1037 ddi_remove_minor_node(vdc->dip, "wd"); in vdc_create_device_nodes_vtoc()
1038 ddi_remove_minor_node(vdc->dip, "wd,raw"); in vdc_create_device_nodes_vtoc()
1040 if (ddi_create_minor_node(vdc->dip, "h", S_IFBLK, in vdc_create_device_nodes_vtoc()
1041 VD_MAKE_DEV(vdc->instance, VD_EFI_WD_SLICE), in vdc_create_device_nodes_vtoc()
1044 vdc->instance); in vdc_create_device_nodes_vtoc()
1049 vdc->initialized |= VDC_MINOR; in vdc_create_device_nodes_vtoc()
1051 if (ddi_create_minor_node(vdc->dip, "h,raw", S_IFCHR, in vdc_create_device_nodes_vtoc()
1052 VD_MAKE_DEV(vdc->instance, VD_EFI_WD_SLICE), in vdc_create_device_nodes_vtoc()
1055 vdc->instance); in vdc_create_device_nodes_vtoc()
1073 * of 2 is used in keeping with the Solaris convention that slice 2
1077 * vdc - soft state pointer
1080 * 0 - Success
1081 * EIO - Failed to create node
1094 instance = vdc->instance; in vdc_create_device_nodes()
1095 dip = vdc->dip; in vdc_create_device_nodes()
1097 switch (vdc->vdisk_type) { in vdc_create_device_nodes()
1111 * a minor node 'g' for the minor number corresponding to slice in vdc_create_device_nodes()
1112 * VD_EFI_WD_SLICE (slice 7) instead they have a minor node 'wd' in vdc_create_device_nodes()
1118 if (vdc->vdisk_label == VD_DISK_LABEL_EFI) in vdc_create_device_nodes()
1136 vdc->initialized |= VDC_MINOR; in vdc_create_device_nodes()
1171 mutex_enter(&vdc->lock); in vdc_prop_op()
1173 if (vdc->vdisk_label == VD_DISK_LABEL_UNK) { in vdc_prop_op()
1174 mutex_exit(&vdc->lock); in vdc_prop_op()
1178 nblocks = vdc->slice[VDCPART(dev)].nblocks; in vdc_prop_op()
1179 blksize = vdc->vdisk_bsize; in vdc_prop_op()
1180 mutex_exit(&vdc->lock); in vdc_prop_op()
1191 * This function checks if any slice of a given virtual disk is
1195 * vdc - soft state pointer
1198 * B_TRUE - at least one slice is opened.
1199 * B_FALSE - no slice is opened.
1208 if (vdc->open_lyr[i] > 0) in vdc_is_opened()
1214 if (vdc->open[i] != 0) in vdc_is_opened()
1222 vdc_mark_opened(vdc_t *vdc, int slice, int flag, int otyp) in vdc_mark_opened() argument
1228 ASSERT(slice < V_NUMPAR); in vdc_mark_opened()
1229 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_mark_opened()
1231 slicemask = 1 << slice; in vdc_mark_opened()
1234 * If we have a single-slice disk which was unavailable during the in vdc_mark_opened()
1236 * the type is known, we prevent opening any slice other than 0 in vdc_mark_opened()
1239 if (vdc->vdisk_type == VD_DISK_TYPE_SLICE && slice != 0) in vdc_mark_opened()
1242 /* check if slice is already exclusively opened */ in vdc_mark_opened()
1243 if (vdc->open_excl & slicemask) in vdc_mark_opened()
1246 /* if open exclusive, check if slice is already opened */ in vdc_mark_opened()
1248 if (vdc->open_lyr[slice] > 0) in vdc_mark_opened()
1251 if (vdc->open[i] & slicemask) in vdc_mark_opened()
1254 vdc->open_excl |= slicemask; in vdc_mark_opened()
1257 /* mark slice as opened */ in vdc_mark_opened()
1259 vdc->open_lyr[slice]++; in vdc_mark_opened()
1261 vdc->open[otyp] |= slicemask; in vdc_mark_opened()
1268 vdc_mark_closed(vdc_t *vdc, int slice, int flag, int otyp) in vdc_mark_closed() argument
1273 ASSERT(slice < V_NUMPAR); in vdc_mark_closed()
1274 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_mark_closed()
1276 slicemask = 1 << slice; in vdc_mark_closed()
1279 ASSERT(vdc->open_lyr[slice] > 0); in vdc_mark_closed()
1280 vdc->open_lyr[slice]--; in vdc_mark_closed()
1282 vdc->open[otyp] &= ~slicemask; in vdc_mark_closed()
1286 vdc->open_excl &= ~slicemask; in vdc_mark_closed()
1295 int slice, status = 0; in vdc_open() local
1312 slice = VDCPART(*dev); in vdc_open()
1317 !(VD_OP_SUPPORTED(vdc->operations, VD_OP_BWRITE))) { in vdc_open()
1321 mutex_enter(&vdc->lock); in vdc_open()
1323 status = vdc_mark_opened(vdc, slice, flag, otyp); in vdc_open()
1326 mutex_exit(&vdc->lock); in vdc_open()
1332 * handshake to complete because we don't know if the slice in vdc_open()
1335 if (vdc->vdisk_type != VD_DISK_TYPE_UNK && nodelay) { in vdc_open()
1338 if (vdc->validate_pending > 0) { in vdc_open()
1339 mutex_exit(&vdc->lock); in vdc_open()
1346 vdc_mark_closed(vdc, slice, flag, otyp); in vdc_open()
1347 mutex_exit(&vdc->lock); in vdc_open()
1351 vdc->validate_pending++; in vdc_open()
1352 mutex_exit(&vdc->lock); in vdc_open()
1356 mutex_exit(&vdc->lock); in vdc_open()
1360 mutex_enter(&vdc->lock); in vdc_open()
1362 if (vdc->vdisk_type == VD_DISK_TYPE_UNK || in vdc_open()
1363 (vdc->vdisk_type == VD_DISK_TYPE_SLICE && slice != 0) || in vdc_open()
1364 (!nodelay && (vdc->vdisk_label == VD_DISK_LABEL_UNK || in vdc_open()
1365 vdc->slice[slice].nblocks == 0))) { in vdc_open()
1366 vdc_mark_closed(vdc, slice, flag, otyp); in vdc_open()
1370 mutex_exit(&vdc->lock); in vdc_open()
1381 int slice; in vdc_close() local
1397 slice = VDCPART(dev); in vdc_close()
1401 * not a supported IOCTL command or the backing device is read-only in vdc_close()
1412 mutex_enter(&vdc->lock); in vdc_close()
1413 vdc_mark_closed(vdc, slice, flag, otyp); in vdc_close()
1414 mutex_exit(&vdc->lock); in vdc_close()
1452 if ((blkno & vdc->vio_bmask) != 0) { in vdc_dump()
1456 vio_blkno = blkno >> vdc->vio_bshift; in vdc_dump()
1477 /* -------------------------------------------------------------------------- */
1488 * 0: As per strategy(9E), the strategy() function must return 0
1496 int instance = VDCUNIT(buf->b_edev); in vdc_strategy()
1497 int op = (buf->b_flags & B_READ) ? VD_OP_BREAD : VD_OP_BWRITE; in vdc_strategy()
1498 int slice; in vdc_strategy() local
1508 instance, (buf->b_flags & B_READ) ? "Read" : "Write", in vdc_strategy()
1509 buf->b_bcount, buf->b_lblkno, (void *)buf->b_un.b_addr); in vdc_strategy()
1513 if ((long)buf->b_private == VD_SLICE_NONE) { in vdc_strategy()
1515 slice = VD_SLICE_NONE; in vdc_strategy()
1517 slice = VDCPART(buf->b_edev); in vdc_strategy()
1526 if ((buf->b_lblkno & vdc->vio_bmask) != 0) { in vdc_strategy()
1531 vio_blkno = buf->b_lblkno >> vdc->vio_bshift; in vdc_strategy()
1534 (void) vdc_do_op(vdc, op, (caddr_t)buf->b_un.b_addr, in vdc_strategy()
1535 buf->b_bcount, slice, vio_blkno, in vdc_strategy()
1551 * bp - pointer to the indicated buf(9S) struct.
1558 int instance = VDCUNIT(bufp->b_edev); in vdc_min()
1563 if (bufp->b_bcount > (vdc->max_xfer_sz * vdc->vdisk_bsize)) { in vdc_min()
1564 bufp->b_bcount = vdc->max_xfer_sz * vdc->vdisk_bsize; in vdc_min()
1605 /* -------------------------------------------------------------------------- */
1619 * vdc - soft state pointer for this instance of the device driver.
1622 * 0 - Success
1629 int status = -1; in vdc_init_ver_negotiation()
1632 ASSERT(mutex_owned(&vdc->lock)); in vdc_init_ver_negotiation()
1634 DMSG(vdc, 0, "[%d] Entered.\n", vdc->instance); in vdc_init_ver_negotiation()
1640 vdc->session_id = ((uint32_t)gettick() & 0xffffffff); in vdc_init_ver_negotiation()
1641 DMSG(vdc, 0, "[%d] Set SID to 0x%lx\n", vdc->instance, vdc->session_id); in vdc_init_ver_negotiation()
1646 pkt.tag.vio_sid = vdc->session_id; in vdc_init_ver_negotiation()
1653 vdc->instance, status); in vdc_init_ver_negotiation()
1656 "id(%lx) rv(%d) size(%ld)", vdc->instance, in vdc_init_ver_negotiation()
1657 vdc->curr_server->ldc_handle, status, msglen); in vdc_init_ver_negotiation()
1672 * vdcp - soft state pointer for this instance of the device driver.
1675 * 0 - Success
1687 mutex_exit(&vdcp->lock); in vdc_ver_negotiation()
1689 mutex_enter(&vdcp->lock); in vdc_ver_negotiation()
1693 vdcp->instance, status); in vdc_ver_negotiation()
1701 vdcp->instance); in vdc_ver_negotiation()
1715 * vdc - soft state pointer for this instance of the device driver.
1718 * 0 - Success
1728 ASSERT(mutex_owned(&vdc->lock)); in vdc_init_attr_negotiation()
1730 DMSG(vdc, 0, "[%d] entered\n", vdc->instance); in vdc_init_attr_negotiation()
1736 pkt.tag.vio_sid = vdc->session_id; in vdc_init_attr_negotiation()
1738 pkt.max_xfer_sz = vdc->max_xfer_sz; in vdc_init_attr_negotiation()
1739 pkt.vdisk_block_size = vdc->vdisk_bsize; in vdc_init_attr_negotiation()
1751 "id(%lx) rv(%d) size(%ld)", vdc->instance, in vdc_init_attr_negotiation()
1752 vdc->curr_server->ldc_handle, status, msglen); in vdc_init_attr_negotiation()
1767 * vdc - soft state pointer for this instance of the device driver.
1770 * 0 - Success
1782 mutex_exit(&vdcp->lock); in vdc_attr_negotiation()
1784 mutex_enter(&vdcp->lock); in vdc_attr_negotiation()
1788 vdcp->instance, status); in vdc_attr_negotiation()
1796 vdcp->instance); in vdc_attr_negotiation()
1811 * vdc - soft state pointer for this instance of the device driver.
1814 * 0 - Success
1821 int status = -1; in vdc_init_dring_negotiate()
1826 ASSERT(mutex_owned(&vdc->lock)); in vdc_init_dring_negotiate()
1837 vdc->instance, status); in vdc_init_dring_negotiate()
1842 vdc->instance, status); in vdc_init_dring_negotiate()
1848 pkt.tag.vio_sid = vdc->session_id; in vdc_init_dring_negotiate()
1851 pkt.num_descriptors = vdc->dring_len; in vdc_init_dring_negotiate()
1852 pkt.descriptor_size = vdc->dring_entry_size; in vdc_init_dring_negotiate()
1854 pkt.ncookies = vdc->dring_cookie_count; in vdc_init_dring_negotiate()
1855 pkt.cookie[0] = vdc->dring_cookie[0]; /* for now just one cookie */ in vdc_init_dring_negotiate()
1860 vdc->instance, status); in vdc_init_dring_negotiate()
1874 * vdc - soft state pointer for this instance of the device driver.
1877 * 0 - Success
1889 mutex_exit(&vdcp->lock); in vdc_dring_negotiation()
1891 mutex_enter(&vdcp->lock); in vdc_dring_negotiation()
1895 " rv(%d)", vdcp->instance, status); in vdc_dring_negotiation()
1903 vdcp->instance); in vdc_dring_negotiation()
1919 * vdc - soft state pointer for this instance of the device driver.
1922 * 0 - Success
1938 msg.tag.vio_sid = vdcp->session_id; in vdc_send_rdx()
1942 vdcp->instance, status); in vdc_send_rdx()
1955 * vdc - soft state pointer for this instance of the device driver.
1956 * msgp - received msg
1959 * 0 - Success
1967 ASSERT(msgp->tag.vio_msgtype == VIO_TYPE_CTRL); in vdc_handle_rdx()
1968 ASSERT(msgp->tag.vio_subtype == VIO_SUBTYPE_ACK); in vdc_handle_rdx()
1969 ASSERT(msgp->tag.vio_subtype_env == VIO_RDX); in vdc_handle_rdx()
1971 DMSG(vdcp, 1, "[%d] Got an RDX msg", vdcp->instance); in vdc_handle_rdx()
1983 * vdc - soft state pointer for this instance of the device driver.
1986 * 0 - Success
1998 mutex_exit(&vdcp->lock); in vdc_rdx_exchange()
2000 mutex_enter(&vdcp->lock); in vdc_rdx_exchange()
2003 vdcp->instance, status); in vdc_rdx_exchange()
2010 DMSG(vdcp, 0, "[%d] Invalid RDX response\n", vdcp->instance); in vdc_rdx_exchange()
2018 /* -------------------------------------------------------------------------- */
2046 * vdc->curr_server is protected by vdc->lock but to avoid in vdc_recv()
2050 * can change vdc->curr_server. in vdc_recv()
2052 status = ldc_read(vdc->curr_server->ldc_handle, in vdc_recv()
2073 mutex_enter(&vdc->read_lock); in vdc_recv()
2075 while (vdc->read_state != VDC_READ_PENDING) { in vdc_recv()
2078 if (vdc->read_state == VDC_READ_RESET) { in vdc_recv()
2079 mutex_exit(&vdc->read_lock); in vdc_recv()
2083 vdc->read_state = VDC_READ_WAITING; in vdc_recv()
2084 cv_wait(&vdc->read_cv, &vdc->read_lock); in vdc_recv()
2087 vdc->read_state = VDC_READ_IDLE; in vdc_recv()
2088 mutex_exit(&vdc->read_lock); in vdc_recv()
2103 switch (msg->tag.vio_msgtype) { in vdc_decode_tag()
2112 switch (msg->tag.vio_subtype) { in vdc_decode_tag()
2121 switch (msg->tag.vio_subtype_env) { in vdc_decode_tag()
2136 msg->tag.vio_msgtype, msg->tag.vio_subtype, in vdc_decode_tag()
2137 msg->tag.vio_subtype_env, ms, ss, ses); in vdc_decode_tag()
2151 * ldc_handle - LDC handle for the channel this instance of vdc uses
2152 * pkt - address of LDC message to be sent
2153 * msglen - the size of the message being sent. When the function
2157 * 0 - Success.
2158 * EINVAL - pkt or msglen were NULL
2159 * ECONNRESET - The connection was not up.
2160 * EWOULDBLOCK - LDC queue is full
2161 * xxx - other error codes returned by ldc_write
2171 ASSERT(mutex_owned(&vdc->lock)); in vdc_send()
2186 status = ldc_write(vdc->curr_server->ldc_handle, pkt, &size); in vdc_send()
2196 /* if LDC had serious issues --- reset vdc state */ in vdc_send()
2198 /* LDC had serious issues --- reset vdc state */ in vdc_send()
2199 mutex_enter(&vdc->read_lock); in vdc_send()
2200 if ((vdc->read_state == VDC_READ_WAITING) || in vdc_send()
2201 (vdc->read_state == VDC_READ_RESET)) in vdc_send()
2202 cv_signal(&vdc->read_cv); in vdc_send()
2203 vdc->read_state = VDC_READ_RESET; in vdc_send()
2204 mutex_exit(&vdc->read_lock); in vdc_send()
2207 if (vdc->state == VDC_STATE_INIT_WAITING) { in vdc_send()
2208 DMSG(vdc, 0, "[%d] write reset - " in vdc_send()
2209 "vdc is resetting ..\n", vdc->instance); in vdc_send()
2210 vdc->state = VDC_STATE_RESETTING; in vdc_send()
2211 cv_signal(&vdc->initwait_cv); in vdc_send()
2233 * dip - dev info pointer for this instance of the device driver.
2234 * mdpp - the returned MD.
2235 * vd_nodep - the returned device node.
2238 * 0 - Success.
2239 * ENOENT - Expected node or property did not exist.
2240 * ENXIO - Unexpected error communicating with MD framework
2262 * The "cfg-handle" property of a vdc node in an MD contains the MD's in vdc_get_md_node()
2264 * stores the value of the "cfg-handle" MD property as the value of in vdc_get_md_node()
2276 OBP_REG, -1); in vdc_get_md_node()
2356 * vdc - soft state pointer for this instance of the device driver.
2357 * mdp - md pointer
2358 * vd_nodep - device md node.
2361 * 0 - Success.
2362 * ENOENT - Expected node or property did not exist.
2404 vdc->num_servers = 0; in vdc_init_ports()
2410 srvr->vdcp = vdc; in vdc_init_ports()
2411 srvr->svc_state = VDC_SERVICE_OFFLINE; in vdc_init_ports()
2412 srvr->log_state = VDC_SERVICE_NONE; in vdc_init_ports()
2415 if (md_get_prop_val(mdp, vd_port, VDC_MD_ID, &srvr->id) != 0) { in vdc_init_ports()
2424 &srvr->ctimeout) != 0) { in vdc_init_ports()
2425 srvr->ctimeout = 0; in vdc_init_ports()
2450 &srvr->ldc_id) != 0) { in vdc_init_ports()
2468 prev_srvr->next = srvr; in vdc_init_ports()
2470 vdc->server_list = srvr; in vdc_init_ports()
2475 vdc->num_servers++; in vdc_init_ports()
2479 if (vdc->server_list != NULL) { in vdc_init_ports()
2480 vdc->curr_server = vdc->server_list; in vdc_init_ports()
2501 * vdc - soft state pointer for this instance of the device driver.
2504 * 0 - Success.
2505 * EINVAL - Driver is detaching / LDC error
2506 * ECONNREFUSED - Other end is not listening
2514 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_do_ldc_up()
2517 vdc->instance, vdc->curr_server->ldc_id); in vdc_do_ldc_up()
2519 if (vdc->lifecycle == VDC_LC_DETACHING) in vdc_do_ldc_up()
2522 if ((status = ldc_up(vdc->curr_server->ldc_handle)) != 0) { in vdc_do_ldc_up()
2526 vdc->instance, vdc->curr_server->ldc_id, status); in vdc_do_ldc_up()
2531 "channel=%ld, err=%d", vdc->instance, in vdc_do_ldc_up()
2532 vdc->curr_server->ldc_id, status); in vdc_do_ldc_up()
2537 if (ldc_status(vdc->curr_server->ldc_handle, &ldc_state) == 0) { in vdc_do_ldc_up()
2538 vdc->curr_server->ldc_state = ldc_state; in vdc_do_ldc_up()
2541 vdc->instance); in vdc_do_ldc_up()
2542 vdc->seq_num = 1; in vdc_do_ldc_up()
2543 vdc->seq_num_reply = 0; in vdc_do_ldc_up()
2557 * vdc - soft state pointer for this instance of the device driver.
2558 * srvr - vdc per-server info structure
2566 int instance = ddi_get_instance(vdc->dip); in vdc_terminate_ldc()
2568 if (srvr->state & VDC_LDC_OPEN) { in vdc_terminate_ldc()
2570 (void) ldc_close(srvr->ldc_handle); in vdc_terminate_ldc()
2572 if (srvr->state & VDC_LDC_CB) { in vdc_terminate_ldc()
2574 (void) ldc_unreg_callback(srvr->ldc_handle); in vdc_terminate_ldc()
2576 if (srvr->state & VDC_LDC_INIT) { in vdc_terminate_ldc()
2578 (void) ldc_fini(srvr->ldc_handle); in vdc_terminate_ldc()
2579 srvr->ldc_handle = 0; in vdc_terminate_ldc()
2582 srvr->state &= ~(VDC_LDC_INIT | VDC_LDC_CB | VDC_LDC_OPEN); in vdc_terminate_ldc()
2594 * vdc - soft state pointer for this instance of the device driver.
2602 int instance = ddi_get_instance(vdc->dip); in vdc_fini_ports()
2606 ASSERT(mutex_owned(&vdc->lock)); in vdc_fini_ports()
2608 DMSG(vdc, 0, "[%d] initialized=%x\n", instance, vdc->initialized); in vdc_fini_ports()
2610 srvr = vdc->server_list; in vdc_fini_ports()
2618 srvr = srvr->next; in vdc_fini_ports()
2624 vdc->server_list = NULL; in vdc_fini_ports()
2625 vdc->num_servers = 0; in vdc_fini_ports()
2628 /* -------------------------------------------------------------------------- */
2641 * vdc - soft state pointer for this instance of the device driver.
2644 * 0 - Success
2653 DMSG(vdc, 0, "[%d] initialized=%x\n", vdc->instance, vdc->initialized); in vdc_init_descriptor_ring()
2656 ASSERT(mutex_owned(&vdc->lock)); in vdc_init_descriptor_ring()
2661 if ((vdc->initialized & VDC_DRING_INIT) == 0) { in vdc_init_descriptor_ring()
2662 DMSG(vdc, 0, "[%d] ldc_mem_dring_create\n", vdc->instance); in vdc_init_descriptor_ring()
2670 if ((vdc->max_xfer_sz * vdc->vdisk_bsize) < maxphys) { in vdc_init_descriptor_ring()
2672 vdc->instance); in vdc_init_descriptor_ring()
2673 vdc->dring_max_cookies = maxphys / PAGESIZE; in vdc_init_descriptor_ring()
2675 vdc->dring_max_cookies = in vdc_init_descriptor_ring()
2676 (vdc->max_xfer_sz * vdc->vdisk_bsize) / PAGESIZE; in vdc_init_descriptor_ring()
2678 vdc->dring_entry_size = (sizeof (vd_dring_entry_t) + in vdc_init_descriptor_ring()
2680 (vdc->dring_max_cookies - 1))); in vdc_init_descriptor_ring()
2681 vdc->dring_len = VD_DRING_LEN; in vdc_init_descriptor_ring()
2683 status = ldc_mem_dring_create(vdc->dring_len, in vdc_init_descriptor_ring()
2684 vdc->dring_entry_size, &vdc->dring_hdl); in vdc_init_descriptor_ring()
2685 if ((vdc->dring_hdl == 0) || (status != 0)) { in vdc_init_descriptor_ring()
2687 vdc->instance); in vdc_init_descriptor_ring()
2690 vdc->initialized |= VDC_DRING_INIT; in vdc_init_descriptor_ring()
2693 if ((vdc->initialized & VDC_DRING_BOUND) == 0) { in vdc_init_descriptor_ring()
2694 DMSG(vdc, 0, "[%d] ldc_mem_dring_bind\n", vdc->instance); in vdc_init_descriptor_ring()
2695 vdc->dring_cookie = in vdc_init_descriptor_ring()
2698 status = ldc_mem_dring_bind(vdc->curr_server->ldc_handle, in vdc_init_descriptor_ring()
2699 vdc->dring_hdl, in vdc_init_descriptor_ring()
2701 &vdc->dring_cookie[0], in vdc_init_descriptor_ring()
2702 &vdc->dring_cookie_count); in vdc_init_descriptor_ring()
2706 vdc->instance, vdc->dring_hdl, in vdc_init_descriptor_ring()
2707 vdc->curr_server->ldc_handle, status); in vdc_init_descriptor_ring()
2710 ASSERT(vdc->dring_cookie_count == 1); in vdc_init_descriptor_ring()
2711 vdc->initialized |= VDC_DRING_BOUND; in vdc_init_descriptor_ring()
2714 status = ldc_mem_dring_info(vdc->dring_hdl, &vdc->dring_mem_info); in vdc_init_descriptor_ring()
2718 vdc->instance, vdc->dring_hdl); in vdc_init_descriptor_ring()
2722 if ((vdc->initialized & VDC_DRING_LOCAL) == 0) { in vdc_init_descriptor_ring()
2723 DMSG(vdc, 0, "[%d] local dring\n", vdc->instance); in vdc_init_descriptor_ring()
2726 vdc->local_dring = in vdc_init_descriptor_ring()
2727 kmem_zalloc(vdc->dring_len * sizeof (vdc_local_desc_t), in vdc_init_descriptor_ring()
2729 vdc->initialized |= VDC_DRING_LOCAL; in vdc_init_descriptor_ring()
2738 vdc->initialized |= VDC_DRING_ENTRY; in vdc_init_descriptor_ring()
2739 for (i = 0; i < vdc->dring_len; i++) { in vdc_init_descriptor_ring()
2741 dep->hdr.dstate = VIO_DESC_FREE; in vdc_init_descriptor_ring()
2743 status = ldc_mem_alloc_handle(vdc->curr_server->ldc_handle, in vdc_init_descriptor_ring()
2744 &vdc->local_dring[i].desc_mhdl); in vdc_init_descriptor_ring()
2747 " descriptor %d", vdc->instance, i); in vdc_init_descriptor_ring()
2750 vdc->local_dring[i].is_free = B_TRUE; in vdc_init_descriptor_ring()
2751 vdc->local_dring[i].dep = dep; in vdc_init_descriptor_ring()
2755 vdc->dring_curr_idx = VDC_DRING_FIRST_ENTRY; in vdc_init_descriptor_ring()
2767 * vdc - soft state pointer for this instance of the device driver.
2778 int status = -1; in vdc_destroy_descriptor_ring()
2782 ASSERT(mutex_owned(&vdc->lock)); in vdc_destroy_descriptor_ring()
2784 DMSG(vdc, 0, "[%d] Entered\n", vdc->instance); in vdc_destroy_descriptor_ring()
2786 if (vdc->initialized & VDC_DRING_ENTRY) { in vdc_destroy_descriptor_ring()
2788 "[%d] Removing Local DRing entries\n", vdc->instance); in vdc_destroy_descriptor_ring()
2789 for (i = 0; i < vdc->dring_len; i++) { in vdc_destroy_descriptor_ring()
2790 ldep = &vdc->local_dring[i]; in vdc_destroy_descriptor_ring()
2791 mhdl = ldep->desc_mhdl; in vdc_destroy_descriptor_ring()
2806 ldep->desc_mhdl = 0; in vdc_destroy_descriptor_ring()
2816 ldep->desc_mhdl = 0; in vdc_destroy_descriptor_ring()
2818 vdc->initialized &= ~VDC_DRING_ENTRY; in vdc_destroy_descriptor_ring()
2821 if (vdc->initialized & VDC_DRING_LOCAL) { in vdc_destroy_descriptor_ring()
2822 DMSG(vdc, 0, "[%d] Freeing Local DRing\n", vdc->instance); in vdc_destroy_descriptor_ring()
2823 kmem_free(vdc->local_dring, in vdc_destroy_descriptor_ring()
2824 vdc->dring_len * sizeof (vdc_local_desc_t)); in vdc_destroy_descriptor_ring()
2825 vdc->initialized &= ~VDC_DRING_LOCAL; in vdc_destroy_descriptor_ring()
2828 if (vdc->initialized & VDC_DRING_BOUND) { in vdc_destroy_descriptor_ring()
2829 DMSG(vdc, 0, "[%d] Unbinding DRing\n", vdc->instance); in vdc_destroy_descriptor_ring()
2830 status = ldc_mem_dring_unbind(vdc->dring_hdl); in vdc_destroy_descriptor_ring()
2832 vdc->initialized &= ~VDC_DRING_BOUND; in vdc_destroy_descriptor_ring()
2835 vdc->instance, status, vdc->dring_hdl); in vdc_destroy_descriptor_ring()
2837 kmem_free(vdc->dring_cookie, sizeof (ldc_mem_cookie_t)); in vdc_destroy_descriptor_ring()
2840 if (vdc->initialized & VDC_DRING_INIT) { in vdc_destroy_descriptor_ring()
2841 DMSG(vdc, 0, "[%d] Destroying DRing\n", vdc->instance); in vdc_destroy_descriptor_ring()
2842 status = ldc_mem_dring_destroy(vdc->dring_hdl); in vdc_destroy_descriptor_ring()
2844 vdc->dring_hdl = 0; in vdc_destroy_descriptor_ring()
2845 bzero(&vdc->dring_mem_info, sizeof (ldc_mem_info_t)); in vdc_destroy_descriptor_ring()
2846 vdc->initialized &= ~VDC_DRING_INIT; in vdc_destroy_descriptor_ring()
2849 vdc->instance, status, vdc->dring_hdl); in vdc_destroy_descriptor_ring()
2863 * vdcp - soft state pointer for this instance of the device driver.
2864 * idx - descriptor ring index
2876 ldep = &(vdcp->local_dring[idx]); in vdc_map_to_shared_dring()
2879 if (ldep->nbytes > 0) { in vdc_map_to_shared_dring()
2883 vdcp->instance); in vdc_map_to_shared_dring()
2891 dep = ldep->dep; in vdc_map_to_shared_dring()
2894 dep->payload.req_id = VDC_GET_NEXT_REQ_ID(vdcp); in vdc_map_to_shared_dring()
2895 dep->payload.operation = ldep->operation; in vdc_map_to_shared_dring()
2896 dep->payload.addr = ldep->offset; in vdc_map_to_shared_dring()
2897 dep->payload.nbytes = ldep->nbytes; in vdc_map_to_shared_dring()
2898 dep->payload.status = (uint32_t)-1; /* vds will set valid value */ in vdc_map_to_shared_dring()
2899 dep->payload.slice = ldep->slice; in vdc_map_to_shared_dring()
2900 dep->hdr.dstate = VIO_DESC_READY; in vdc_map_to_shared_dring()
2901 dep->hdr.ack = 1; /* request an ACK for every message */ in vdc_map_to_shared_dring()
2916 * vdcp - the soft state pointer
2917 * operation - operation we want vds to perform (VD_OP_XXX)
2918 * addr - address of data buf to be read/written.
2919 * nbytes - number of bytes to read/write
2920 * slice - the disk slice this request is for
2921 * offset - relative disk offset
2922 * bufp - buf of operation
2923 * dir - direction of operation (READ/WRITE/BOTH)
2931 size_t nbytes, int slice, diskaddr_t offset, buf_t *bufp, in vdc_send_request() argument
2937 ASSERT(slice == VD_SLICE_NONE || slice < V_NUMPAR); in vdc_send_request()
2939 mutex_enter(&vdcp->lock); in vdc_send_request()
2947 * calls) for performance reasons - we are already holding vdc->lock in vdc_send_request()
2964 nbytes, slice, offset, bufp, dir, flags); in vdc_send_request()
2969 while (vdcp->state != VDC_STATE_RUNNING) { in vdc_send_request()
2972 if (vdcp->state == VDC_STATE_DETACH) { in vdc_send_request()
2992 if (vdcp->state == VDC_STATE_FAILED) { in vdc_send_request()
2993 vdcp->io_pending = B_TRUE; in vdc_send_request()
2994 cv_signal(&vdcp->io_pending_cv); in vdc_send_request()
2997 cv_wait(&vdcp->running_cv, &vdcp->lock); in vdc_send_request()
3000 if (vdcp->state == VDC_STATE_FAILED) { in vdc_send_request()
3007 nbytes, slice, offset, bufp, dir, flags & ~VDC_OP_RESUBMIT)); in vdc_send_request()
3013 * processing (i.e sent to the vDisk server) - iostat(8) will in vdc_send_request()
3032 mutex_exit(&vdcp->lock); in vdc_send_request()
3048 * vdcp - the soft state pointer
3049 * operation - operation we want vds to perform (VD_OP_XXX)
3050 * addr - address of data buf to be read/written.
3051 * nbytes - number of bytes to read/write
3052 * slice - the disk slice this request is for
3053 * offset - relative disk offset
3054 * bufp - buf of operation
3055 * dir - direction of operation (READ/WRITE/BOTH)
3065 size_t nbytes, int slice, diskaddr_t offset, in vdc_populate_descriptor() argument
3075 ASSERT(MUTEX_HELD(&vdcp->lock)); in vdc_populate_descriptor()
3076 vdcp->threads_pending++; in vdc_populate_descriptor()
3078 DMSG(vdcp, 2, ": dring_curr_idx = %d\n", vdcp->dring_curr_idx); in vdc_populate_descriptor()
3081 /* use D-Ring reserved entry */ in vdc_populate_descriptor()
3083 local_dep = &(vdcp->local_dring[idx]); in vdc_populate_descriptor()
3085 /* Get next available D-Ring entry */ in vdc_populate_descriptor()
3086 idx = vdcp->dring_curr_idx; in vdc_populate_descriptor()
3087 local_dep = &(vdcp->local_dring[idx]); in vdc_populate_descriptor()
3089 if (!local_dep->is_free) { in vdc_populate_descriptor()
3090 DMSG(vdcp, 2, "[%d]: dring full - waiting for space\n", in vdc_populate_descriptor()
3091 vdcp->instance); in vdc_populate_descriptor()
3092 cv_wait(&vdcp->dring_free_cv, &vdcp->lock); in vdc_populate_descriptor()
3093 if (vdcp->state == VDC_STATE_RUNNING || in vdc_populate_descriptor()
3094 vdcp->state == VDC_STATE_HANDLE_PENDING) { in vdc_populate_descriptor()
3097 vdcp->threads_pending--; in vdc_populate_descriptor()
3102 if (next_idx >= vdcp->dring_len) in vdc_populate_descriptor()
3104 vdcp->dring_curr_idx = next_idx; in vdc_populate_descriptor()
3107 ASSERT(local_dep->is_free); in vdc_populate_descriptor()
3109 local_dep->operation = operation; in vdc_populate_descriptor()
3110 local_dep->addr = addr; in vdc_populate_descriptor()
3111 local_dep->nbytes = nbytes; in vdc_populate_descriptor()
3112 local_dep->slice = slice; in vdc_populate_descriptor()
3113 local_dep->offset = offset; in vdc_populate_descriptor()
3114 local_dep->buf = bufp; in vdc_populate_descriptor()
3115 local_dep->dir = dir; in vdc_populate_descriptor()
3116 local_dep->flags = flags; in vdc_populate_descriptor()
3118 local_dep->is_free = B_FALSE; in vdc_populate_descriptor()
3123 DMSG(vdcp, 0, "[%d]: cannot bind memory - error\n", in vdc_populate_descriptor()
3124 vdcp->instance); in vdc_populate_descriptor()
3129 local_dep->is_free = B_TRUE; in vdc_populate_descriptor()
3130 vdcp->threads_pending--; in vdc_populate_descriptor()
3133 DMSG(vdcp, 0, "[%d]: cannot bind memory - waiting ..\n", in vdc_populate_descriptor()
3134 vdcp->instance); in vdc_populate_descriptor()
3136 local_dep->is_free = B_TRUE; in vdc_populate_descriptor()
3137 vdcp->dring_curr_idx = idx; in vdc_populate_descriptor()
3138 cv_wait(&vdcp->membind_cv, &vdcp->lock); in vdc_populate_descriptor()
3139 if (vdcp->state == VDC_STATE_RUNNING || in vdc_populate_descriptor()
3140 vdcp->state == VDC_STATE_HANDLE_PENDING) { in vdc_populate_descriptor()
3143 vdcp->threads_pending--; in vdc_populate_descriptor()
3152 dmsg.dring_ident = vdcp->dring_ident; in vdc_populate_descriptor()
3155 vdcp->seq_num++; in vdc_populate_descriptor()
3157 DTRACE_PROBE2(populate, int, vdcp->instance, in vdc_populate_descriptor()
3160 vdcp->dring_ident, dmsg.start_idx, dmsg.end_idx, dmsg.seq_num); in vdc_populate_descriptor()
3191 vdcp->threads_pending--; in vdc_populate_descriptor()
3220 * vdc - the soft state pointer
3221 * op - operation we want vds to perform (VD_OP_XXX)
3222 * addr - address of data buf to be read/written.
3223 * nbytes - number of bytes to read/write
3224 * slice - the disk slice this request is for
3225 * offset - relative disk offset
3226 * bufp - buf structure associated with the request (can be NULL).
3227 * dir - direction of operation (READ/WRITE/BOTH)
3228 * flags - flags for the request.
3231 * 0 - the request has been succesfully submitted and completed.
3232 * != 0 - the request has failed. In that case, if a buf structure
3237 vdc_do_op(vdc_t *vdc, int op, caddr_t addr, size_t nbytes, int slice, in vdc_do_op() argument
3256 rv = vdc_send_request(vdc, op, addr, nbytes, slice, offset, bufp, in vdc_do_op()
3290 mutex_enter(&vdc->lock); in vdc_do_op()
3299 mutex_exit(&vdc->lock); in vdc_do_op()
3327 * vdcp - the soft state pointer
3328 * operation - operation we want vds to perform (VD_OP_XXX)
3329 * addr - address of data buf to be read/written.
3330 * nbytes - number of bytes to read/write
3331 * slice - the disk slice this request is for
3332 * offset - relative disk offset
3333 * dir - direction of operation (READ/WRITE/BOTH)
3334 * rconflict - check for reservation conflict in case of failure
3349 int slice, diskaddr_t offset, vio_desc_direction_t dir, boolean_t rconflict) in vdc_do_sync_op() argument
3358 mutex_enter(&vdcp->lock); in vdc_do_sync_op()
3359 vdcp->sync_op_cnt++; in vdc_do_sync_op()
3360 while (vdcp->sync_op_blocked && vdcp->state != VDC_STATE_DETACH) { in vdc_do_sync_op()
3363 vdcp->sync_op_cnt--; in vdc_do_sync_op()
3364 mutex_exit(&vdcp->lock); in vdc_do_sync_op()
3367 cv_wait(&vdcp->sync_blocked_cv, &vdcp->lock); in vdc_do_sync_op()
3371 if (vdcp->state == VDC_STATE_DETACH) { in vdc_do_sync_op()
3372 cv_broadcast(&vdcp->sync_blocked_cv); in vdc_do_sync_op()
3373 vdcp->sync_op_cnt--; in vdc_do_sync_op()
3374 mutex_exit(&vdcp->lock); in vdc_do_sync_op()
3379 vdcp->sync_op_blocked = B_TRUE; in vdc_do_sync_op()
3381 mutex_exit(&vdcp->lock); in vdc_do_sync_op()
3386 status = vdc_do_op(vdcp, operation, addr, nbytes, slice, offset, in vdc_do_sync_op()
3389 mutex_enter(&vdcp->lock); in vdc_do_sync_op()
3393 if (vdcp->state == VDC_STATE_DETACH) { in vdc_do_sync_op()
3397 vdcp->sync_op_blocked = B_FALSE; in vdc_do_sync_op()
3398 vdcp->sync_op_cnt--; in vdc_do_sync_op()
3401 cv_signal(&vdcp->sync_blocked_cv); in vdc_do_sync_op()
3403 mutex_exit(&vdcp->lock); in vdc_do_sync_op()
3422 * vdc - soft state pointer for this instance of the device driver.
3423 * buf - if buf is NULL then we drain all responses, otherwise we
3428 * 0 - Success. If we were expecting a response to a particular
3441 mutex_enter(&vdc->lock); in vdc_drain_response()
3446 rv = ldc_read(vdc->curr_server->ldc_handle, (caddr_t)&dmsg, in vdc_drain_response()
3496 if (idx >= vdc->dring_len) { in vdc_drain_response()
3498 vdc->instance, idx); in vdc_drain_response()
3501 ldep = &vdc->local_dring[idx]; in vdc_drain_response()
3502 if (ldep->dep->hdr.dstate != VIO_DESC_DONE) { in vdc_drain_response()
3503 DMSG(vdc, 0, "[%d] Entry @ %d - state !DONE %d\n", in vdc_drain_response()
3504 vdc->instance, idx, ldep->dep->hdr.dstate); in vdc_drain_response()
3508 mbuf = ldep->buf; in vdc_drain_response()
3510 mbuf->b_resid = mbuf->b_bcount - ldep->dep->payload.nbytes; in vdc_drain_response()
3511 bioerror(mbuf, ack ? ldep->dep->payload.status : EIO); in vdc_drain_response()
3520 /* if this is the last descriptor - break out of loop */ in vdc_drain_response()
3521 if ((idx + 1) % vdc->dring_len == vdc->dring_curr_idx) { in vdc_drain_response()
3533 mutex_exit(&vdc->lock); in vdc_drain_response()
3547 * vdc - soft state pointer for this instance of the device driver.
3548 * idx - Index of the Descriptor Ring entry being modified
3551 * 0 - Success
3562 ASSERT(idx < vdc->dring_len); in vdc_depopulate_descriptor()
3563 ldep = &vdc->local_dring[idx]; in vdc_depopulate_descriptor()
3565 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_depopulate_descriptor()
3567 DTRACE_PROBE2(depopulate, int, vdc->instance, vdc_local_desc_t *, ldep); in vdc_depopulate_descriptor()
3570 dep = ldep->dep; in vdc_depopulate_descriptor()
3572 ASSERT((dep->hdr.dstate == VIO_DESC_DONE) || in vdc_depopulate_descriptor()
3573 (dep->payload.status == ECANCELED)); in vdc_depopulate_descriptor()
3577 ldep->is_free = B_TRUE; in vdc_depopulate_descriptor()
3578 status = dep->payload.status; in vdc_depopulate_descriptor()
3579 DMSG(vdc, 2, ": is_free = %d : status = %d\n", ldep->is_free, status); in vdc_depopulate_descriptor()
3586 if (ldep->nbytes == 0) { in vdc_depopulate_descriptor()
3587 cv_signal(&vdc->dring_free_cv); in vdc_depopulate_descriptor()
3593 * data into an aligned buffer before sending it to LDC - we now in vdc_depopulate_descriptor()
3596 if (ldep->align_addr) { in vdc_depopulate_descriptor()
3597 ASSERT(ldep->addr != NULL); in vdc_depopulate_descriptor()
3599 if (dep->payload.nbytes > 0) in vdc_depopulate_descriptor()
3600 bcopy(ldep->align_addr, ldep->addr, in vdc_depopulate_descriptor()
3601 dep->payload.nbytes); in vdc_depopulate_descriptor()
3602 kmem_free(ldep->align_addr, in vdc_depopulate_descriptor()
3603 sizeof (caddr_t) * P2ROUNDUP(ldep->nbytes, 8)); in vdc_depopulate_descriptor()
3604 ldep->align_addr = NULL; in vdc_depopulate_descriptor()
3607 rv = ldc_mem_unbind_handle(ldep->desc_mhdl); in vdc_depopulate_descriptor()
3610 vdc->instance, ldep->desc_mhdl, idx, rv); in vdc_depopulate_descriptor()
3620 cv_signal(&vdc->membind_cv); in vdc_depopulate_descriptor()
3621 cv_signal(&vdc->dring_free_cv); in vdc_depopulate_descriptor()
3633 * vdc - soft state pointer for this instance of the device driver.
3634 * idx - Index of the Descriptor Ring entry being modified
3635 * addr - virtual address being mapped in
3636 * nybtes - number of bytes in 'addr'
3637 * operation - the vDisk operation being performed (VD_OP_xxx)
3640 * 0 - Success
3656 dep = ldep->dep; in vdc_populate_mem_hdl()
3657 mhdl = ldep->desc_mhdl; in vdc_populate_mem_hdl()
3659 switch (ldep->dir) { in vdc_populate_mem_hdl()
3677 * LDC expects any addresses passed in to be 8-byte aligned. We need in vdc_populate_mem_hdl()
3682 vaddr = ldep->addr; in vdc_populate_mem_hdl()
3683 nbytes = ldep->nbytes; in vdc_populate_mem_hdl()
3685 ASSERT(ldep->align_addr == NULL); in vdc_populate_mem_hdl()
3686 ldep->align_addr = in vdc_populate_mem_hdl()
3691 vdcp->instance, (void *)vaddr, (void *)ldep->align_addr, in vdc_populate_mem_hdl()
3692 nbytes, ldep->operation); in vdc_populate_mem_hdl()
3694 bcopy(vaddr, ldep->align_addr, nbytes); in vdc_populate_mem_hdl()
3695 vaddr = ldep->align_addr; in vdc_populate_mem_hdl()
3700 maptype, perm, &dep->payload.cookie[0], &dep->payload.ncookies); in vdc_populate_mem_hdl()
3702 vdcp->instance, dep->payload.ncookies); in vdc_populate_mem_hdl()
3706 vdcp->instance, (void *)mhdl, (void *)vaddr, rv); in vdc_populate_mem_hdl()
3707 if (ldep->align_addr) { in vdc_populate_mem_hdl()
3708 kmem_free(ldep->align_addr, in vdc_populate_mem_hdl()
3710 ldep->align_addr = NULL; in vdc_populate_mem_hdl()
3718 for (i = 1; i < dep->payload.ncookies; i++) { in vdc_populate_mem_hdl()
3719 rv = ldc_mem_nextcookie(mhdl, &dep->payload.cookie[i]); in vdc_populate_mem_hdl()
3724 vdcp->instance, mhdl, i, rv); in vdc_populate_mem_hdl()
3725 if (ldep->align_addr) { in vdc_populate_mem_hdl()
3726 kmem_free(ldep->align_addr, in vdc_populate_mem_hdl()
3727 sizeof (caddr_t) * ldep->nbytes); in vdc_populate_mem_hdl()
3728 ldep->align_addr = NULL; in vdc_populate_mem_hdl()
3748 * event - Type of event (LDC_EVT_xxx) that triggered the callback
3749 * arg - soft state pointer for this instance of the device driver.
3752 * 0 - Success
3760 vdc_t *vdc = srvr->vdcp; in vdc_handle_cb()
3764 DMSG(vdc, 1, "evt=%lx seqID=%ld\n", event, vdc->seq_num); in vdc_handle_cb()
3767 mutex_enter(&vdc->lock); in vdc_handle_cb()
3769 if (vdc->curr_server != srvr) { in vdc_handle_cb()
3771 vdc->instance, event, srvr->id); in vdc_handle_cb()
3772 mutex_exit(&vdc->lock); in vdc_handle_cb()
3785 DMSG(vdc, 0, "[%d] Received LDC_EVT_UP\n", vdc->instance); in vdc_handle_cb()
3788 rv = ldc_status(srvr->ldc_handle, &ldc_state); in vdc_handle_cb()
3791 vdc->instance, rv); in vdc_handle_cb()
3792 mutex_exit(&vdc->lock); in vdc_handle_cb()
3795 if (srvr->ldc_state != LDC_UP && in vdc_handle_cb()
3802 vdc->seq_num = 1; in vdc_handle_cb()
3803 vdc->seq_num_reply = 0; in vdc_handle_cb()
3804 vdc->io_pending = B_TRUE; in vdc_handle_cb()
3805 srvr->ldc_state = ldc_state; in vdc_handle_cb()
3806 cv_signal(&vdc->initwait_cv); in vdc_handle_cb()
3807 cv_signal(&vdc->io_pending_cv); in vdc_handle_cb()
3812 DMSG(vdc, 1, "[%d] Received LDC_EVT_READ\n", vdc->instance); in vdc_handle_cb()
3813 mutex_enter(&vdc->read_lock); in vdc_handle_cb()
3814 cv_signal(&vdc->read_cv); in vdc_handle_cb()
3815 vdc->read_state = VDC_READ_PENDING; in vdc_handle_cb()
3816 mutex_exit(&vdc->read_lock); in vdc_handle_cb()
3817 mutex_exit(&vdc->lock); in vdc_handle_cb()
3819 /* that's all we have to do - no need to handle DOWN/RESET */ in vdc_handle_cb()
3825 DMSG(vdc, 0, "[%d] Received LDC RESET event\n", vdc->instance); in vdc_handle_cb()
3831 mutex_enter(&vdc->read_lock); in vdc_handle_cb()
3832 if ((vdc->read_state == VDC_READ_WAITING) || in vdc_handle_cb()
3833 (vdc->read_state == VDC_READ_RESET)) in vdc_handle_cb()
3834 cv_signal(&vdc->read_cv); in vdc_handle_cb()
3835 vdc->read_state = VDC_READ_RESET; in vdc_handle_cb()
3836 mutex_exit(&vdc->read_lock); in vdc_handle_cb()
3839 if (vdc->state == VDC_STATE_INIT_WAITING) { in vdc_handle_cb()
3840 vdc->state = VDC_STATE_RESETTING; in vdc_handle_cb()
3841 cv_signal(&vdc->initwait_cv); in vdc_handle_cb()
3842 } else if (vdc->state == VDC_STATE_FAILED) { in vdc_handle_cb()
3843 vdc->io_pending = B_TRUE; in vdc_handle_cb()
3844 cv_signal(&vdc->io_pending_cv); in vdc_handle_cb()
3849 mutex_exit(&vdc->lock); in vdc_handle_cb()
3853 vdc->instance, event); in vdc_handle_cb()
3868 * vdcp - soft state pointer for this instance of the device driver.
3871 * 0 - Success
3881 DMSG(vdcp, 1, "[%d] Entered\n", vdcp->instance); in vdc_wait_for_response()
3888 vdcp->instance, status); in vdc_wait_for_response()
3894 vdcp->instance, sizeof (vio_msg_tag_t), nbytes); in vdc_wait_for_response()
3898 DMSG(vdcp, 2, "[%d] (%x/%x/%x)\n", vdcp->instance, in vdc_wait_for_response()
3899 msgp->tag.vio_msgtype, in vdc_wait_for_response()
3900 msgp->tag.vio_subtype, in vdc_wait_for_response()
3901 msgp->tag.vio_subtype_env); in vdc_wait_for_response()
3909 if ((msgp->tag.vio_sid != vdcp->session_id) && in vdc_wait_for_response()
3910 (msgp->tag.vio_subtype_env != VIO_VER_INFO)) { in vdc_wait_for_response()
3913 vdcp->instance, msgp->tag.vio_sid, in vdc_wait_for_response()
3914 vdcp->session_id, in vdc_wait_for_response()
3915 ((vio_dring_msg_t *)msgp)->seq_num, in vdc_wait_for_response()
3916 ((vio_dring_msg_t *)msgp)->start_idx); in vdc_wait_for_response()
3933 * vdcp - soft state pointer for this instance of the device driver.
3936 * 0 - Success
3948 ASSERT(MUTEX_NOT_HELD(&vdcp->lock)); in vdc_resubmit_backup_dring()
3949 ASSERT(vdcp->state == VDC_STATE_HANDLE_PENDING); in vdc_resubmit_backup_dring()
3951 if (vdcp->local_dring_backup == NULL) { in vdc_resubmit_backup_dring()
3957 vdcp->local_dring_backup_len, vdcp->local_dring_backup_tail); in vdc_resubmit_backup_dring()
3963 b_idx = vdcp->local_dring_backup_tail; in vdc_resubmit_backup_dring()
3964 for (count = 0; count < vdcp->local_dring_backup_len; count++) { in vdc_resubmit_backup_dring()
3966 curr_ldep = &(vdcp->local_dring_backup[b_idx]); in vdc_resubmit_backup_dring()
3969 if (!curr_ldep->is_free) { in vdc_resubmit_backup_dring()
3973 rv = vdc_do_op(vdcp, curr_ldep->operation, in vdc_resubmit_backup_dring()
3974 curr_ldep->addr, curr_ldep->nbytes, in vdc_resubmit_backup_dring()
3975 curr_ldep->slice, curr_ldep->offset, in vdc_resubmit_backup_dring()
3976 curr_ldep->buf, curr_ldep->dir, in vdc_resubmit_backup_dring()
3977 (curr_ldep->flags & ~VDC_OP_STATE_RUNNING) | in vdc_resubmit_backup_dring()
3982 vdcp->instance, b_idx); in vdc_resubmit_backup_dring()
3992 curr_ldep->is_free = B_TRUE; in vdc_resubmit_backup_dring()
3997 if (++b_idx >= vdcp->local_dring_backup_len) in vdc_resubmit_backup_dring()
4001 /* all done - now clear up pending dring copy */ in vdc_resubmit_backup_dring()
4002 dring_size = vdcp->local_dring_backup_len * in vdc_resubmit_backup_dring()
4003 sizeof (vdcp->local_dring_backup[0]); in vdc_resubmit_backup_dring()
4005 (void) kmem_free(vdcp->local_dring_backup, dring_size); in vdc_resubmit_backup_dring()
4007 vdcp->local_dring_backup = NULL; in vdc_resubmit_backup_dring()
4024 * vdcp - soft state pointer for this instance of the device driver.
4039 ASSERT(MUTEX_HELD(&vdcp->lock)); in vdc_cancel_backup_dring()
4040 ASSERT(vdcp->state == VDC_STATE_FAILED); in vdc_cancel_backup_dring()
4042 if (vdcp->local_dring_backup == NULL) { in vdc_cancel_backup_dring()
4048 vdcp->local_dring_backup_len, vdcp->local_dring_backup_tail); in vdc_cancel_backup_dring()
4054 b_idx = vdcp->local_dring_backup_tail; in vdc_cancel_backup_dring()
4055 for (count = 0; count < vdcp->local_dring_backup_len; count++) { in vdc_cancel_backup_dring()
4057 ldep = &(vdcp->local_dring_backup[b_idx]); in vdc_cancel_backup_dring()
4060 if (!ldep->is_free) { in vdc_cancel_backup_dring()
4072 bufp = ldep->buf; in vdc_cancel_backup_dring()
4074 bufp->b_resid = bufp->b_bcount; in vdc_cancel_backup_dring()
4075 if (ldep->operation == VD_OP_BREAD || in vdc_cancel_backup_dring()
4076 ldep->operation == VD_OP_BWRITE) { in vdc_cancel_backup_dring()
4086 if (++b_idx >= vdcp->local_dring_backup_len) in vdc_cancel_backup_dring()
4090 /* all done - now clear up pending dring copy */ in vdc_cancel_backup_dring()
4091 dring_size = vdcp->local_dring_backup_len * in vdc_cancel_backup_dring()
4092 sizeof (vdcp->local_dring_backup[0]); in vdc_cancel_backup_dring()
4094 (void) kmem_free(vdcp->local_dring_backup, dring_size); in vdc_cancel_backup_dring()
4096 vdcp->local_dring_backup = NULL; in vdc_cancel_backup_dring()
4112 * arg - argument of the timeout function actually a soft state
4123 mutex_enter(&vdcp->lock); in vdc_connection_timeout()
4125 vdcp->ctimeout_reached = B_TRUE; in vdc_connection_timeout()
4127 mutex_exit(&vdcp->lock); in vdc_connection_timeout()
4140 * vdcp - soft state pointer for this instance of the device driver.
4151 ASSERT(MUTEX_HELD(&vdcp->lock)); in vdc_backup_local_dring()
4152 ASSERT(vdcp->state == VDC_STATE_RESETTING); in vdc_backup_local_dring()
4160 if (vdcp->local_dring_backup != NULL) { in vdc_backup_local_dring()
4162 "(len=%d, tail=%d)\n", vdcp->local_dring_backup_len, in vdc_backup_local_dring()
4163 vdcp->local_dring_backup_tail); in vdc_backup_local_dring()
4175 if (!(vdcp->initialized & VDC_DRING_LOCAL)) in vdc_backup_local_dring()
4179 "tail=%d)\n", vdcp->dring_len, vdcp->dring_curr_idx); in vdc_backup_local_dring()
4181 dring_size = vdcp->dring_len * sizeof (vdcp->local_dring[0]); in vdc_backup_local_dring()
4183 vdcp->local_dring_backup = kmem_alloc(dring_size, KM_SLEEP); in vdc_backup_local_dring()
4184 bcopy(vdcp->local_dring, vdcp->local_dring_backup, dring_size); in vdc_backup_local_dring()
4186 vdcp->local_dring_backup_tail = vdcp->dring_curr_idx; in vdc_backup_local_dring()
4187 vdcp->local_dring_backup_len = vdcp->dring_len; in vdc_backup_local_dring()
4194 b_idx = vdcp->local_dring_backup_tail; in vdc_backup_local_dring()
4195 for (count = 0; count < vdcp->local_dring_backup_len; count++) { in vdc_backup_local_dring()
4197 curr_ldep = &(vdcp->local_dring_backup[b_idx]); in vdc_backup_local_dring()
4199 if (!curr_ldep->is_free && in vdc_backup_local_dring()
4200 (curr_ldep->operation == VD_OP_BREAD || in vdc_backup_local_dring()
4201 curr_ldep->operation == VD_OP_BWRITE)) { in vdc_backup_local_dring()
4206 if (++b_idx >= vdcp->local_dring_backup_len) in vdc_backup_local_dring()
4218 ASSERT(MUTEX_HELD(&vdcp->lock)); in vdc_switch_server()
4221 if (vdcp->num_servers == 1) { in vdc_switch_server()
4226 curr_server = vdcp->curr_server; in vdc_switch_server()
4228 (curr_server->next) ? curr_server->next : vdcp->server_list; in vdc_switch_server()
4232 rv = ldc_down(curr_server->ldc_handle); in vdc_switch_server()
4235 vdcp->instance, curr_server->id); in vdc_switch_server()
4240 vdcp->curr_server = new_server; in vdc_switch_server()
4243 vdcp->instance, vdcp->curr_server->id, vdcp->curr_server->ldc_id); in vdc_switch_server()
4253 ASSERT(mutex_owned(&vdcp->lock)); in vdc_print_svc_status()
4255 svc_state = vdcp->curr_server->svc_state; in vdc_print_svc_status()
4257 if (vdcp->curr_server->log_state == svc_state) in vdc_print_svc_status()
4260 instance = vdcp->instance; in vdc_print_svc_status()
4261 ldc_id = vdcp->curr_server->ldc_id; in vdc_print_svc_status()
4262 port_id = vdcp->curr_server->id; in vdc_print_svc_status()
4295 vdcp->curr_server->log_state = svc_state; in vdc_print_svc_status()
4324 * hshake_cnt - number of handshake attempts
4325 * hattr_cnt - number of attribute negotiation attempts
4328 * B_TRUE - handshake should be retried
4329 * B_FALSE - handshake should not be retried
4337 ASSERT(vdcp->lifecycle != VDC_LC_DETACHING); in vdc_handshake_retry()
4340 vdcp->curr_server->hshake_cnt = hshake_cnt; in vdc_handshake_retry()
4341 vdcp->curr_server->hattr_cnt = hattr_cnt; in vdc_handshake_retry()
4348 vdcp->curr_server->hattr_total = 0; in vdc_handshake_retry()
4350 vdcp->curr_server->hattr_total += hattr_cnt; in vdc_handshake_retry()
4356 if (vdcp->lifecycle == VDC_LC_ONLINE) in vdc_handshake_retry()
4363 if (vdcp->lifecycle == VDC_LC_ATTACHING) { in vdc_handshake_retry()
4365 for (srvr = vdcp->server_list; srvr != NULL; in vdc_handshake_retry()
4366 srvr = srvr->next) { in vdc_handshake_retry()
4367 if (srvr->hshake_cnt == 0) { in vdc_handshake_retry()
4379 ASSERT(vdcp->lifecycle == VDC_LC_ONLINE_PENDING); in vdc_handshake_retry()
4386 for (srvr = vdcp->server_list; srvr != NULL; srvr = srvr->next) { in vdc_handshake_retry()
4387 if (srvr->hattr_cnt == 0) { in vdc_handshake_retry()
4390 hattr_total += srvr->hattr_total; in vdc_handshake_retry()
4397 return (hattr_total < vdcp->hattr_min); in vdc_handshake_retry()
4400 /* -------------------------------------------------------------------------- */
4418 * vdc - soft state pointer for this instance of the device driver.
4436 mutex_enter(&vdcp->lock); in vdc_process_msg_thread()
4438 ASSERT(vdcp->lifecycle == VDC_LC_ATTACHING); in vdc_process_msg_thread()
4442 #define Q(_s) (vdcp->state == _s) ? #_s : in vdc_process_msg_thread()
4443 DMSG(vdcp, 3, "state = %d (%s)\n", vdcp->state, in vdc_process_msg_thread()
4456 switch (vdcp->state) { in vdc_process_msg_thread()
4478 vdc_timeout : vdcp->curr_server->ctimeout; in vdc_process_msg_thread()
4486 if (vdcp->lifecycle == VDC_LC_DETACHING) { in vdc_process_msg_thread()
4487 vdcp->state = VDC_STATE_DETACH; in vdc_process_msg_thread()
4492 if (vdcp->ctimeout_reached) { in vdc_process_msg_thread()
4495 vdcp->state = VDC_STATE_FAILED; in vdc_process_msg_thread()
4501 * the number of handshake per server or if we have done in vdc_process_msg_thread()
4509 "handshakes", vdcp->instance); in vdc_process_msg_thread()
4510 vdcp->state = VDC_STATE_FAILED; in vdc_process_msg_thread()
4525 vdcp->state = VDC_STATE_INIT_WAITING; in vdc_process_msg_thread()
4527 vdcp->curr_server->svc_state = in vdc_process_msg_thread()
4536 if (vdcp->curr_server->ldc_state == LDC_UP) { in vdc_process_msg_thread()
4537 vdcp->state = VDC_STATE_NEGOTIATE; in vdc_process_msg_thread()
4547 status = cv_timedwait(&vdcp->initwait_cv, &vdcp->lock, in vdc_process_msg_thread()
4549 if (status == -1 && in vdc_process_msg_thread()
4550 vdcp->state == VDC_STATE_INIT_WAITING && in vdc_process_msg_thread()
4551 vdcp->curr_server->ldc_state != LDC_UP) { in vdc_process_msg_thread()
4553 vdcp->curr_server->svc_state = in vdc_process_msg_thread()
4556 vdcp->state = VDC_STATE_INIT; in vdc_process_msg_thread()
4560 if (vdcp->state != VDC_STATE_INIT_WAITING) { in vdc_process_msg_thread()
4563 vdcp->state); in vdc_process_msg_thread()
4599 vdcp->state = VDC_STATE_HANDLE_PENDING; in vdc_process_msg_thread()
4609 vdcp->state = VDC_STATE_RESETTING; in vdc_process_msg_thread()
4610 vdcp->self_reset = B_TRUE; in vdc_process_msg_thread()
4611 vdcp->curr_server->svc_state = VDC_SERVICE_FAILED; in vdc_process_msg_thread()
4615 vdcp->state); in vdc_process_msg_thread()
4621 vdcp->instance); in vdc_process_msg_thread()
4622 vdcp->curr_server->svc_state = VDC_SERVICE_CONNECTED; in vdc_process_msg_thread()
4624 mutex_exit(&vdcp->lock); in vdc_process_msg_thread()
4630 if (vdcp->num_servers > 1 && in vdc_process_msg_thread()
4632 mutex_enter(&vdcp->lock); in vdc_process_msg_thread()
4633 vdcp->curr_server->svc_state = in vdc_process_msg_thread()
4635 vdcp->state = VDC_STATE_FAULTED; in vdc_process_msg_thread()
4642 vdcp->ctimeout_reached = B_FALSE; in vdc_process_msg_thread()
4652 mutex_enter(&vdcp->lock); in vdc_process_msg_thread()
4655 vdcp->state = VDC_STATE_RESETTING; in vdc_process_msg_thread()
4656 vdcp->self_reset = B_TRUE; in vdc_process_msg_thread()
4657 vdcp->curr_server->svc_state = in vdc_process_msg_thread()
4661 vdcp->state = VDC_STATE_RUNNING; in vdc_process_msg_thread()
4675 for (srvr = vdcp->server_list; srvr != NULL; in vdc_process_msg_thread()
4676 srvr = srvr->next) { in vdc_process_msg_thread()
4677 svc_state = srvr->svc_state; in vdc_process_msg_thread()
4683 vdcp->state = VDC_STATE_RESETTING; in vdc_process_msg_thread()
4684 vdcp->self_reset = B_TRUE; in vdc_process_msg_thread()
4686 vdcp->state = VDC_STATE_FAILED; in vdc_process_msg_thread()
4703 vdcp->instance); in vdc_process_msg_thread()
4706 if (vdcp->lifecycle == VDC_LC_ATTACHING) { in vdc_process_msg_thread()
4707 vdcp->lifecycle = VDC_LC_ONLINE_PENDING; in vdc_process_msg_thread()
4708 vdcp->hattr_min = vdc_hattr_min_initial; in vdc_process_msg_thread()
4710 vdcp->hattr_min = vdc_hattr_min; in vdc_process_msg_thread()
4720 cv_broadcast(&vdcp->running_cv); in vdc_process_msg_thread()
4724 while (!vdcp->io_pending) in vdc_process_msg_thread()
4725 cv_wait(&vdcp->io_pending_cv, &vdcp->lock); in vdc_process_msg_thread()
4728 * There's a new IO pending. Try to re-establish a in vdc_process_msg_thread()
4733 for (srvr = vdcp->server_list; srvr != NULL; in vdc_process_msg_thread()
4734 srvr = srvr->next) { in vdc_process_msg_thread()
4735 srvr->svc_state = VDC_SERVICE_OFFLINE; in vdc_process_msg_thread()
4736 srvr->hshake_cnt = 0; in vdc_process_msg_thread()
4737 srvr->hattr_cnt = 0; in vdc_process_msg_thread()
4738 srvr->hattr_total = 0; in vdc_process_msg_thread()
4744 vdcp->ctimeout_reached = B_FALSE; in vdc_process_msg_thread()
4746 vdcp->state = VDC_STATE_RESETTING; in vdc_process_msg_thread()
4747 vdcp->self_reset = B_TRUE; in vdc_process_msg_thread()
4753 if (vdcp->lifecycle == VDC_LC_DETACHING) { in vdc_process_msg_thread()
4754 vdcp->state = VDC_STATE_DETACH; in vdc_process_msg_thread()
4758 vdcp->lifecycle = VDC_LC_ONLINE; in vdc_process_msg_thread()
4762 "recovered", vdcp->instance); in vdc_process_msg_thread()
4768 * to come on line. in vdc_process_msg_thread()
4770 cv_broadcast(&vdcp->running_cv); in vdc_process_msg_thread()
4773 if (vdcp->failfast_interval != 0 || in vdc_process_msg_thread()
4774 vdcp->num_servers > 1) in vdc_process_msg_thread()
4775 cv_signal(&vdcp->eio_cv); in vdc_process_msg_thread()
4778 if (vdcp->ownership & VDC_OWNERSHIP_WANTED) in vdc_process_msg_thread()
4779 vdcp->ownership |= VDC_OWNERSHIP_RESET; in vdc_process_msg_thread()
4780 cv_signal(&vdcp->ownership_cv); in vdc_process_msg_thread()
4782 vdcp->curr_server->svc_state = VDC_SERVICE_ONLINE; in vdc_process_msg_thread()
4785 mutex_exit(&vdcp->lock); in vdc_process_msg_thread()
4793 vdcp->instance); in vdc_process_msg_thread()
4797 "returned err=%d\n", vdcp->instance, in vdc_process_msg_thread()
4804 mutex_enter(&vdcp->lock); in vdc_process_msg_thread()
4807 for (srvr = vdcp->server_list; srvr != NULL; in vdc_process_msg_thread()
4808 srvr = srvr->next) { in vdc_process_msg_thread()
4809 srvr->svc_state = VDC_SERVICE_OFFLINE; in vdc_process_msg_thread()
4810 srvr->log_state = VDC_SERVICE_NONE; in vdc_process_msg_thread()
4811 srvr->hshake_cnt = 0; in vdc_process_msg_thread()
4812 srvr->hattr_cnt = 0; in vdc_process_msg_thread()
4813 srvr->hattr_total = 0; in vdc_process_msg_thread()
4821 vdcp->state = VDC_STATE_RESETTING; in vdc_process_msg_thread()
4822 vdcp->self_reset = B_TRUE; in vdc_process_msg_thread()
4838 "(pending = %d)\n", (int)vdcp->threads_pending); in vdc_process_msg_thread()
4840 if (vdcp->self_reset) { in vdc_process_msg_thread()
4843 vdcp->instance); in vdc_process_msg_thread()
4845 vdcp->self_reset = B_FALSE; in vdc_process_msg_thread()
4852 while (vdcp->threads_pending) { in vdc_process_msg_thread()
4853 cv_broadcast(&vdcp->membind_cv); in vdc_process_msg_thread()
4854 cv_broadcast(&vdcp->dring_free_cv); in vdc_process_msg_thread()
4855 mutex_exit(&vdcp->lock); in vdc_process_msg_thread()
4858 mutex_enter(&vdcp->lock); in vdc_process_msg_thread()
4861 ASSERT(vdcp->threads_pending == 0); in vdc_process_msg_thread()
4864 ASSERT(vdcp->read_state != VDC_READ_WAITING); in vdc_process_msg_thread()
4866 vdcp->read_state = VDC_READ_IDLE; in vdc_process_msg_thread()
4867 vdcp->io_pending = B_FALSE; in vdc_process_msg_thread()
4877 /* cleanup the old d-ring */ in vdc_process_msg_thread()
4881 vdcp->state = VDC_STATE_INIT; in vdc_process_msg_thread()
4887 vdcp->instance); in vdc_process_msg_thread()
4890 mutex_exit(&vdcp->lock); in vdc_process_msg_thread()
4895 mutex_enter(&vdcp->lock); in vdc_process_msg_thread()
4901 cv_broadcast(&vdcp->running_cv); in vdc_process_msg_thread()
4903 while (vdcp->sync_op_cnt > 0) { in vdc_process_msg_thread()
4904 cv_broadcast(&vdcp->sync_blocked_cv); in vdc_process_msg_thread()
4905 mutex_exit(&vdcp->lock); in vdc_process_msg_thread()
4908 mutex_enter(&vdcp->lock); in vdc_process_msg_thread()
4911 mutex_exit(&vdcp->lock); in vdc_process_msg_thread()
4914 vdcp->instance); in vdc_process_msg_thread()
4930 * ACK - wake up the waiting thread
4931 * NACK - resend any messages necessary
4938 * vdc - soft state pointer for this instance of the device driver.
4939 * msg - the LDC message sent by vds
4942 * 0 - Success.
4943 * > 0 - error value returned by LDC
4957 ASSERT(msg->tag.vio_msgtype == VIO_TYPE_DATA); in vdc_process_data_msg()
4960 mutex_enter(&vdcp->lock); in vdc_process_data_msg()
4965 idx = start = dring_msg->start_idx; in vdc_process_data_msg()
4966 end = dring_msg->end_idx; in vdc_process_data_msg()
4967 if ((start >= vdcp->dring_len) || in vdc_process_data_msg()
4968 (end >= vdcp->dring_len) || (end < -1)) { in vdc_process_data_msg()
4976 vdcp->instance, start, end); in vdc_process_data_msg()
4977 mutex_exit(&vdcp->lock); in vdc_process_data_msg()
4988 mutex_exit(&vdcp->lock); in vdc_process_data_msg()
4997 DMSG(vdcp, 0, "[%d] invalid seqno\n", vdcp->instance); in vdc_process_data_msg()
4998 mutex_exit(&vdcp->lock); in vdc_process_data_msg()
5002 if (msg->tag.vio_subtype == VIO_SUBTYPE_NACK) { in vdc_process_data_msg()
5010 DMSG(vdcp, 0, "[%d] DATA NACK\n", vdcp->instance); in vdc_process_data_msg()
5011 mutex_exit(&vdcp->lock); in vdc_process_data_msg()
5014 } else if (msg->tag.vio_subtype == VIO_SUBTYPE_INFO) { in vdc_process_data_msg()
5021 mutex_exit(&vdcp->lock); in vdc_process_data_msg()
5028 ldep = &vdcp->local_dring[idx]; in vdc_process_data_msg()
5030 DMSG(vdcp, 1, ": state 0x%x\n", ldep->dep->hdr.dstate); in vdc_process_data_msg()
5032 if (ldep->dep->hdr.dstate == VIO_DESC_DONE) { in vdc_process_data_msg()
5035 status = ldep->dep->payload.status; in vdc_process_data_msg()
5037 bufp = ldep->buf; in vdc_process_data_msg()
5040 bufp->b_resid = bufp->b_bcount - ldep->dep->payload.nbytes; in vdc_process_data_msg()
5049 bufp->b_bcount, ldep->dep->payload.nbytes); in vdc_process_data_msg()
5059 ((vdcp->num_servers > 1 && in vdc_process_data_msg()
5060 (ldep->flags & VDC_OP_ERRCHK_BACKEND)) || in vdc_process_data_msg()
5061 (vdcp->failfast_interval != 0 && in vdc_process_data_msg()
5062 (ldep->flags & VDC_OP_ERRCHK_CONFLICT)))) { in vdc_process_data_msg()
5068 op = ldep->operation; in vdc_process_data_msg()
5072 ldep->dep->payload.nbytes); in vdc_process_data_msg()
5085 mutex_exit(&vdcp->lock); in vdc_process_data_msg()
5101 * vdc - soft state pointer for this instance of the device driver.
5102 * ver_msg - LDC message sent by vDisk server
5105 * 0 - Success
5113 ASSERT(mutex_owned(&vdc->lock)); in vdc_handle_ver_msg()
5115 if (ver_msg->tag.vio_subtype_env != VIO_VER_INFO) { in vdc_handle_ver_msg()
5119 if (ver_msg->dev_class != VDEV_DISK_SERVER) { in vdc_handle_ver_msg()
5123 switch (ver_msg->tag.vio_subtype) { in vdc_handle_ver_msg()
5131 vdc->ver.major = ver_msg->ver_major; in vdc_handle_ver_msg()
5132 vdc->ver.minor = ver_msg->ver_minor; in vdc_handle_ver_msg()
5133 ASSERT(vdc->ver.major > 0); in vdc_handle_ver_msg()
5145 if (ver_msg->ver_major > 0) { in vdc_handle_ver_msg()
5148 ASSERT(vdc->ver.major > 0); in vdc_handle_ver_msg()
5151 ver_msg->tag.vio_subtype = VIO_SUBTYPE_INFO; in vdc_handle_ver_msg()
5152 ver_msg->dev_class = VDEV_DISK; in vdc_handle_ver_msg()
5156 vdc->instance, status); in vdc_handle_ver_msg()
5161 vdc->instance); in vdc_handle_ver_msg()
5189 * vdc - soft state pointer for this instance of the device driver.
5190 * attr_msg - LDC message sent by vDisk server
5193 * 0 - Success
5202 ASSERT(mutex_owned(&vdc->lock)); in vdc_handle_attr_msg()
5204 if (attr_msg->tag.vio_subtype_env != VIO_ATTR_INFO) { in vdc_handle_attr_msg()
5208 switch (attr_msg->tag.vio_subtype) { in vdc_handle_attr_msg()
5213 if (attr_msg->vdisk_size == 0) { in vdc_handle_attr_msg()
5215 vdc->instance); in vdc_handle_attr_msg()
5220 if (attr_msg->max_xfer_sz == 0) { in vdc_handle_attr_msg()
5222 vdc->instance); in vdc_handle_attr_msg()
5227 if (attr_msg->vdisk_size == VD_SIZE_UNKNOWN) { in vdc_handle_attr_msg()
5229 vdc->instance); in vdc_handle_attr_msg()
5230 attr_msg->vdisk_size = 0; in vdc_handle_attr_msg()
5234 if (attr_msg->vdisk_block_size > 0 && in vdc_handle_attr_msg()
5236 attr_msg->vdisk_block_size) != 0) { in vdc_handle_attr_msg()
5238 vdc->instance, attr_msg->vdisk_block_size); in vdc_handle_attr_msg()
5244 old_type = vdc->vdisk_type; in vdc_handle_attr_msg()
5245 vdc_update_size(vdc, attr_msg->vdisk_size, in vdc_handle_attr_msg()
5246 attr_msg->vdisk_block_size, attr_msg->max_xfer_sz); in vdc_handle_attr_msg()
5247 vdc->vdisk_type = attr_msg->vdisk_type; in vdc_handle_attr_msg()
5248 vdc->operations = attr_msg->operations; in vdc_handle_attr_msg()
5249 if (vio_ver_is_supported(vdc->ver, 1, 1)) in vdc_handle_attr_msg()
5250 vdc->vdisk_media = attr_msg->vdisk_media; in vdc_handle_attr_msg()
5252 vdc->vdisk_media = 0; in vdc_handle_attr_msg()
5255 vdc->instance, vdc->max_xfer_sz, attr_msg->max_xfer_sz); in vdc_handle_attr_msg()
5257 vdc->instance, vdc->vdisk_bsize, in vdc_handle_attr_msg()
5258 attr_msg->vdisk_block_size); in vdc_handle_attr_msg()
5260 if ((attr_msg->xfer_mode != VIO_DRING_MODE_V1_0) || in vdc_handle_attr_msg()
5261 (attr_msg->vdisk_size > INT64_MAX) || in vdc_handle_attr_msg()
5262 (attr_msg->operations == 0) || in vdc_handle_attr_msg()
5263 (attr_msg->vdisk_type > VD_DISK_TYPE_DISK)) { in vdc_handle_attr_msg()
5265 vdc->instance); in vdc_handle_attr_msg()
5279 * nodes. If we now find out that this is a single-slice disk in vdc_handle_attr_msg()
5280 * then we need to re-create the appropriate device nodes. in vdc_handle_attr_msg()
5283 (vdc->initialized & VDC_MINOR) && in vdc_handle_attr_msg()
5284 vdc->vdisk_type == VD_DISK_TYPE_SLICE) { in vdc_handle_attr_msg()
5285 ddi_remove_minor_node(vdc->dip, NULL); in vdc_handle_attr_msg()
5286 (void) devfs_clean(ddi_get_parent(vdc->dip), in vdc_handle_attr_msg()
5290 "device nodes", vdc->instance); in vdc_handle_attr_msg()
5327 * vdc - soft state pointer for this instance of the driver.
5328 * dring_msg - LDC message sent by vDisk server
5331 * 0 - Success
5339 ASSERT(mutex_owned(&vdc->lock)); in vdc_handle_dring_reg_msg()
5341 if (dring_msg->tag.vio_subtype_env != VIO_DRING_REG) { in vdc_handle_dring_reg_msg()
5345 switch (dring_msg->tag.vio_subtype) { in vdc_handle_dring_reg_msg()
5348 vdc->dring_ident = dring_msg->dring_ident; in vdc_handle_dring_reg_msg()
5350 vdc->instance, vdc->dring_ident); in vdc_handle_dring_reg_msg()
5359 vdc->instance); in vdc_handle_dring_reg_msg()
5391 * vdc - soft state pointer for this instance of the driver.
5392 * dring_msg - pointer to the LDC message sent by vds
5395 * VDC_SEQ_NUM_TODO - Message needs to be processed
5396 * VDC_SEQ_NUM_SKIP - Message has already been processed
5397 * VDC_SEQ_NUM_INVALID - The seq numbers are so out of sync,
5405 ASSERT(mutex_owned(&vdc->lock)); in vdc_verify_seq_num()
5411 if ((dring_msg->seq_num <= vdc->seq_num_reply) || in vdc_verify_seq_num()
5412 (dring_msg->seq_num > vdc->seq_num)) { in vdc_verify_seq_num()
5415 vdc->instance, dring_msg->seq_num, in vdc_verify_seq_num()
5416 vdc->seq_num_reply, vdc->seq_num, in vdc_verify_seq_num()
5417 vdc->req_id_proc, vdc->req_id); in vdc_verify_seq_num()
5420 vdc->seq_num_reply = dring_msg->seq_num; in vdc_verify_seq_num()
5422 if (vdc->req_id_proc < vdc->req_id) in vdc_verify_seq_num()
5440 * ver_msg - LDC message sent by vDisk server
5443 * B_TRUE - Success
5444 * B_FALSE - Version not supported
5454 (vdc_version[i].major < vdc_version[i-1].major)); in vdc_is_supported_version()
5462 if (ver_msg->ver_major == vdc_version[i].major) { in vdc_is_supported_version()
5463 if (ver_msg->ver_minor > vdc_version[i].minor) { in vdc_is_supported_version()
5466 ver_msg->ver_minor, vdc_version[i].minor); in vdc_is_supported_version()
5467 ver_msg->ver_minor = vdc_version[i].minor; in vdc_is_supported_version()
5479 if (ver_msg->ver_major > vdc_version[i].major) { in vdc_is_supported_version()
5480 ver_msg->ver_major = vdc_version[i].major; in vdc_is_supported_version()
5481 ver_msg->ver_minor = vdc_version[i].minor; in vdc_is_supported_version()
5483 ver_msg->ver_major, ver_msg->ver_minor); in vdc_is_supported_version()
5499 ver_msg->ver_major = 0; in vdc_is_supported_version()
5500 ver_msg->ver_minor = 0; in vdc_is_supported_version()
5504 /* -------------------------------------------------------------------------- */
5526 * arg - a pointer to a vdc_dk_arg_t structure.
5540 dkc = &dk_arg->dkc; in vdc_dkio_flush_cb()
5541 vdc = dk_arg->vdc; in vdc_dkio_flush_cb()
5545 VDCPART(dk_arg->dev), 0, VIO_both_dir, B_TRUE); in vdc_dkio_flush_cb()
5548 vdc->instance, rv, in vdc_dkio_flush_cb()
5549 ddi_model_convert_from(dk_arg->mode & FMODELS)); in vdc_dkio_flush_cb()
5556 if ((dk_arg->mode & FKIOCTL) && in vdc_dkio_flush_cb()
5558 (dkc->dkc_callback != NULL)) { in vdc_dkio_flush_cb()
5559 ASSERT(dkc->dkc_cookie != NULL); in vdc_dkio_flush_cb()
5560 (*dkc->dkc_callback)(dkc->dkc_cookie, rv); in vdc_dkio_flush_cb()
5564 mutex_enter(&vdc->lock); in vdc_dkio_flush_cb()
5565 vdc->dkio_flush_pending--; in vdc_dkio_flush_cb()
5566 ASSERT(vdc->dkio_flush_pending >= 0); in vdc_dkio_flush_cb()
5567 mutex_exit(&vdc->lock); in vdc_dkio_flush_cb()
5581 * vdc - soft state pointer
5582 * arg - a pointer to a dk_map[NDKMAP] or dk_map32[NDKMAP] structure
5583 * flag - ioctl flags
5596 mutex_enter(&vdc->lock); in vdc_dkio_gapart()
5599 mutex_exit(&vdc->lock); in vdc_dkio_gapart()
5603 if (vdc->vdisk_size > VD_OLDVTOC_LIMIT) { in vdc_dkio_gapart()
5604 mutex_exit(&vdc->lock); in vdc_dkio_gapart()
5608 vtoc = vdc->vtoc; in vdc_dkio_gapart()
5609 geom = vdc->geom; in vdc_dkio_gapart()
5613 for (i = 0; i < vtoc->v_nparts; i++) { in vdc_dkio_gapart()
5614 data.map32[i].dkl_cylno = vtoc->v_part[i].p_start / in vdc_dkio_gapart()
5615 (geom->dkg_nhead * geom->dkg_nsect); in vdc_dkio_gapart()
5616 data.map32[i].dkl_nblk = vtoc->v_part[i].p_size; in vdc_dkio_gapart()
5622 for (i = 0; i < vtoc->v_nparts; i++) { in vdc_dkio_gapart()
5623 data.map[i].dkl_cylno = vtoc->v_part[i].p_start / in vdc_dkio_gapart()
5624 (geom->dkg_nhead * geom->dkg_nsect); in vdc_dkio_gapart()
5625 data.map[i].dkl_nblk = vtoc->v_part[i].p_size; in vdc_dkio_gapart()
5631 mutex_exit(&vdc->lock); in vdc_dkio_gapart()
5647 * vdc - soft state pointer
5648 * arg - a pointer to a struct partition64 structure
5649 * flag - ioctl flags
5673 if (partno >= gpt->efi_gpt_NumberOfPartitionEntries) { in vdc_dkio_partition()
5681 p64.p_size = gpe[partno].efi_gpe_EndingLBA - p64.p_start + 1; in vdc_dkio_partition()
5701 * dev - device
5702 * arg - a pointer to a dadkio_rwcmd or dadkio_rwcmd32 structure
5703 * flag - ioctl flags
5750 auio.uio_loffset = rwcmd.blkaddr * vdc->vdisk_bsize; in vdc_dioctl_rwcmd()
5760 buf->b_private = (void *)VD_SLICE_NONE; in vdc_dioctl_rwcmd()
5762 status = physio(vdc_strategy, buf, VD_MAKE_DEV(vdc->instance, 0), in vdc_dioctl_rwcmd()
5791 vd_scsi->cdb_len = cdb_len; in vdc_scsi_alloc()
5792 vd_scsi->sense_len = sense_len; in vdc_scsi_alloc()
5793 vd_scsi->datain_len = datain_len; in vdc_scsi_alloc()
5794 vd_scsi->dataout_len = dataout_len; in vdc_scsi_alloc()
5805 * vd_scsi - The SCSI operation buffer.
5806 * log_error - indicate if an error message should be logged.
5813 * 0 - Status is good.
5814 * EACCES - Status reports a reservation conflict.
5815 * ENOTSUP - Status reports a check condition and sense key
5817 * EIO - Any other status.
5828 if (vd_scsi->cmd_status == STATUS_GOOD) in vdc_scsi_status()
5838 ddi_pathname(vdc->dip, path_str), vdc->instance, in vdc_scsi_status()
5845 switch (vd_scsi->cmd_status) { in vdc_scsi_status()
5853 if (vd_scsi->sense_len == 0 || in vdc_scsi_status()
5854 vd_scsi->sense_status != STATUS_GOOD) { in vdc_scsi_status()
5887 if (vdc->failfast_interval != 0 && in vdc_scsi_status()
5888 cdb->scc_cmd != SCMD_PERSISTENT_RESERVE_IN && in vdc_scsi_status()
5889 cdb->scc_cmd != SCMD_PERSISTENT_RESERVE_OUT) { in vdc_scsi_status()
5893 ddi_pathname(vdc->dip, path_str)); in vdc_scsi_status()
5914 vd_scsi->cmd_status); in vdc_scsi_status()
5921 vd_scsi->cmd_status); in vdc_scsi_status()
5995 vd_scsi->options |= VD_SCSI_OPT_NORETRY; in vdc_uscsi_cmd()
5999 vd_scsi->task_attribute = 0; in vdc_uscsi_cmd()
6002 vd_scsi->task_attribute = VD_SCSI_TASK_ACA; in vdc_uscsi_cmd()
6004 vd_scsi->task_attribute = VD_SCSI_TASK_HQUEUE; in vdc_uscsi_cmd()
6006 vd_scsi->task_attribute = VD_SCSI_TASK_ORDERED; in vdc_uscsi_cmd()
6008 vd_scsi->task_attribute = 0; in vdc_uscsi_cmd()
6012 vd_scsi->timeout = uscsi.uscsi_timeout; in vdc_uscsi_cmd()
6014 /* copy-in cdb data */ in vdc_uscsi_cmd()
6024 /* keep a pointer to the data-in buffer */ in vdc_uscsi_cmd()
6027 /* copy-in request data to the data-out buffer */ in vdc_uscsi_cmd()
6045 uscsi.uscsi_status = vd_scsi->cmd_status; in vdc_uscsi_cmd()
6052 uscsi.uscsi_rqstatus = vd_scsi->sense_status; in vdc_uscsi_cmd()
6055 uscsi.uscsi_rqresid = uscsi.uscsi_rqlen - in vdc_uscsi_cmd()
6056 vd_scsi->sense_len; in vdc_uscsi_cmd()
6058 vd_scsi->sense_len, mode) != 0) { in vdc_uscsi_cmd()
6068 uscsi.uscsi_resid = uscsi.uscsi_buflen - in vdc_uscsi_cmd()
6069 vd_scsi->datain_len; in vdc_uscsi_cmd()
6071 vd_scsi->datain_len, mode) != 0) { in vdc_uscsi_cmd()
6076 uscsi.uscsi_resid = uscsi.uscsi_buflen - in vdc_uscsi_cmd()
6077 vd_scsi->dataout_len; in vdc_uscsi_cmd()
6081 /* copy-out result */ in vdc_uscsi_cmd()
6110 * cmd - SCSI PERSISTENT IN command
6111 * len - length of the SCSI input buffer
6112 * vd_scsi_len - return the length of the allocated buffer
6135 cdb->scc_cmd = SCMD_PERSISTENT_RESERVE_IN; in vdc_scsi_alloc_persistent_in()
6136 cdb->cdb_opaque[1] = cmd; in vdc_scsi_alloc_persistent_in()
6139 vd_scsi->timeout = vdc_scsi_timeout; in vdc_scsi_alloc_persistent_in()
6148 * cmd - SCSI PERSISTENT OUT command
6149 * len - length of the SCSI output buffer
6150 * vd_scsi_len - return the length of the allocated buffer
6173 cdb->scc_cmd = SCMD_PERSISTENT_RESERVE_OUT; in vdc_scsi_alloc_persistent_out()
6174 cdb->cdb_opaque[1] = cmd; in vdc_scsi_alloc_persistent_out()
6177 vd_scsi->timeout = vdc_scsi_timeout; in vdc_scsi_alloc_persistent_out()
6226 sizeof (sd_prin_readkeys_t) - sizeof (caddr_t) + in vdc_mhd_inkeys()
6238 listlen = scsi_keys->len / MHIOC_RESV_KEY_SIZE; in vdc_mhd_inkeys()
6241 inkeys32.generation = scsi_keys->generation; in vdc_mhd_inkeys()
6258 inkeys.generation = scsi_keys->generation; in vdc_mhd_inkeys()
6279 rv = ddi_copyout(&scsi_keys->keylist, user_keys, in vdc_mhd_inkeys()
6340 sizeof (sd_prin_readresv_t) - sizeof (caddr_t) + in vdc_mhd_inresv()
6352 listlen = scsi_resv->len / SCSI3_RESV_DESC_LEN; in vdc_mhd_inresv()
6355 inresv32.generation = scsi_resv->generation; in vdc_mhd_inresv()
6372 inresv.generation = scsi_resv->generation; in vdc_mhd_inresv()
6393 resv = (sd_readresv_desc_t *)&scsi_resv->readresv_desc; in vdc_mhd_inresv()
6396 mhd_resv.type = resv->type; in vdc_mhd_inresv()
6397 mhd_resv.scope = resv->scope; in vdc_mhd_inresv()
6399 BE_32(resv->scope_specific_addr); in vdc_mhd_inresv()
6400 bcopy(&resv->resvkey, &mhd_resv.key, in vdc_mhd_inresv()
6446 bcopy(mhd_reg.oldkey.key, scsi_prout->res_key, MHIOC_RESV_KEY_SIZE); in vdc_mhd_register()
6447 bcopy(mhd_reg.newkey.key, scsi_prout->service_key, MHIOC_RESV_KEY_SIZE); in vdc_mhd_register()
6448 scsi_prout->aptpl = (uchar_t)mhd_reg.aptpl; in vdc_mhd_register()
6487 bcopy(mhd_resv.key.key, scsi_prout->res_key, MHIOC_RESV_KEY_SIZE); in vdc_mhd_reserve()
6488 scsi_prout->scope_address = mhd_resv.scope_specific_addr; in vdc_mhd_reserve()
6489 cdb->cdb_opaque[2] = mhd_resv.type; in vdc_mhd_reserve()
6526 vd_scsi->task_attribute = VD_SCSI_TASK_ACA; in vdc_mhd_preemptabort()
6529 bcopy(mhd_preempt.resvdesc.key.key, scsi_prout->res_key, in vdc_mhd_preemptabort()
6531 bcopy(mhd_preempt.victim_key.key, scsi_prout->service_key, in vdc_mhd_preemptabort()
6533 scsi_prout->scope_address = mhd_preempt.resvdesc.scope_specific_addr; in vdc_mhd_preemptabort()
6534 cdb->cdb_opaque[2] = mhd_preempt.resvdesc.type; in vdc_mhd_preemptabort()
6571 bcopy(mhd_regi.newkey.key, scsi_prout->service_key, in vdc_mhd_registerignore()
6573 scsi_prout->aptpl = (uchar_t)mhd_regi.aptpl; in vdc_mhd_registerignore()
6610 cdb->scc_cmd = scmd; in vdc_eio_scsi_cmd()
6612 vd_scsi->timeout = vdc_scsi_timeout; in vdc_eio_scsi_cmd()
6637 * 0 - disk is accessible
6638 * != 0 - disk is inaccessible or unable to check if disk is accessible
6659 if (vdc->failfast_interval == 0) in vdc_eio_scsi_check()
6663 * With SPC-3 compliant devices TEST UNIT READY will succeed on in vdc_eio_scsi_check()
6678 * 0 - disk is accessible
6679 * != 0 - disk is inaccessible or unable to check if disk is accessible
6692 if (VD_OP_SUPPORTED(vdc->operations, VD_OP_SCSICMD)) in vdc_eio_check()
6695 ASSERT(vdc->failfast_interval == 0); in vdc_eio_check()
6706 buffer = kmem_alloc(vdc->vdisk_bsize, KM_SLEEP); in vdc_eio_check()
6708 if (vdc->vdisk_size > 0) { in vdc_eio_check()
6713 blkno = blkno % vdc->vdisk_size; in vdc_eio_check()
6715 vdc->vdisk_bsize, VD_SLICE_NONE, blkno, NULL, in vdc_eio_check()
6722 blkno = vdc->vdisk_size - 1; in vdc_eio_check()
6724 vdc->vdisk_bsize, VD_SLICE_NONE, blkno, NULL, in vdc_eio_check()
6733 rv = vdc_do_op(vdc, VD_OP_BREAD, (caddr_t)buffer, vdc->vdisk_bsize, in vdc_eio_check()
6737 kmem_free(buffer, vdc->vdisk_bsize); in vdc_eio_check()
6752 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_eio_queue()
6755 vio->vio_next = vdc->eio_queue; in vdc_eio_queue()
6756 vio->vio_index = index; in vdc_eio_queue()
6757 vio->vio_qtime = ddi_get_lbolt(); in vdc_eio_queue()
6759 vdc->eio_queue = vio; in vdc_eio_queue()
6762 cv_signal(&vdc->eio_cv); in vdc_eio_queue()
6779 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_eio_unqueue()
6782 vio = vdc->eio_queue; in vdc_eio_unqueue()
6789 while (vio != NULL && vio->vio_qtime > deadline) { in vdc_eio_unqueue()
6791 vio = vio->vio_next; in vdc_eio_unqueue()
6801 vdc->eio_queue = NULL; in vdc_eio_unqueue()
6803 vio_tmp->vio_next = NULL; in vdc_eio_unqueue()
6811 vio_tmp = vio->vio_next; in vdc_eio_unqueue()
6814 index = vio->vio_index; in vdc_eio_unqueue()
6815 op = vdc->local_dring[index].operation; in vdc_eio_unqueue()
6816 buf = vdc->local_dring[index].buf; in vdc_eio_unqueue()
6818 ASSERT(buf->b_flags & B_ERROR); in vdc_eio_unqueue()
6852 clock_t starttime, timeout = drv_usectohz(vdc->failfast_interval); in vdc_eio_thread()
6854 mutex_enter(&vdc->lock); in vdc_eio_thread()
6856 while (vdc->failfast_interval != 0 || vdc->num_servers > 1) { in vdc_eio_thread()
6861 if (vdc->eio_queue == NULL || vdc->state != VDC_STATE_RUNNING) { in vdc_eio_thread()
6862 if (vdc->failfast_interval != 0) { in vdc_eio_thread()
6864 drv_usectohz(vdc->failfast_interval); in vdc_eio_thread()
6865 (void) cv_timedwait(&vdc->eio_cv, &vdc->lock, in vdc_eio_thread()
6868 ASSERT(vdc->num_servers > 1); in vdc_eio_thread()
6869 (void) cv_wait(&vdc->eio_cv, &vdc->lock); in vdc_eio_thread()
6872 if (vdc->state != VDC_STATE_RUNNING) in vdc_eio_thread()
6876 mutex_exit(&vdc->lock); in vdc_eio_thread()
6883 mutex_enter(&vdc->lock); in vdc_eio_thread()
6888 if (vdc->failfast_interval == 0 && vdc->num_servers <= 1) in vdc_eio_thread()
6895 if (vdc->state != VDC_STATE_RUNNING || vdc->eio_queue == NULL) in vdc_eio_thread()
6905 } else if (vdc->num_servers > 1) { in vdc_eio_thread()
6912 mutex_enter(&vdc->read_lock); in vdc_eio_thread()
6913 vdc->read_state = VDC_READ_RESET; in vdc_eio_thread()
6914 cv_signal(&vdc->read_cv); in vdc_eio_thread()
6915 mutex_exit(&vdc->read_lock); in vdc_eio_thread()
6931 vdc->eio_thread = NULL; in vdc_eio_thread()
6932 mutex_exit(&vdc->lock); in vdc_eio_thread()
6947 mutex_enter(&vdc->lock); in vdc_failfast()
6948 if (mh_time != 0 && vdc->eio_thread == NULL) { in vdc_failfast()
6949 vdc->eio_thread = thread_create(NULL, 0, in vdc_failfast()
6951 v.v_maxsyspri - 2); in vdc_failfast()
6954 vdc->failfast_interval = ((long)mh_time) * MILLISEC; in vdc_failfast()
6955 cv_signal(&vdc->eio_cv); in vdc_failfast()
6956 mutex_exit(&vdc->lock); in vdc_failfast()
7011 mutex_enter(&vdc->ownership_lock); in vdc_ownership_thread()
7012 mutex_enter(&vdc->lock); in vdc_ownership_thread()
7014 while (vdc->ownership & VDC_OWNERSHIP_WANTED) { in vdc_ownership_thread()
7016 if ((vdc->ownership & VDC_OWNERSHIP_RESET) || in vdc_ownership_thread()
7017 !(vdc->ownership & VDC_OWNERSHIP_GRANTED)) { in vdc_ownership_thread()
7025 vdc->instance); in vdc_ownership_thread()
7027 vdc->ownership &= ~(VDC_OWNERSHIP_RESET | in vdc_ownership_thread()
7030 mutex_exit(&vdc->lock); in vdc_ownership_thread()
7035 mutex_enter(&vdc->lock); in vdc_ownership_thread()
7039 vdc->instance); in vdc_ownership_thread()
7040 vdc->ownership |= VDC_OWNERSHIP_GRANTED; in vdc_ownership_thread()
7043 vdc->instance); in vdc_ownership_thread()
7053 if (vdc->ownership & VDC_OWNERSHIP_GRANTED) in vdc_ownership_thread()
7059 mutex_exit(&vdc->ownership_lock); in vdc_ownership_thread()
7062 (void) cv_wait(&vdc->ownership_cv, &vdc->lock); in vdc_ownership_thread()
7064 (void) cv_reltimedwait(&vdc->ownership_cv, &vdc->lock, in vdc_ownership_thread()
7067 mutex_exit(&vdc->lock); in vdc_ownership_thread()
7069 mutex_enter(&vdc->ownership_lock); in vdc_ownership_thread()
7070 mutex_enter(&vdc->lock); in vdc_ownership_thread()
7073 vdc->ownership_thread = NULL; in vdc_ownership_thread()
7074 mutex_exit(&vdc->lock); in vdc_ownership_thread()
7075 mutex_exit(&vdc->ownership_lock); in vdc_ownership_thread()
7083 ASSERT(MUTEX_HELD(&vdc->ownership_lock)); in vdc_ownership_update()
7085 mutex_enter(&vdc->lock); in vdc_ownership_update()
7086 vdc->ownership = ownership_flags; in vdc_ownership_update()
7087 if ((vdc->ownership & VDC_OWNERSHIP_WANTED) && in vdc_ownership_update()
7088 vdc->ownership_thread == NULL) { in vdc_ownership_update()
7090 vdc->ownership_thread = thread_create(NULL, 0, in vdc_ownership_update()
7092 v.v_maxsyspri - 2); in vdc_ownership_update()
7095 cv_signal(&vdc->ownership_cv); in vdc_ownership_update()
7097 mutex_exit(&vdc->lock); in vdc_ownership_update()
7110 ASSERT(MUTEX_NOT_HELD(&vdc->lock)); in vdc_get_capacity()
7119 *dsk_size = vd_cap->vdisk_size; in vdc_get_capacity()
7120 *blk_size = vd_cap->vdisk_block_size; in vdc_get_capacity()
7130 * Return 0 if the disk capacity is available, or non-zero if it is not.
7143 if (!VD_OP_SUPPORTED(vdc->operations, VD_OP_GET_CAPACITY)) in vdc_check_capacity()
7152 mutex_enter(&vdc->lock); in vdc_check_capacity()
7161 vdc_update_size(vdc, dsk_size, blk_size, vdc->max_xfer_sz); in vdc_check_capacity()
7163 mutex_exit(&vdc->lock); in vdc_check_capacity()
7215 /* mhd(4I) non-shared multihost disks ioctls */
7233 * These particular ioctls are not sent to the server - vdc fakes up
7256 dev = makedevice(ddi_driver_major(vdc->dip), in vd_process_efi_ioctl()
7257 VD_MAKE_DEV(vdc->instance, 0)); in vd_process_efi_ioctl()
7270 * dev - the device number
7271 * cmd - the operation [dkio(4I)] to be processed
7272 * arg - pointer to user provided structure
7274 * mode - bit flag, indicating open settings, 32/64 bit type, etc
7275 * rvalp - pointer to return value for calling process.
7289 int rv = -1; in vd_process_ioctl()
7325 vdc->instance, cmd); in vd_process_ioctl()
7339 len = sizeof (vd_efi_t) - 1 + dk_efi.dki_length; in vd_process_ioctl()
7341 len = iop->nbytes; in vd_process_ioctl()
7362 if (vdc->cinfo == NULL) in vd_process_ioctl()
7364 if (vdc->cinfo->dki_ctype != DKC_SCSI_CCS) in vd_process_ioctl()
7369 if (vdc->cinfo == NULL) in vd_process_ioctl()
7371 if (vdc->cinfo->dki_ctype != DKC_DIRECT) in vd_process_ioctl()
7376 if (vdc->cinfo == NULL) in vd_process_ioctl()
7381 if (vdc->minfo == NULL) in vd_process_ioctl()
7403 mutex_enter(&vdc->ownership_lock); in vd_process_ioctl()
7419 mutex_exit(&vdc->ownership_lock); in vd_process_ioctl()
7425 mutex_enter(&vdc->ownership_lock); in vd_process_ioctl()
7430 mutex_exit(&vdc->ownership_lock); in vd_process_ioctl()
7505 bcopy(vdc->cinfo, &cinfo, sizeof (struct dk_cinfo)); in vd_process_ioctl()
7518 ASSERT(vdc->vdisk_size != 0); in vd_process_ioctl()
7519 ASSERT(vdc->minfo->dki_capacity != 0); in vd_process_ioctl()
7520 rv = ddi_copyout(vdc->minfo, (void *)arg, in vd_process_ioctl()
7563 dkarg->mode = mode; in vd_process_ioctl()
7564 dkarg->dev = dev; in vd_process_ioctl()
7565 bcopy(dkc, &dkarg->dkc, sizeof (*dkc)); in vd_process_ioctl()
7567 mutex_enter(&vdc->lock); in vd_process_ioctl()
7568 vdc->dkio_flush_pending++; in vd_process_ioctl()
7569 dkarg->vdc = vdc; in vd_process_ioctl()
7570 mutex_exit(&vdc->lock); in vd_process_ioctl()
7577 mutex_enter(&vdc->lock); in vd_process_ioctl()
7578 vdc->dkio_flush_pending--; in vd_process_ioctl()
7579 mutex_exit(&vdc->lock); in vd_process_ioctl()
7588 /* catch programming error in vdc - should be a VD_OP_XXX ioctl */ in vd_process_ioctl()
7589 ASSERT(iop->op != 0); in vd_process_ioctl()
7592 if (VD_OP_SUPPORTED(vdc->operations, iop->op) == B_FALSE) { in vd_process_ioctl()
7594 vdc->instance, iop->op); in vd_process_ioctl()
7598 /* LDC requires that the memory being mapped is 8-byte aligned */ in vd_process_ioctl()
7611 ASSERT(iop->convert != NULL); in vd_process_ioctl()
7612 rv = (iop->convert)(vdc, arg, mem_p, mode, VD_COPYIN); in vd_process_ioctl()
7624 rv = vdc_do_sync_op(vdc, iop->op, mem_p, alloc_len, in vd_process_ioctl()
7647 rv = (iop->convert)(vdc, mem_p, arg, mode, VD_COPYOUT); in vd_process_ioctl()
7725 * vdc - the vDisk client
7726 * from - the buffer containing the data to be copied from
7727 * to - the buffer to be copied to
7728 * mode - flags passed to ioctl() call
7729 * dir - the "direction" of the copy - VD_COPYIN or VD_COPYOUT
7732 * 0 - Success
7733 * ENXIO - incorrect buffer passed in.
7734 * EFAULT - ddi_copyout routine encountered an error.
7751 if (vdc->vdisk_size > VD_OLDVTOC_LIMIT) in vdc_get_vtoc_convert()
7758 evtoc.timestamp[i] = vdc->vtoc->timestamp[i]; in vdc_get_vtoc_convert()
7786 * vdc - the vDisk client
7787 * from - Buffer with data
7788 * to - Buffer where data is to be copied to
7789 * mode - flags passed to ioctl
7790 * dir - direction of copy (in or out)
7793 * 0 - Success
7794 * ENXIO - Invalid buffer passed in
7795 * EFAULT - ddi_copyin of data failed
7809 if (vdc->vdisk_size > VD_OLDVTOC_LIMIT) in vdc_set_vtoc_convert()
7837 vdc->vtoc->timestamp[i] = evtoc.timestamp[i]; in vdc_set_vtoc_convert()
7863 evtoc.timestamp[i] = vdc->vtoc->timestamp[i]; in vdc_get_extvtoc_convert()
7900 vdc->vtoc->timestamp[i] = evtoc.timestamp[i]; in vdc_set_extvtoc_convert()
7920 * vdc - the vDisk client
7921 * from - Buffer with data
7922 * to - Buffer where data is to be copied to
7923 * mode - flags passed to ioctl
7924 * dir - direction of copy (in or out)
7927 * 0 - Success
7928 * ENXIO - Invalid buffer passed in
7929 * EFAULT - ddi_copyout of data failed
7963 * vdc - the vDisk client
7964 * from - Buffer with data
7965 * to - Buffer where data is to be copied to
7966 * mode - flags passed to ioctl
7967 * dir - direction of copy (in or out)
7970 * 0 - Success
7971 * ENXIO - Invalid buffer passed in
7972 * EFAULT - ddi_copyin of data failed
8025 vd_efi->lba = dk_efi.dki_lba; in vdc_get_efi_convert()
8026 vd_efi->length = dk_efi.dki_length; in vdc_get_efi_convert()
8027 bzero(vd_efi->data, vd_efi->length); in vdc_get_efi_convert()
8090 /* -------------------------------------------------------------------------- */
8104 * vdc - soft state pointer for this instance of the device driver.
8113 ASSERT(vdc->max_xfer_sz != 0); in vdc_create_fake_geometry()
8118 if (vdc->cinfo == NULL) in vdc_create_fake_geometry()
8119 vdc->cinfo = kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP); in vdc_create_fake_geometry()
8121 (void) strcpy(vdc->cinfo->dki_cname, VDC_DRIVER_NAME); in vdc_create_fake_geometry()
8122 (void) strcpy(vdc->cinfo->dki_dname, VDC_DRIVER_NAME); in vdc_create_fake_geometry()
8124 vdc->cinfo->dki_maxtransfer = vdc->max_xfer_sz; in vdc_create_fake_geometry()
8135 switch (vdc->vdisk_media) { in vdc_create_fake_geometry()
8138 vdc->cinfo->dki_ctype = DKC_CDROM; in vdc_create_fake_geometry()
8141 if (VD_OP_SUPPORTED(vdc->operations, VD_OP_SCSICMD)) in vdc_create_fake_geometry()
8142 vdc->cinfo->dki_ctype = DKC_SCSI_CCS; in vdc_create_fake_geometry()
8144 vdc->cinfo->dki_ctype = DKC_DIRECT; in vdc_create_fake_geometry()
8148 vdc->cinfo->dki_ctype = DKC_DIRECT; in vdc_create_fake_geometry()
8151 vdc->cinfo->dki_flags = DKI_FMTVOL; in vdc_create_fake_geometry()
8152 vdc->cinfo->dki_cnum = 0; in vdc_create_fake_geometry()
8153 vdc->cinfo->dki_addr = 0; in vdc_create_fake_geometry()
8154 vdc->cinfo->dki_space = 0; in vdc_create_fake_geometry()
8155 vdc->cinfo->dki_prio = 0; in vdc_create_fake_geometry()
8156 vdc->cinfo->dki_vec = 0; in vdc_create_fake_geometry()
8157 vdc->cinfo->dki_unit = vdc->instance; in vdc_create_fake_geometry()
8158 vdc->cinfo->dki_slave = 0; in vdc_create_fake_geometry()
8161 * actual slice (i.e. minor node) that is used to request the data. in vdc_create_fake_geometry()
8163 vdc->cinfo->dki_partition = 0; in vdc_create_fake_geometry()
8168 if (vdc->minfo == NULL) in vdc_create_fake_geometry()
8169 vdc->minfo = kmem_zalloc(sizeof (struct dk_minfo), KM_SLEEP); in vdc_create_fake_geometry()
8171 if (vio_ver_is_supported(vdc->ver, 1, 1)) { in vdc_create_fake_geometry()
8172 vdc->minfo->dki_media_type = in vdc_create_fake_geometry()
8173 VD_MEDIATYPE2DK_MEDIATYPE(vdc->vdisk_media); in vdc_create_fake_geometry()
8175 vdc->minfo->dki_media_type = DK_FIXED_DISK; in vdc_create_fake_geometry()
8178 vdc->minfo->dki_capacity = vdc->vdisk_size; in vdc_create_fake_geometry()
8179 vdc->minfo->dki_lbsize = vdc->vdisk_bsize; in vdc_create_fake_geometry()
8188 count = (sizeof (struct dk_label)) / (sizeof (short)) - 1; in vdc_lbl2cksum()
8191 while (count--) { in vdc_lbl2cksum()
8203 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_update_size()
8211 (blk_size == vdc->vdisk_bsize && dsk_size == vdc->vdisk_size && in vdc_update_size()
8212 xfr_size == vdc->max_xfer_sz)) in vdc_update_size()
8224 " using max supported by vdc", vdc->instance); in vdc_update_size()
8228 vdc->max_xfer_sz = xfr_size; in vdc_update_size()
8229 vdc->vdisk_bsize = blk_size; in vdc_update_size()
8230 vdc->vdisk_size = dsk_size; in vdc_update_size()
8232 stp = (vd_err_stats_t *)vdc->err_stats->ks_data; in vdc_update_size()
8233 stp->vd_capacity.value.ui64 = dsk_size * blk_size; in vdc_update_size()
8235 vdc->minfo->dki_capacity = dsk_size; in vdc_update_size()
8236 vdc->minfo->dki_lbsize = (uint_t)blk_size; in vdc_update_size()
8241 * same as the vdisk block size which is stored in vdc->vdisk_bsize so we
8259 vdc->vio_bmask = 0; in vdc_update_vio_bsize()
8260 vdc->vio_bshift = 0; in vdc_update_vio_bsize()
8277 vdc->vio_bshift = nshift; in vdc_update_vio_bsize()
8278 vdc->vio_bmask = ratio - 1; in vdc_update_vio_bsize()
8294 * vdc - soft state pointer for this instance of the device driver.
8297 * 0 - success.
8298 * EINVAL - unknown disk label.
8299 * ENOTSUP - geometry not applicable (EFI label).
8300 * EIO - error accessing the disk.
8315 ASSERT(vdc->vtoc != NULL && vdc->geom != NULL); in vdc_validate_geometry()
8316 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_validate_geometry()
8318 mutex_exit(&vdc->lock); in vdc_validate_geometry()
8324 dev = makedevice(ddi_driver_major(vdc->dip), in vdc_validate_geometry()
8325 VD_MAKE_DEV(vdc->instance, 0)); in vdc_validate_geometry()
8340 if (vdc->vdisk_size == 0) { in vdc_validate_geometry()
8341 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8352 vdc->instance, rv); in vdc_validate_geometry()
8353 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8358 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8366 vdc->instance, rv); in vdc_validate_geometry()
8367 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8377 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8396 * Single slice disk does not support read using an absolute disk in vdc_validate_geometry()
8399 if (vdc->vdisk_type == VD_DISK_TYPE_SLICE) { in vdc_validate_geometry()
8400 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8410 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8417 * generated by the disk driver. So the on-disk label check in vdc_validate_geometry()
8420 if (vdc->vdisk_media == VD_MEDIA_CD || in vdc_validate_geometry()
8421 vdc->vdisk_media == VD_MEDIA_DVD) { in vdc_validate_geometry()
8422 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8430 label = kmem_alloc(vdc->vdisk_bsize, KM_SLEEP); in vdc_validate_geometry()
8432 rv = vdc_do_op(vdc, VD_OP_BREAD, (caddr_t)label, vdc->vdisk_bsize, in vdc_validate_geometry()
8435 if (rv != 0 || label->dkl_magic != DKL_MAGIC || in vdc_validate_geometry()
8436 label->dkl_cksum != vdc_lbl2cksum(label)) { in vdc_validate_geometry()
8438 vdc->instance); in vdc_validate_geometry()
8439 kmem_free(label, vdc->vdisk_bsize); in vdc_validate_geometry()
8440 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8445 kmem_free(label, vdc->vdisk_bsize); in vdc_validate_geometry()
8446 mutex_enter(&vdc->lock); in vdc_validate_geometry()
8460 * vdc - soft state pointer for this instance of the device driver.
8472 ASSERT(!MUTEX_HELD(&vdc->lock)); in vdc_validate()
8474 mutex_enter(&vdc->lock); in vdc_validate()
8477 old_label = vdc->vdisk_label; in vdc_validate()
8478 bcopy(vdc->slice, &old_slice, sizeof (vd_slice_t) * V_NUMPAR); in vdc_validate()
8484 if (vdc->vdisk_type == VD_DISK_TYPE_DISK && in vdc_validate()
8485 vdc->vdisk_label != old_label) { in vdc_validate()
8487 if (vdc->vdisk_label == VD_DISK_LABEL_EFI) in vdc_validate()
8494 vdc->instance); in vdc_validate()
8498 mutex_exit(&vdc->lock); in vdc_validate()
8508 mutex_enter(&vdc->lock); in vdc_validate_task()
8509 ASSERT(vdc->validate_pending > 0); in vdc_validate_task()
8510 vdc->validate_pending--; in vdc_validate_task()
8511 mutex_exit(&vdc->lock); in vdc_validate_task()
8525 * vdc - soft state pointer for this instance of the device driver.
8528 * 0 - A devid was successfully registered for the vDisk in vdc_setup_devid()
8545 * ldc requires size to be 8-byte aligned. in vdc_setup_devid()
8550 bufid_len = bufsize - sizeof (vd_efi_t) - 1; in vdc_setup_devid()
8562 if (vd_devid->length > bufid_len) { in vdc_setup_devid()
8568 bufsize = P2ROUNDUP(VD_DEVID_SIZE(vd_devid->length), in vdc_setup_devid()
8571 bufid_len = bufsize - sizeof (vd_efi_t) - 1; in vdc_setup_devid()
8585 * between a LDom and a non-LDom may not work (for example for a shared in vdc_setup_devid()
8593 DMSG(vdc, 2, ": devid length = %d\n", vd_devid->length); in vdc_setup_devid()
8596 if (ddi_devid_init(vdc->dip, DEVID_ENCAP, vd_devid->length, in vdc_setup_devid()
8597 vd_devid->id, &vdisk_devid) != DDI_SUCCESS) { in vdc_setup_devid()
8598 DMSG(vdc, 1, "[%d] Fail to created devid\n", vdc->instance); in vdc_setup_devid()
8603 DEVID_FORMTYPE((impl_devid_t *)vdisk_devid, vd_devid->type); in vdc_setup_devid()
8609 if (vdc->devid != NULL) { in vdc_setup_devid()
8611 if (ddi_devid_compare(vdisk_devid, vdc->devid) == 0) { in vdc_setup_devid()
8617 vdc->instance); in vdc_setup_devid()
8619 devid_str = ddi_devid_str_encode(vdc->devid, NULL); in vdc_setup_devid()
8622 vdc->instance, in vdc_setup_devid()
8631 vdc->instance, in vdc_setup_devid()
8641 if (ddi_devid_register(vdc->dip, vdisk_devid) != DDI_SUCCESS) { in vdc_setup_devid()
8642 DMSG(vdc, 1, "[%d] Fail to register devid\n", vdc->instance); in vdc_setup_devid()
8647 vdc->devid = vdisk_devid; in vdc_setup_devid()
8657 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_store_label_efi()
8659 vdc->vdisk_label = VD_DISK_LABEL_EFI; in vdc_store_label_efi()
8660 bzero(vdc->vtoc, sizeof (struct extvtoc)); in vdc_store_label_efi()
8661 bzero(vdc->geom, sizeof (struct dk_geom)); in vdc_store_label_efi()
8662 bzero(vdc->slice, sizeof (vd_slice_t) * V_NUMPAR); in vdc_store_label_efi()
8664 nparts = gpt->efi_gpt_NumberOfPartitionEntries; in vdc_store_label_efi()
8673 vdc->slice[i].start = gpe[i].efi_gpe_StartingLBA; in vdc_store_label_efi()
8674 vdc->slice[i].nblocks = gpe[i].efi_gpe_EndingLBA - in vdc_store_label_efi()
8678 ASSERT(vdc->vdisk_size != 0); in vdc_store_label_efi()
8679 vdc->slice[VD_EFI_WD_SLICE].start = 0; in vdc_store_label_efi()
8680 vdc->slice[VD_EFI_WD_SLICE].nblocks = vdc->vdisk_size; in vdc_store_label_efi()
8689 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_store_label_vtoc()
8690 ASSERT(vdc->vdisk_bsize == vtoc->v_sectorsz); in vdc_store_label_vtoc()
8692 vdc->vdisk_label = VD_DISK_LABEL_VTOC; in vdc_store_label_vtoc()
8693 bcopy(vtoc, vdc->vtoc, sizeof (struct extvtoc)); in vdc_store_label_vtoc()
8694 bcopy(geom, vdc->geom, sizeof (struct dk_geom)); in vdc_store_label_vtoc()
8695 bzero(vdc->slice, sizeof (vd_slice_t) * V_NUMPAR); in vdc_store_label_vtoc()
8697 for (i = 0; i < vtoc->v_nparts; i++) { in vdc_store_label_vtoc()
8698 vdc->slice[i].start = vtoc->v_part[i].p_start; in vdc_store_label_vtoc()
8699 vdc->slice[i].nblocks = vtoc->v_part[i].p_size; in vdc_store_label_vtoc()
8706 ASSERT(MUTEX_HELD(&vdc->lock)); in vdc_store_label_unk()
8708 vdc->vdisk_label = VD_DISK_LABEL_UNK; in vdc_store_label_unk()
8709 bzero(vdc->vtoc, sizeof (struct extvtoc)); in vdc_store_label_unk()
8710 bzero(vdc->geom, sizeof (struct dk_geom)); in vdc_store_label_unk()
8711 bzero(vdc->slice, sizeof (vd_slice_t) * V_NUMPAR); in vdc_store_label_unk()