Lines Matching refs:vd

218 	    (vd->dring + (i)*vd->descriptor_size))
221 #define VD_CLIENT(vd) \ argument
222 (((vd)->xfer_mode == VIO_DESC_MODE) ? "in-band client" : \
223 (((vd)->xfer_mode == VIO_DRING_MODE_V1_0) ? "dring client" : \
224 (((vd)->xfer_mode == 0) ? "null client" : \
228 #define VD_DSKIMG_LABEL_READ(vd, labelp) \ argument
229 vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)labelp, \
233 #define VD_DSKIMG_LABEL_WRITE(vd, labelp) \ argument
234 vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, (caddr_t)labelp, \
238 #define VD_DSKIMG(vd) ((vd)->vdisk_type == VD_DISK_TYPE_DISK && \ argument
239 ((vd)->file || (vd)->volume))
242 #define VD_WRITE_INDEX_NEXT(vd, id) \ argument
243 ((((id) + 1) >= vd->dring_len)? 0 : (id) + 1)
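
The VD_WRITE_INDEX_NEXT macro at source lines 242-243 advances the write-queue index and wraps it back to zero once it reaches dring_len. A minimal standalone sketch of that wrap rule, using hypothetical names (next_index, ring_len) rather than the driver's macro:

    #include <stdio.h>

    /* Hypothetical stand-in for the wrap rule in VD_WRITE_INDEX_NEXT. */
    static unsigned
    next_index(unsigned id, unsigned ring_len)
    {
            return (((id + 1) >= ring_len) ? 0 : id + 1);
    }

    int
    main(void)
    {
            unsigned id = 0;

            /* Prints 0 1 2 3 0 1 for a 4-entry ring. */
            for (int i = 0; i < 6; i++) {
                    printf("%u ", id);
                    id = next_index(id, 4);
            }
            printf("\n");
            return (0);
    }
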
424 struct vd *vd; /* vd instance task is for */ member
440 typedef struct vd { struct
520 #define VD_LABEL_VTOC(vd) \ argument
521 ((struct dk_label *)(void *)((vd)->flabel))
523 #define VD_LABEL_EFI_GPT(vd, lba) \ argument
524 ((efi_gpt_t *)(void *)((vd)->flabel + (lba)))
525 #define VD_LABEL_EFI_GPE(vd, lba) \ argument
526 ((efi_gpe_t *)(void *)((vd)->flabel + 2 * (lba)))
667 static int vd_setup_vd(vd_t *vd);
668 static int vd_setup_single_slice_disk(vd_t *vd);
669 static int vd_setup_slice_image(vd_t *vd);
670 static int vd_setup_disk_image(vd_t *vd);
671 static int vd_backend_check_size(vd_t *vd);
672 static boolean_t vd_enabled(vd_t *vd);
674 static int vd_dskimg_validate_geometry(vd_t *vd);
675 static boolean_t vd_dskimg_is_iso_image(vd_t *vd);
676 static void vd_set_exported_operations(vd_t *vd);
677 static void vd_reset_access(vd_t *vd);
678 static int vd_backend_ioctl(vd_t *vd, int cmd, caddr_t arg);
686 static boolean_t vd_slice_geom_isvalid(vd_t *vd, struct dk_geom *geom);
687 static boolean_t vd_slice_vtoc_isvalid(vd_t *vd, struct extvtoc *vtoc);
755 vd_dskimg_io_params(vd_t *vd, int slice, size_t *blkp, size_t *lenp) in vd_dskimg_io_params() argument
761 ASSERT(vd->file || VD_DSKIMG(vd)); in vd_dskimg_io_params()
763 ASSERT(vd->vdisk_bsize == DEV_BSIZE); in vd_dskimg_io_params()
771 if (vd->vdisk_type == VD_DISK_TYPE_SLICE || slice == VD_SLICE_NONE) { in vd_dskimg_io_params()
774 if (offset >= vd->dskimg_size) { in vd_dskimg_io_params()
777 offset, vd->dskimg_size); in vd_dskimg_io_params()
780 maxlen = vd->dskimg_size - offset; in vd_dskimg_io_params()
790 if (vd->vdisk_label == VD_DISK_LABEL_UNK && in vd_dskimg_io_params()
791 vio_ver_is_supported(vd->version, 1, 1)) { in vd_dskimg_io_params()
792 (void) vd_dskimg_validate_geometry(vd); in vd_dskimg_io_params()
793 if (vd->vdisk_label == VD_DISK_LABEL_UNK) { in vd_dskimg_io_params()
800 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) { in vd_dskimg_io_params()
801 ASSERT(vd->vtoc.v_sectorsz == DEV_BSIZE); in vd_dskimg_io_params()
803 ASSERT(vd->vdisk_label == VD_DISK_LABEL_EFI); in vd_dskimg_io_params()
806 if (blk >= vd->slices[slice].nblocks) { in vd_dskimg_io_params()
809 blk, vd->slices[slice].nblocks); in vd_dskimg_io_params()
813 offset = (vd->slices[slice].start + blk) * DEV_BSIZE; in vd_dskimg_io_params()
814 maxlen = (vd->slices[slice].nblocks - blk) * DEV_BSIZE; in vd_dskimg_io_params()
833 if ((offset + len) > vd->dskimg_size) { in vd_dskimg_io_params()
835 "dskimg_size (0x%lx)", offset, len, vd->dskimg_size); in vd_dskimg_io_params()
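
Source lines 771-835 show vd_dskimg_io_params() rejecting a request that starts past the end of the image or slice and trimming its length to what remains before the final size check. A minimal sketch of that bounds logic, with hypothetical names and a generic errno return rather than the driver's actual signature:

    #include <errno.h>
    #include <stddef.h>

    /*
     * Hypothetical clamp: fail if the starting block is past the end,
     * otherwise trim the length so the transfer stays inside the backend.
     */
    static int
    clamp_io(size_t start_blk, size_t *len_blks, size_t total_blks)
    {
            if (start_blk >= total_blks)
                    return (EINVAL);
            if (*len_blks > total_blks - start_blk)
                    *len_blks = total_blks - start_blk;
            return (0);
    }
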
872 vd_dskimg_rw(vd_t *vd, int slice, int operation, caddr_t data, size_t offset, in vd_dskimg_rw() argument
879 ASSERT(vd->file || VD_DSKIMG(vd)); in vd_dskimg_rw()
881 ASSERT(vd->vdisk_bsize == DEV_BSIZE); in vd_dskimg_rw()
883 if ((status = vd_dskimg_io_params(vd, slice, &offset, &len)) != 0) in vd_dskimg_rw()
886 if (vd->volume) { in vd_dskimg_rw()
893 buf.b_edev = vd->dev[0]; in vd_dskimg_rw()
906 if (ldi_strategy(vd->ldi_handle[0], &buf) != 0) { in vd_dskimg_rw()
923 ASSERT(vd->file); in vd_dskimg_rw()
926 vd->file_vnode, data, len, offset * DEV_BSIZE, UIO_SYSSPACE, FSYNC, in vd_dskimg_rw()
1080 vd_dskimg_set_vtoc(vd_t *vd, struct dk_label *label) in vd_dskimg_set_vtoc() argument
1084 ASSERT(VD_DSKIMG(vd)); in vd_dskimg_set_vtoc()
1086 if (VD_DSKIMG_LABEL_WRITE(vd, label) < 0) { in vd_dskimg_set_vtoc()
1119 if (vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, in vd_dskimg_set_vtoc()
1150 vd_dskimg_get_devid_block(vd_t *vd, size_t *blkp) in vd_dskimg_get_devid_block() argument
1154 ASSERT(VD_DSKIMG(vd)); in vd_dskimg_get_devid_block()
1156 if (vd->vdisk_label == VD_DISK_LABEL_UNK) { in vd_dskimg_get_devid_block()
1164 if (vd->vdisk_label == VD_DISK_LABEL_EFI) { in vd_dskimg_get_devid_block()
1169 if (vd->efi_reserved == -1) { in vd_dskimg_get_devid_block()
1174 *blkp = vd->slices[vd->efi_reserved].start; in vd_dskimg_get_devid_block()
1178 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC); in vd_dskimg_get_devid_block()
1181 if (vd->dk_geom.dkg_acyl < 2) { in vd_dskimg_get_devid_block()
1183 "(acyl=%u)", vd->dk_geom.dkg_acyl); in vd_dskimg_get_devid_block()
1188 cyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl - 2; in vd_dskimg_get_devid_block()
1189 spc = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect; in vd_dskimg_get_devid_block()
1190 head = vd->dk_geom.dkg_nhead - 1; in vd_dskimg_get_devid_block()
1192 *blkp = (cyl * (spc - vd->dk_geom.dkg_apc)) + in vd_dskimg_get_devid_block()
1193 (head * vd->dk_geom.dkg_nsect) + 1; in vd_dskimg_get_devid_block()
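
For a VTOC-labeled image, source lines 1181-1193 place the device id on the second-to-last alternate cylinder, last head, sector 1, requiring at least two alternate cylinders. A worked sketch of the same arithmetic; the geometry values below are made up, not taken from a real disk:

    #include <stdio.h>

    int
    main(void)
    {
            /* Hypothetical VTOC geometry. */
            unsigned ncyl = 1000, acyl = 2, nhead = 16, nsect = 63, apc = 0;

            unsigned cyl = ncyl + acyl - 2;     /* second-to-last alternate cylinder */
            unsigned spc = nhead * nsect;       /* sectors per cylinder */
            unsigned head = nhead - 1;          /* last head */
            unsigned long blk = (unsigned long)cyl * (spc - apc) +
                (unsigned long)head * nsect + 1;

            /* 1000 * 1008 + 15 * 63 + 1 = 1008946 */
            printf("devid block = %lu\n", blk);
            return (0);
    }
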
1233 vd_dskimg_read_devid(vd_t *vd, ddi_devid_t *devid) in vd_dskimg_read_devid() argument
1240 ASSERT(vd->vdisk_bsize == DEV_BSIZE); in vd_dskimg_read_devid()
1242 if ((status = vd_dskimg_get_devid_block(vd, &blk)) != 0) in vd_dskimg_read_devid()
1248 if ((vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)dkdevid, blk, in vd_dskimg_read_devid()
1309 vd_dskimg_write_devid(vd_t *vd, ddi_devid_t devid) in vd_dskimg_write_devid() argument
1316 ASSERT(vd->vdisk_bsize == DEV_BSIZE); in vd_dskimg_write_devid()
1323 if ((status = vd_dskimg_get_devid_block(vd, &blk)) != 0) in vd_dskimg_write_devid()
1342 if ((status = vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, in vd_dskimg_write_devid()
1375 vd_do_scsi_rdwr(vd_t *vd, int operation, caddr_t data, size_t blk, size_t len) in vd_do_scsi_rdwr() argument
1383 ASSERT(!vd->file); in vd_do_scsi_rdwr()
1384 ASSERT(!vd->volume); in vd_do_scsi_rdwr()
1385 ASSERT(vd->vdisk_bsize > 0); in vd_do_scsi_rdwr()
1387 max_sectors = vd->max_xfer_sz; in vd_do_scsi_rdwr()
1388 nblk = (len / vd->vdisk_bsize); in vd_do_scsi_rdwr()
1390 if (len % vd->vdisk_bsize != 0) in vd_do_scsi_rdwr()
1411 if (blk < (2 << 20) && nsectors <= 0xff && !vd->is_atapi_dev) { in vd_do_scsi_rdwr()
1428 ucmd.uscsi_buflen = nsectors * vd->backend_bsize; in vd_do_scsi_rdwr()
1443 status = ldi_ioctl(vd->ldi_handle[VD_ENTIRE_DISK_SLICE], in vd_do_scsi_rdwr()
1444 USCSICMD, (intptr_t)&ucmd, (vd->open_flags | FKIOCTL), in vd_do_scsi_rdwr()
1473 data += nsectors * vd->vdisk_bsize; in vd_do_scsi_rdwr()
1502 vd_scsi_rdwr(vd_t *vd, int operation, caddr_t data, size_t vblk, size_t vlen) in vd_scsi_rdwr() argument
1512 if (vd->backend_bsize == 0) { in vd_scsi_rdwr()
1517 if (vd_backend_check_size(vd) != 0) in vd_scsi_rdwr()
1528 if (vd->vdisk_bsize == vd->backend_bsize) in vd_scsi_rdwr()
1529 return (vd_do_scsi_rdwr(vd, operation, data, vblk, vlen)); in vd_scsi_rdwr()
1531 if (vd->vdisk_bsize > vd->backend_bsize) in vd_scsi_rdwr()
1567 pblk = (vblk * vd->vdisk_bsize) / vd->backend_bsize; in vd_scsi_rdwr()
1568 delta = (vblk * vd->vdisk_bsize) - (pblk * vd->backend_bsize); in vd_scsi_rdwr()
1569 pnblk = ((delta + vlen - 1) / vd->backend_bsize) + 1; in vd_scsi_rdwr()
1570 plen = pnblk * vd->backend_bsize; in vd_scsi_rdwr()
1575 rv = vd_do_scsi_rdwr(vd, operation, (caddr_t)buf, pblk, plen); in vd_scsi_rdwr()
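
When the exported block size differs from the backend's, source lines 1567-1570 translate the virtual request into backend terms: a starting physical block, the byte offset of the data inside it, and a length rounded up to whole backend blocks. A worked sketch with hypothetical sizes (512-byte virtual blocks over a 4K backend):

    #include <stdio.h>
    #include <stddef.h>

    int
    main(void)
    {
            /* Hypothetical sizes and request. */
            size_t vdisk_bsize = 512, backend_bsize = 4096;
            size_t vblk = 7, vlen = 2048;       /* four 512-byte virtual blocks */

            size_t pblk = (vblk * vdisk_bsize) / backend_bsize;
            size_t delta = (vblk * vdisk_bsize) - (pblk * backend_bsize);
            size_t pnblk = ((delta + vlen - 1) / backend_bsize) + 1;
            size_t plen = pnblk * backend_bsize;

            /*
             * pblk=0 delta=3584 pnblk=2 plen=8192: the 2K request straddles
             * two 4K backend blocks, so both must be read or written.
             */
            printf("pblk=%zu delta=%zu pnblk=%zu plen=%zu\n",
                pblk, delta, pnblk, plen);
            return (0);
    }
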
1602 vd_slice_flabel_read(vd_t *vd, caddr_t data, size_t offset, size_t length) in vd_slice_flabel_read() argument
1605 uint_t limit = vd->flabel_limit * vd->vdisk_bsize; in vd_slice_flabel_read()
1607 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); in vd_slice_flabel_read()
1608 ASSERT(vd->flabel != NULL); in vd_slice_flabel_read()
1615 if (offset < vd->flabel_size) { in vd_slice_flabel_read()
1617 if (offset + length <= vd->flabel_size) { in vd_slice_flabel_read()
1618 bcopy(vd->flabel + offset, data, length); in vd_slice_flabel_read()
1622 n = vd->flabel_size - offset; in vd_slice_flabel_read()
1623 bcopy(vd->flabel + offset, data, n); in vd_slice_flabel_read()
1658 vd_slice_flabel_write(vd_t *vd, caddr_t data, size_t offset, size_t length) in vd_slice_flabel_write() argument
1660 uint_t limit = vd->flabel_limit * vd->vdisk_bsize; in vd_slice_flabel_write()
1665 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); in vd_slice_flabel_write()
1666 ASSERT(vd->flabel != NULL); in vd_slice_flabel_write()
1676 if (vd->vdisk_label == VD_DISK_LABEL_VTOC && in vd_slice_flabel_write()
1677 offset == 0 && length == vd->vdisk_bsize) { in vd_slice_flabel_write()
1687 if (vd_slice_geom_isvalid(vd, &geom) && in vd_slice_flabel_write()
1688 vd_slice_vtoc_isvalid(vd, &vtoc)) in vd_slice_flabel_write()
1744 vd_slice_fake_rdwr(vd_t *vd, int slice, int operation, caddr_t *datap, in vd_slice_fake_rdwr() argument
1753 size_t bsize = vd->vdisk_bsize; in vd_slice_fake_rdwr()
1755 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); in vd_slice_fake_rdwr()
1768 vd->vdisk_label != VD_DISK_LABEL_VTOC) && in vd_slice_fake_rdwr()
1770 vd->vdisk_label != VD_DISK_LABEL_EFI)) { in vd_slice_fake_rdwr()
1779 n = vd_slice_flabel_write(vd, data, blk * bsize, length); in vd_slice_fake_rdwr()
1781 n = vd_slice_flabel_read(vd, data, blk * bsize, length); in vd_slice_fake_rdwr()
1799 if (vd->vdisk_label == VD_DISK_LABEL_VTOC && in vd_slice_fake_rdwr()
1805 if (vd->vdisk_label == VD_DISK_LABEL_EFI) { in vd_slice_fake_rdwr()
1807 ablk = vd->vdisk_size - asize; in vd_slice_fake_rdwr()
1809 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC); in vd_slice_fake_rdwr()
1810 ASSERT(vd->dk_geom.dkg_apc == 0); in vd_slice_fake_rdwr()
1812 csize = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect; in vd_slice_fake_rdwr()
1813 ablk = vd->dk_geom.dkg_ncyl * csize; in vd_slice_fake_rdwr()
1814 asize = vd->dk_geom.dkg_acyl * csize; in vd_slice_fake_rdwr()
1854 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) { in vd_slice_fake_rdwr()
1856 label = VD_LABEL_VTOC(vd); in vd_slice_fake_rdwr()
1877 ASSERT(length == 0 || blk >= vd->flabel_limit); in vd_slice_fake_rdwr()
1884 *blkp = blk - vd->flabel_limit; in vd_slice_fake_rdwr()
1891 vd_flush_write(vd_t *vd) in vd_flush_write() argument
1895 if (vd->file) { in vd_flush_write()
1896 status = VOP_FSYNC(vd->file_vnode, FSYNC, kcred, NULL); in vd_flush_write()
1898 status = ldi_ioctl(vd->ldi_handle[0], DKIOCFLUSHWRITECACHE, in vd_flush_write()
1899 NULL, vd->open_flags | FKIOCTL, kcred, &rval); in vd_flush_write()
1910 vd_t *vd = task->vd; in vd_bio_task() local
1914 ASSERT(vd->vdisk_bsize == DEV_BSIZE); in vd_bio_task()
1916 if (vd->zvol) { in vd_bio_task()
1918 status = ldi_strategy(vd->ldi_handle[0], buf); in vd_bio_task()
1922 ASSERT(vd->file); in vd_bio_task()
1925 vd->file_vnode, buf->b_un.b_addr, buf->b_bcount, in vd_bio_task()
1971 vd_t *vd = task->vd; in vd_start_bio() local
1980 ASSERT(vd != NULL); in vd_start_bio()
1985 ASSERT(slice == VD_SLICE_NONE || slice < vd->nslices); in vd_start_bio()
2003 if (request->operation == VD_OP_BWRITE && !(vd->open_flags & FWRITE)) { in vd_start_bio()
2041 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) { in vd_start_bio()
2045 rv = vd_slice_fake_rdwr(vd, slice, request->operation, in vd_start_bio()
2079 } else if (vd->volume || vd->file) { in vd_start_bio()
2081 rv = vd_dskimg_io_params(vd, slice, &offset, &length); in vd_start_bio()
2101 rv = vd_scsi_rdwr(vd, request->operation, bufaddr, offset, in vd_start_bio()
2118 buf->b_edev = vd->dev[slice]; in vd_start_bio()
2122 if (vd->file || vd->zvol) { in vd_start_bio()
2157 task->write_index = vd->write_index; in vd_start_bio()
2158 vd->write_queue[task->write_index] = buf; in vd_start_bio()
2159 vd->write_index = in vd_start_bio()
2160 VD_WRITE_INDEX_NEXT(vd, vd->write_index); in vd_start_bio()
2165 ASSERT(vd->ioq != NULL); in vd_start_bio()
2168 (void) ddi_taskq_dispatch(task->vd->ioq, vd_bio_task, buf, in vd_start_bio()
2180 buf->b_lblkno = offset << vd->vio_bshift; in vd_start_bio()
2182 request->status = ldi_strategy(vd->ldi_handle[slice], buf); in vd_start_bio()
2249 vd_need_reset(vd_t *vd, boolean_t reset_ldc) in vd_need_reset() argument
2251 mutex_enter(&vd->lock); in vd_need_reset()
2252 vd->reset_state = B_TRUE; in vd_need_reset()
2253 vd->reset_ldc = reset_ldc; in vd_need_reset()
2254 mutex_exit(&vd->lock); in vd_need_reset()
2263 vd_reset_if_needed(vd_t *vd) in vd_reset_if_needed() argument
2267 mutex_enter(&vd->lock); in vd_reset_if_needed()
2268 if (!vd->reset_state) { in vd_reset_if_needed()
2269 ASSERT(!vd->reset_ldc); in vd_reset_if_needed()
2270 mutex_exit(&vd->lock); in vd_reset_if_needed()
2273 mutex_exit(&vd->lock); in vd_reset_if_needed()
2275 PR0("Resetting connection state with %s", VD_CLIENT(vd)); in vd_reset_if_needed()
2282 if (vd->ioq != NULL) in vd_reset_if_needed()
2283 ddi_taskq_wait(vd->ioq); in vd_reset_if_needed()
2284 ddi_taskq_wait(vd->completionq); in vd_reset_if_needed()
2286 status = vd_flush_write(vd); in vd_reset_if_needed()
2291 if ((vd->initialized & VD_DRING) && in vd_reset_if_needed()
2292 ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0)) in vd_reset_if_needed()
2295 vd_free_dring_task(vd); in vd_reset_if_needed()
2298 if (vd->vio_msgp != NULL) { in vd_reset_if_needed()
2299 kmem_free(vd->vio_msgp, vd->max_msglen); in vd_reset_if_needed()
2300 vd->vio_msgp = NULL; in vd_reset_if_needed()
2304 if (vd->inband_task.msg != NULL) { in vd_reset_if_needed()
2305 kmem_free(vd->inband_task.msg, vd->max_msglen); in vd_reset_if_needed()
2306 vd->inband_task.msg = NULL; in vd_reset_if_needed()
2309 mutex_enter(&vd->lock); in vd_reset_if_needed()
2311 if (vd->reset_ldc) in vd_reset_if_needed()
2313 if (vd->reset_ldc && ((status = ldc_down(vd->ldc_handle)) != 0)) in vd_reset_if_needed()
2317 vd_reset_access(vd); in vd_reset_if_needed()
2319 vd->initialized &= ~(VD_SID | VD_SEQ_NUM | VD_DRING); in vd_reset_if_needed()
2320 vd->state = VD_STATE_INIT; in vd_reset_if_needed()
2321 vd->max_msglen = sizeof (vio_msg_t); /* baseline vio message size */ in vd_reset_if_needed()
2324 vd->vio_msgp = kmem_alloc(vd->max_msglen, KM_SLEEP); in vd_reset_if_needed()
2327 (void) ldc_up(vd->ldc_handle); in vd_reset_if_needed()
2329 vd->reset_state = B_FALSE; in vd_reset_if_needed()
2330 vd->reset_ldc = B_FALSE; in vd_reset_if_needed()
2332 mutex_exit(&vd->lock); in vd_reset_if_needed()
2338 vd_mark_in_reset(vd_t *vd) in vd_mark_in_reset() argument
2344 vd_need_reset(vd, B_FALSE); in vd_mark_in_reset()
2345 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, DDI_SLEEP); in vd_mark_in_reset()
2348 vd_need_reset(vd, B_TRUE); in vd_mark_in_reset()
2354 vd_mark_elem_done(vd_t *vd, int idx, int elem_status, int elem_nbytes) in vd_mark_elem_done() argument
2361 if (vd->reset_state) in vd_mark_elem_done()
2365 if ((status = VIO_DRING_ACQUIRE(&otd, vd->dring_mtype, in vd_mark_elem_done()
2366 vd->dring_handle, idx, idx)) != 0) { in vd_mark_elem_done()
2368 vd_mark_in_reset(vd); in vd_mark_elem_done()
2387 if ((status = VIO_DRING_RELEASE(vd->dring_mtype, in vd_mark_elem_done()
2388 vd->dring_handle, idx, idx)) != 0) { in vd_mark_elem_done()
2390 vd_mark_in_reset(vd); in vd_mark_elem_done()
2415 vd_t *vd = task->vd; in vd_complete_bio() local
2421 ASSERT(vd != NULL); in vd_complete_bio()
2457 if (vd->write_queue[wid] != NULL) { in vd_complete_bio()
2459 vd->write_queue[wid] = NULL; in vd_complete_bio()
2460 wid = VD_WRITE_INDEX_NEXT(vd, wid); in vd_complete_bio()
2469 while (vd->write_queue[wid] != NULL) { in vd_complete_bio()
2470 (void) biowait(vd->write_queue[wid]); in vd_complete_bio()
2471 vd->write_queue[wid] = NULL; in vd_complete_bio()
2472 wid = VD_WRITE_INDEX_NEXT(vd, wid); in vd_complete_bio()
2480 request->status = vd_flush_write(vd); in vd_complete_bio()
2483 (void (*)(void *))vd_flush_write, vd, in vd_complete_bio()
2494 if (!vd->reset_state) in vd_complete_bio()
2500 vd_mark_in_reset(vd); in vd_complete_bio()
2511 vd_mark_in_reset(vd); in vd_complete_bio()
2539 ASSERT(task->vd != NULL); in vd_notify()
2549 status = send_msg(task->vd->ldc_handle, task->msg, task->msglen); in vd_notify()
2554 vd_mark_in_reset(task->vd); in vd_notify()
2558 vd_need_reset(task->vd, B_TRUE); in vd_notify()
2580 vd_t *vd = task->vd; in vd_complete_notify() local
2584 if (!vd->reset_state && (vd->xfer_mode == VIO_DRING_MODE_V1_0)) { in vd_complete_notify()
2585 status = vd_mark_elem_done(vd, task->index, in vd_complete_notify()
2588 vd_mark_in_reset(vd); in vd_complete_notify()
2590 vd_need_reset(vd, B_TRUE); in vd_complete_notify()
2615 if (!vd->reset_state) in vd_complete_notify()
2963 vd_slice_geom_isvalid(vd_t *vd, struct dk_geom *geom) in vd_slice_geom_isvalid() argument
2965 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); in vd_slice_geom_isvalid()
2966 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC); in vd_slice_geom_isvalid()
2968 if (geom->dkg_ncyl != vd->dk_geom.dkg_ncyl || in vd_slice_geom_isvalid()
2969 geom->dkg_acyl != vd->dk_geom.dkg_acyl || in vd_slice_geom_isvalid()
2970 geom->dkg_nsect != vd->dk_geom.dkg_nsect || in vd_slice_geom_isvalid()
2971 geom->dkg_pcyl != vd->dk_geom.dkg_pcyl) in vd_slice_geom_isvalid()
2983 vd_slice_vtoc_isvalid(vd_t *vd, struct extvtoc *vtoc) in vd_slice_vtoc_isvalid() argument
2988 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); in vd_slice_vtoc_isvalid()
2989 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC); in vd_slice_vtoc_isvalid()
2991 if (vtoc->v_sanity != vd->vtoc.v_sanity || in vd_slice_vtoc_isvalid()
2992 vtoc->v_version != vd->vtoc.v_version || in vd_slice_vtoc_isvalid()
2993 vtoc->v_nparts != vd->vtoc.v_nparts || in vd_slice_vtoc_isvalid()
2994 strcmp(vtoc->v_volume, vd->vtoc.v_volume) != 0 || in vd_slice_vtoc_isvalid()
2995 strcmp(vtoc->v_asciilabel, vd->vtoc.v_asciilabel) != 0) in vd_slice_vtoc_isvalid()
3000 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_start || in vd_slice_vtoc_isvalid()
3002 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_size) in vd_slice_vtoc_isvalid()
3014 csize = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect; in vd_slice_vtoc_isvalid()
3020 if (vd->vtoc.v_part[0].p_size >= 4 * csize && in vd_slice_vtoc_isvalid()
3021 vtoc->v_part[0].p_size < vd->vtoc.v_part[0].p_size - 4 *csize) in vd_slice_vtoc_isvalid()
3044 vd_do_slice_ioctl(vd_t *vd, int cmd, void *ioctl_arg) in vd_do_slice_ioctl() argument
3051 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); in vd_do_slice_ioctl()
3054 return (vd_flush_write(vd)); in vd_do_slice_ioctl()
3056 switch (vd->vdisk_label) { in vd_do_slice_ioctl()
3065 bcopy(&vd->dk_geom, ioctl_arg, sizeof (vd->dk_geom)); in vd_do_slice_ioctl()
3070 bcopy(&vd->vtoc, ioctl_arg, sizeof (vd->vtoc)); in vd_do_slice_ioctl()
3080 if (!vd_slice_geom_isvalid(vd, geom)) in vd_do_slice_ioctl()
3092 if (!vd_slice_vtoc_isvalid(vd, vtoc)) in vd_do_slice_ioctl()
3120 len = vd_slice_flabel_read(vd, in vd_do_slice_ioctl()
3122 lba * vd->vdisk_bsize, len); in vd_do_slice_ioctl()
3143 vds_efi_alloc_and_read(vd_t *vd, efi_gpt_t **gpt, efi_gpe_t **gpe) in vds_efi_alloc_and_read() argument
3148 VD_EFI_DEV_SET(edev, vd, (vd_efi_ioctl_func)vd_backend_ioctl); in vds_efi_alloc_and_read()
3156 vds_efi_free(vd_t *vd, efi_gpt_t *gpt, efi_gpe_t *gpe) in vds_efi_free() argument
3160 VD_EFI_DEV_SET(edev, vd, (vd_efi_ioctl_func)vd_backend_ioctl); in vds_efi_free()
3166 vd_dskimg_validate_efi(vd_t *vd) in vd_dskimg_validate_efi() argument
3173 if ((status = vds_efi_alloc_and_read(vd, &gpt, &gpe)) != 0) in vd_dskimg_validate_efi()
3176 bzero(&vd->vtoc, sizeof (struct extvtoc)); in vd_dskimg_validate_efi()
3177 bzero(&vd->dk_geom, sizeof (struct dk_geom)); in vd_dskimg_validate_efi()
3178 bzero(vd->slices, sizeof (vd_slice_t) * VD_MAXPART); in vd_dskimg_validate_efi()
3180 vd->efi_reserved = -1; in vd_dskimg_validate_efi()
3191 vd->slices[i].start = gpe[i].efi_gpe_StartingLBA; in vd_dskimg_validate_efi()
3192 vd->slices[i].nblocks = gpe[i].efi_gpe_EndingLBA - in vd_dskimg_validate_efi()
3197 vd->efi_reserved = i; in vd_dskimg_validate_efi()
3201 ASSERT(vd->vdisk_size != 0); in vd_dskimg_validate_efi()
3202 vd->slices[VD_EFI_WD_SLICE].start = 0; in vd_dskimg_validate_efi()
3203 vd->slices[VD_EFI_WD_SLICE].nblocks = vd->vdisk_size; in vd_dskimg_validate_efi()
3205 vds_efi_free(vd, gpt, gpe); in vd_dskimg_validate_efi()
3233 vd_dskimg_validate_geometry(vd_t *vd) in vd_dskimg_validate_geometry() argument
3236 struct dk_geom *geom = &vd->dk_geom; in vd_dskimg_validate_geometry()
3237 struct extvtoc *vtoc = &vd->vtoc; in vd_dskimg_validate_geometry()
3241 ASSERT(VD_DSKIMG(vd)); in vd_dskimg_validate_geometry()
3243 if (VD_DSKIMG_LABEL_READ(vd, &label) < 0) in vd_dskimg_validate_geometry()
3252 if (vd_dskimg_validate_efi(vd) == 0) { in vd_dskimg_validate_geometry()
3253 vd->vdisk_label = VD_DISK_LABEL_EFI; in vd_dskimg_validate_geometry()
3257 vd->vdisk_label = VD_DISK_LABEL_UNK; in vd_dskimg_validate_geometry()
3258 vd_build_default_label(vd->dskimg_size, vd->vdisk_bsize, in vd_dskimg_validate_geometry()
3262 vd->vdisk_label = VD_DISK_LABEL_VTOC; in vd_dskimg_validate_geometry()
3269 bzero(vd->slices, sizeof (vd_slice_t) * VD_MAXPART); in vd_dskimg_validate_geometry()
3270 if (vd->vdisk_label != VD_DISK_LABEL_UNK) { in vd_dskimg_validate_geometry()
3272 vd->slices[i].start = vtoc->v_part[i].p_start; in vd_dskimg_validate_geometry()
3273 vd->slices[i].nblocks = vtoc->v_part[i].p_size; in vd_dskimg_validate_geometry()
3288 vd_do_dskimg_ioctl(vd_t *vd, int cmd, void *ioctl_arg) in vd_do_dskimg_ioctl() argument
3296 ASSERT(VD_DSKIMG(vd)); in vd_do_dskimg_ioctl()
3304 rc = vd_dskimg_validate_geometry(vd); in vd_do_dskimg_ioctl()
3307 bcopy(&vd->dk_geom, geom, sizeof (struct dk_geom)); in vd_do_dskimg_ioctl()
3314 rc = vd_dskimg_validate_geometry(vd); in vd_do_dskimg_ioctl()
3317 bcopy(&vd->vtoc, vtoc, sizeof (struct extvtoc)); in vd_do_dskimg_ioctl()
3333 bcopy(ioctl_arg, &vd->dk_geom, sizeof (vd->dk_geom)); in vd_do_dskimg_ioctl()
3338 ASSERT(vd->dk_geom.dkg_nhead != 0 && in vd_do_dskimg_ioctl()
3339 vd->dk_geom.dkg_nsect != 0); in vd_do_dskimg_ioctl()
3347 vd_vtocgeom_to_label(vtoc, &vd->dk_geom, &label); in vd_do_dskimg_ioctl()
3350 if ((rc = vd_dskimg_set_vtoc(vd, &label)) != 0) in vd_do_dskimg_ioctl()
3356 return (vd_flush_write(vd)); in vd_do_dskimg_ioctl()
3362 if (vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, in vd_do_dskimg_ioctl()
3372 if (vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, in vd_do_dskimg_ioctl()
3386 (void) vd_dskimg_validate_geometry(vd); in vd_do_dskimg_ioctl()
3393 if (vd_dskimg_write_devid(vd, vd->dskimg_devid) != 0) { in vd_do_dskimg_ioctl()
3401 vd_backend_ioctl(vd_t *vd, int cmd, caddr_t arg) in vd_backend_ioctl() argument
3410 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) { in vd_backend_ioctl()
3413 status = vd_do_slice_ioctl(vd, cmd, arg); in vd_backend_ioctl()
3415 } else if (VD_DSKIMG(vd)) { in vd_backend_ioctl()
3418 status = vd_do_dskimg_ioctl(vd, cmd, arg); in vd_backend_ioctl()
3423 status = ldi_ioctl(vd->ldi_handle[0], cmd, (intptr_t)arg, in vd_backend_ioctl()
3424 vd->open_flags | FKIOCTL, kcred, &rval); in vd_backend_ioctl()
3437 status = ldi_ioctl(vd->ldi_handle[0], cmd, in vd_backend_ioctl()
3438 (intptr_t)&vtoc, vd->open_flags | FKIOCTL, in vd_backend_ioctl()
3448 status = ldi_ioctl(vd->ldi_handle[0], cmd, in vd_backend_ioctl()
3449 (intptr_t)&vtoc, vd->open_flags | FKIOCTL, in vd_backend_ioctl()
3480 vd_do_ioctl(vd_t *vd, vd_dring_payload_t *request, void* buf, vd_ioctl_t *ioctl) in vd_do_ioctl() argument
3486 ASSERT(request->slice < vd->nslices); in vd_do_ioctl()
3493 if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes, in vd_do_ioctl()
3520 if (!(vd->open_flags & FWRITE) && in vd_do_ioctl()
3531 request->status = vd_backend_ioctl(vd, ioctl->cmd, ioctl->arg); in vd_do_ioctl()
3559 if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes, in vd_do_ioctl()
3611 vd_t *vd = task->vd; in vd_ioctl() local
3656 ASSERT(vd != NULL); in vd_ioctl()
3658 ASSERT(request->slice < vd->nslices); in vd_ioctl()
3692 if (!(vd->open_flags & FWRITE) && ioctl[i].write) { in vd_ioctl()
3701 status = vd_do_ioctl(vd, request, buf, &ioctl[i]); in vd_ioctl()
3711 vd_t *vd = task->vd; in vd_get_devid() local
3720 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) { in vd_get_devid()
3733 if (VD_DSKIMG(vd)) { in vd_get_devid()
3734 if (vd->dskimg_devid == NULL) { in vd_get_devid()
3739 sz = ddi_devid_sizeof(vd->dskimg_devid); in vd_get_devid()
3741 bcopy(vd->dskimg_devid, devid, sz); in vd_get_devid()
3744 if (ddi_lyr_get_devid(vd->dev[request->slice], in vd_get_devid()
3775 if ((status = ldc_mem_copy(vd->ldc_handle, (caddr_t)vd_devid, 0, in vd_get_devid()
3790 vd_scsi_reset(vd_t *vd) in vd_scsi_reset() argument
3798 status = ldi_ioctl(vd->ldi_handle[0], USCSICMD, (intptr_t)&uscsi, in vd_scsi_reset()
3799 (vd->open_flags | FKIOCTL), kcred, &rval); in vd_scsi_reset()
3807 vd_t *vd = task->vd; in vd_reset() local
3811 ASSERT(vd->scsi); in vd_reset()
3821 request->status = vd_scsi_reset(vd); in vd_reset()
3831 vd_t *vd = task->vd; in vd_get_capacity() local
3852 (void) vd_backend_check_size(vd); in vd_get_capacity()
3853 ASSERT(vd->vdisk_size != 0); in vd_get_capacity()
3857 vd_cap.vdisk_block_size = vd->vdisk_bsize; in vd_get_capacity()
3858 vd_cap.vdisk_size = vd->vdisk_size; in vd_get_capacity()
3860 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&vd_cap, 0, &nbytes, in vd_get_capacity()
3875 vd_t *vd = task->vd; in vd_get_access() local
3879 ASSERT(vd->scsi); in vd_get_access()
3891 request->status = ldi_ioctl(vd->ldi_handle[request->slice], MHIOCSTATUS, in vd_get_access()
3892 NULL, (vd->open_flags | FKIOCTL), kcred, &rval); in vd_get_access()
3899 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&access, 0, &nbytes, in vd_get_access()
3914 vd_t *vd = task->vd; in vd_set_access() local
3918 ASSERT(vd->scsi); in vd_set_access()
3928 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&flags, 0, &nbytes, in vd_set_access()
3936 request->status = ldi_ioctl(vd->ldi_handle[request->slice], in vd_set_access()
3937 MHIOCRELEASE, NULL, (vd->open_flags | FKIOCTL), kcred, in vd_set_access()
3940 vd->ownership = B_FALSE; in vd_set_access()
3963 request->status = ldi_ioctl(vd->ldi_handle[request->slice], in vd_set_access()
3964 MHIOCTKOWN, NULL, (vd->open_flags | FKIOCTL), kcred, &rval); in vd_set_access()
3978 request->status = ldi_ioctl(vd->ldi_handle[request->slice], in vd_set_access()
3979 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, in vd_set_access()
3983 request->status = ldi_ioctl(vd->ldi_handle[request->slice], in vd_set_access()
3984 MHIOCTKOWN, NULL, (vd->open_flags | FKIOCTL), kcred, &rval); in vd_set_access()
3995 request->status = ldi_ioctl(vd->ldi_handle[request->slice], in vd_set_access()
3996 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, in vd_set_access()
4002 (void) vd_scsi_reset(vd); in vd_set_access()
4005 request->status = ldi_ioctl(vd->ldi_handle[request->slice], in vd_set_access()
4006 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, in vd_set_access()
4013 request->status = ldi_ioctl(vd->ldi_handle[request->slice], in vd_set_access()
4014 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred, in vd_set_access()
4020 vd->ownership = B_TRUE; in vd_set_access()
4028 vd_reset_access(vd_t *vd) in vd_reset_access() argument
4032 if (vd->file || vd->volume || !vd->ownership) in vd_reset_access()
4036 status = ldi_ioctl(vd->ldi_handle[0], MHIOCRELEASE, NULL, in vd_reset_access()
4037 (vd->open_flags | FKIOCTL), kcred, &rval); in vd_reset_access()
4044 vd->ownership = B_FALSE; in vd_reset_access()
4055 status = vd_scsi_reset(vd); in vd_reset_access()
4061 status = ldi_ioctl(vd->ldi_handle[0], MHIOCRELEASE, NULL, in vd_reset_access()
4062 (vd->open_flags | FKIOCTL), kcred, &rval); in vd_reset_access()
4065 vd->ownership = B_FALSE; in vd_reset_access()
4082 ", rebooting the system", vd->device_path); in vd_reset_access()
4085 panic(VD_RESET_ACCESS_FAILURE_MSG, vd->device_path); in vd_reset_access()
4088 cmn_err(CE_WARN, VD_RESET_ACCESS_FAILURE_MSG, vd->device_path); in vd_reset_access()
4136 vd_t *vd = task->vd; in vd_do_process_task() local
4139 ASSERT(vd != NULL); in vd_do_process_task()
4159 if ((VD_OP_SUPPORTED(vd->operations, request->operation) == B_FALSE) || in vd_do_process_task()
4167 if (request->slice >= vd->nslices && in vd_do_process_task()
4168 ((vd->vdisk_type != VD_DISK_TYPE_DISK && vd_slice_single_slice) || in vd_do_process_task()
4171 request->slice, (vd->nslices - 1)); in vd_do_process_task()
4208 vd_t *vd = task->vd; in vd_process_task() local
4230 (void) ddi_taskq_dispatch(vd->completionq, vd_complete, in vd_process_task()
4235 if (!vd->reset_state && (vd->xfer_mode == VIO_DRING_MODE_V1_0)) { in vd_process_task()
4237 status = vd_mark_elem_done(vd, task->index, in vd_process_task()
4240 vd_mark_in_reset(vd); in vd_process_task()
4242 vd_need_reset(vd, B_TRUE); in vd_process_task()
4325 vd_process_ver_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) in vd_process_ver_msg() argument
4370 ASSERT(!(vd->initialized & VD_SID)); in vd_process_ver_msg()
4371 vd->sid = ver_msg->tag.vio_sid; in vd_process_ver_msg()
4372 vd->initialized |= VD_SID; in vd_process_ver_msg()
4379 vd->version.major = ver_msg->ver_major; in vd_process_ver_msg()
4380 vd->version.minor = ver_msg->ver_minor; in vd_process_ver_msg()
4388 vd_set_exported_operations(vd_t *vd) in vd_set_exported_operations() argument
4390 vd->operations = 0; /* clear field */ in vd_set_exported_operations()
4397 if (vio_ver_is_supported(vd->version, 1, 1)) { in vd_set_exported_operations()
4398 ASSERT(vd->open_flags & FREAD); in vd_set_exported_operations()
4399 vd->operations |= VD_OP_MASK_READ | (1 << VD_OP_GET_CAPACITY); in vd_set_exported_operations()
4401 if (vd->open_flags & FWRITE) in vd_set_exported_operations()
4402 vd->operations |= VD_OP_MASK_WRITE; in vd_set_exported_operations()
4404 if (vd->scsi) in vd_set_exported_operations()
4405 vd->operations |= VD_OP_MASK_SCSI; in vd_set_exported_operations()
4407 if (VD_DSKIMG(vd) && vd_dskimg_is_iso_image(vd)) { in vd_set_exported_operations()
4413 vd->operations &= ~VD_OP_MASK_WRITE; in vd_set_exported_operations()
4415 } else if (vio_ver_is_supported(vd->version, 1, 0)) { in vd_set_exported_operations()
4416 vd->operations = VD_OP_MASK_READ | VD_OP_MASK_WRITE; in vd_set_exported_operations()
4420 ASSERT(vd->operations != 0); in vd_set_exported_operations()
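
vd_set_exported_operations() (source lines 4388-4420) builds the operations bitmask incrementally: reads always, writes only if the backend was opened FWRITE, SCSI operations only for SCSI backends, and writes stripped again for ISO images. A sketch of that composition; the bit values and flags below are invented for illustration, not the real VD_OP_MASK_* definitions:

    #include <stdio.h>
    #include <stdint.h>

    /* Invented operation bits standing in for the VD_OP_MASK_* values. */
    #define OP_MASK_READ    0x0001
    #define OP_MASK_WRITE   0x0002
    #define OP_MASK_SCSI    0x0004

    int
    main(void)
    {
            uint64_t operations = 0;                /* start from a clean mask */
            int writable = 1, scsi = 0, iso = 1;    /* assumed backend properties */

            operations |= OP_MASK_READ;
            if (writable)
                    operations |= OP_MASK_WRITE;
            if (scsi)
                    operations |= OP_MASK_SCSI;
            if (iso)
                    operations &= ~OP_MASK_WRITE;   /* ISO images end up read-only */

            printf("operations = 0x%llx\n", (unsigned long long)operations);
            return (0);
    }
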
4424 vd_process_attr_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) in vd_process_attr_msg() argument
4460 if ((vd->initialized & VD_DISK_READY) == 0) { in vd_process_attr_msg()
4461 PR0("Retry setting up disk (%s)", vd->device_path); in vd_process_attr_msg()
4463 status = vd_setup_vd(vd); in vd_process_attr_msg()
4471 if (!vd_enabled(vd)) in vd_process_attr_msg()
4479 vd->initialized |= VD_DISK_READY; in vd_process_attr_msg()
4480 ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR); in vd_process_attr_msg()
4482 ((vd->vdisk_type == VD_DISK_TYPE_DISK) ? "disk" : "slice"), in vd_process_attr_msg()
4483 (vd->volume ? "yes" : "no"), in vd_process_attr_msg()
4484 (vd->file ? "yes" : "no"), in vd_process_attr_msg()
4485 vd->nslices); in vd_process_attr_msg()
4489 vd->xfer_mode = attr_msg->xfer_mode; in vd_process_attr_msg()
4491 if (vd->xfer_mode == VIO_DESC_MODE) { in vd_process_attr_msg()
4515 vd->max_msglen = MAX(vd->max_msglen, max_inband_msglen); in vd_process_attr_msg()
4521 vd->inband_task.vd = vd; in vd_process_attr_msg()
4522 vd->inband_task.msg = kmem_alloc(vd->max_msglen, KM_SLEEP); in vd_process_attr_msg()
4523 vd->inband_task.index = 0; in vd_process_attr_msg()
4524 vd->inband_task.type = VD_FINAL_RANGE_TASK; /* range == 1 */ in vd_process_attr_msg()
4528 attr_msg->vdisk_block_size = vd->vdisk_bsize; in vd_process_attr_msg()
4529 attr_msg->max_xfer_sz = vd->max_xfer_sz; in vd_process_attr_msg()
4531 attr_msg->vdisk_size = vd->vdisk_size; in vd_process_attr_msg()
4532 attr_msg->vdisk_type = (vd_slice_single_slice)? vd->vdisk_type : in vd_process_attr_msg()
4534 attr_msg->vdisk_media = vd->vdisk_media; in vd_process_attr_msg()
4537 vd_set_exported_operations(vd); in vd_process_attr_msg()
4538 attr_msg->operations = vd->operations; in vd_process_attr_msg()
4540 PR0("%s", VD_CLIENT(vd)); in vd_process_attr_msg()
4542 ASSERT(vd->dring_task == NULL); in vd_process_attr_msg()
4548 vd_process_dring_reg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) in vd_process_dring_reg_msg() argument
4579 if (vd->initialized & VD_DRING) { in vd_process_dring_reg_msg()
4613 status = ldc_mem_dring_map(vd->ldc_handle, reg_msg->cookie, in vd_process_dring_reg_msg()
4615 reg_msg->descriptor_size, mtype, &vd->dring_handle); in vd_process_dring_reg_msg()
4629 ldc_mem_dring_info(vd->dring_handle, &dring_minfo)) != 0) { in vd_process_dring_reg_msg()
4631 if ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0) in vd_process_dring_reg_msg()
4643 vd->initialized |= VD_DRING; in vd_process_dring_reg_msg()
4644 vd->dring_ident = 1; /* "There Can Be Only One" */ in vd_process_dring_reg_msg()
4645 vd->dring = dring_minfo.vaddr; in vd_process_dring_reg_msg()
4646 vd->descriptor_size = reg_msg->descriptor_size; in vd_process_dring_reg_msg()
4647 vd->dring_len = reg_msg->num_descriptors; in vd_process_dring_reg_msg()
4648 vd->dring_mtype = dring_minfo.mtype; in vd_process_dring_reg_msg()
4649 reg_msg->dring_ident = vd->dring_ident; in vd_process_dring_reg_msg()
4651 vd->descriptor_size, vd->dring_len); in vd_process_dring_reg_msg()
4657 vd->dring_task = in vd_process_dring_reg_msg()
4658 kmem_zalloc((sizeof (*vd->dring_task)) * vd->dring_len, KM_SLEEP); in vd_process_dring_reg_msg()
4659 for (int i = 0; i < vd->dring_len; i++) { in vd_process_dring_reg_msg()
4660 vd->dring_task[i].vd = vd; in vd_process_dring_reg_msg()
4661 vd->dring_task[i].index = i; in vd_process_dring_reg_msg()
4663 status = ldc_mem_alloc_handle(vd->ldc_handle, in vd_process_dring_reg_msg()
4664 &(vd->dring_task[i].mhdl)); in vd_process_dring_reg_msg()
4675 vd->dring_task[i].request = kmem_zalloc((vd->descriptor_size - in vd_process_dring_reg_msg()
4677 vd->dring_task[i].msg = kmem_alloc(vd->max_msglen, KM_SLEEP); in vd_process_dring_reg_msg()
4680 if (vd->file || vd->zvol) { in vd_process_dring_reg_msg()
4681 vd->write_queue = in vd_process_dring_reg_msg()
4682 kmem_zalloc(sizeof (buf_t *) * vd->dring_len, KM_SLEEP); in vd_process_dring_reg_msg()
4689 vd_process_dring_unreg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) in vd_process_dring_unreg_msg() argument
4708 if (unreg_msg->dring_ident != vd->dring_ident) { in vd_process_dring_unreg_msg()
4710 vd->dring_ident, unreg_msg->dring_ident); in vd_process_dring_unreg_msg()
4738 vd_check_seq_num(vd_t *vd, uint64_t seq_num) in vd_check_seq_num() argument
4740 if ((vd->initialized & VD_SEQ_NUM) && (seq_num != vd->seq_num + 1)) { in vd_check_seq_num()
4742 seq_num, (vd->seq_num + 1)); in vd_check_seq_num()
4744 vd_need_reset(vd, B_FALSE); in vd_check_seq_num()
4748 vd->seq_num = seq_num; in vd_check_seq_num()
4749 vd->initialized |= VD_SEQ_NUM; /* superfluous after first time... */ in vd_check_seq_num()
4770 vd_process_desc_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) in vd_process_desc_msg() argument
4796 if (vd_check_seq_num(vd, desc_msg->hdr.seq_num) != 0) in vd_process_desc_msg()
4808 ASSERT(vd->inband_task.msg != NULL); in vd_process_desc_msg()
4810 bcopy(msg, vd->inband_task.msg, msglen); in vd_process_desc_msg()
4811 vd->inband_task.msglen = msglen; in vd_process_desc_msg()
4817 desc_msg = (vd_dring_inband_msg_t *)vd->inband_task.msg; in vd_process_desc_msg()
4818 vd->inband_task.request = &desc_msg->payload; in vd_process_desc_msg()
4820 return (vd_process_task(&vd->inband_task)); in vd_process_desc_msg()
4824 vd_process_element(vd_t *vd, vd_task_type_t type, uint32_t idx, in vd_process_element() argument
4833 if ((status = VIO_DRING_ACQUIRE(&otd, vd->dring_mtype, in vd_process_element()
4834 vd->dring_handle, idx, idx)) != 0) { in vd_process_element()
4840 bcopy(&elem->payload, vd->dring_task[idx].request, in vd_process_element()
4841 (vd->descriptor_size - sizeof (vio_dring_entry_hdr_t))); in vd_process_element()
4846 if ((status = VIO_DRING_RELEASE(vd->dring_mtype, in vd_process_element()
4847 vd->dring_handle, idx, idx)) != 0) { in vd_process_element()
4857 vd->dring_task[idx].type = type; in vd_process_element()
4860 bcopy(msg, vd->dring_task[idx].msg, msglen); in vd_process_element()
4862 vd->dring_task[idx].msglen = msglen; in vd_process_element()
4863 return (vd_process_task(&vd->dring_task[idx])); in vd_process_element()
4867 vd_process_element_range(vd_t *vd, int start, int end, in vd_process_element_range() argument
4888 nelem = ((end < start) ? end + vd->dring_len : end) - start + 1; in vd_process_element_range()
4889 for (i = start, n = nelem; n > 0; i = (i + 1) % vd->dring_len, n--) { in vd_process_element_range()
4892 status = vd_process_element(vd, type, i, msg, msglen); in vd_process_element_range()
4908 if (vd->ioq != NULL) in vd_process_element_range()
4909 ddi_taskq_wait(vd->ioq); in vd_process_element_range()
4910 ddi_taskq_wait(vd->completionq); in vd_process_element_range()
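
vd_process_element_range() (source lines 4888-4889) handles an inclusive [start, end] range of descriptor-ring entries that may wrap past the end of the ring. A worked sketch of the element count and the modular walk, with a hypothetical 8-entry ring:

    #include <stdio.h>

    int
    main(void)
    {
            int dring_len = 8, start = 6, end = 2;  /* range wraps around */

            /* Add dring_len when the range wraps, as on source line 4888. */
            int nelem = ((end < start) ? end + dring_len : end) - start + 1;

            /* Visits 6 7 0 1 2 -- five elements. */
            printf("nelem=%d:", nelem);
            for (int i = start, n = nelem; n > 0; i = (i + 1) % dring_len, n--)
                    printf(" %d", i);
            printf("\n");
            return (0);
    }
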
4917 vd_process_dring_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) in vd_process_dring_msg() argument
4936 if (vd_check_seq_num(vd, dring_msg->seq_num) != 0) in vd_process_dring_msg()
4939 if (dring_msg->dring_ident != vd->dring_ident) { in vd_process_dring_msg()
4941 vd->dring_ident, dring_msg->dring_ident); in vd_process_dring_msg()
4945 if (dring_msg->start_idx >= vd->dring_len) { in vd_process_dring_msg()
4947 dring_msg->start_idx, vd->dring_len); in vd_process_dring_msg()
4952 (dring_msg->end_idx >= vd->dring_len)) { in vd_process_dring_msg()
4954 dring_msg->end_idx, vd->dring_len); in vd_process_dring_msg()
4961 return (vd_process_element_range(vd, dring_msg->start_idx, in vd_process_dring_msg()
4995 vd_do_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) in vd_do_process_msg() argument
5010 if ((msg->tag.vio_sid != vd->sid) && (vd->initialized & VD_SID)) { in vd_do_process_msg()
5011 PR0("Expected SID %u, received %u", vd->sid, in vd_do_process_msg()
5016 PR1("\tWhile in state %d (%s)", vd->state, vd_decode_state(vd->state)); in vd_do_process_msg()
5021 switch (vd->state) { in vd_do_process_msg()
5023 if ((status = vd_process_ver_msg(vd, msg, msglen)) != 0) in vd_do_process_msg()
5027 vd->state = VD_STATE_VER; in vd_do_process_msg()
5031 if ((status = vd_process_attr_msg(vd, msg, msglen)) != 0) in vd_do_process_msg()
5035 vd->state = VD_STATE_ATTR; in vd_do_process_msg()
5039 switch (vd->xfer_mode) { in vd_do_process_msg()
5045 vd->state = VD_STATE_DATA; in vd_do_process_msg()
5050 vd_process_dring_reg_msg(vd, msg, msglen)) != 0) in vd_do_process_msg()
5054 vd->state = VD_STATE_DRING; in vd_do_process_msg()
5066 vd->state = VD_STATE_DATA; in vd_do_process_msg()
5080 vd_process_dring_reg_msg(vd, msg, msglen)) != ENOMSG) in vd_do_process_msg()
5089 status = vd_process_dring_unreg_msg(vd, msg, msglen); in vd_do_process_msg()
5093 switch (vd->xfer_mode) { in vd_do_process_msg()
5095 return (vd_process_desc_msg(vd, msg, msglen)); in vd_do_process_msg()
5102 if ((status = vd_process_dring_msg(vd, msg, in vd_do_process_msg()
5112 status = vd_process_dring_unreg_msg(vd, msg, msglen); in vd_do_process_msg()
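
The switch on vd->state in vd_do_process_msg() (source lines 5021-5112) is a handshake state machine: a version message moves INIT to VER, an attribute message moves VER to ATTR, and from ATTR the connection reaches DATA either directly (in-band descriptor mode) or via dring registration. A rough sketch of the dring-mode path only; the enum and message names are placeholders, not the VIO protocol definitions:

    #include <stdio.h>

    typedef enum { ST_INIT, ST_VER, ST_ATTR, ST_DRING, ST_DATA } state_t;
    typedef enum { MSG_VER, MSG_ATTR, MSG_DRING_REG, MSG_DRING_DATA } msg_t;

    /* Each accepted message type advances the connection one state. */
    static state_t
    step(state_t s, msg_t m)
    {
            switch (s) {
            case ST_INIT:  return ((m == MSG_VER) ? ST_VER : s);
            case ST_VER:   return ((m == MSG_ATTR) ? ST_ATTR : s);
            case ST_ATTR:  return ((m == MSG_DRING_REG) ? ST_DRING : s);
            case ST_DRING: return ((m == MSG_DRING_DATA) ? ST_DATA : s);
            default:       return (s);
            }
    }

    int
    main(void)
    {
            state_t s = ST_INIT;
            msg_t seq[] = { MSG_VER, MSG_ATTR, MSG_DRING_REG, MSG_DRING_DATA };

            for (int i = 0; i < 4; i++)
                    s = step(s, seq[i]);
            printf("reached DATA: %s\n", (s == ST_DATA) ? "yes" : "no");
            return (0);
    }
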
5129 vd_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen) in vd_process_msg() argument
5143 vd_need_reset(vd, B_TRUE); in vd_process_msg()
5150 switch (status = vd_do_process_msg(vd, msg, msglen)) { in vd_process_msg()
5176 PR1("\tResulting in state %d (%s)", vd->state, in vd_process_msg()
5177 vd_decode_state(vd->state)); in vd_process_msg()
5180 task.vd = vd; in vd_process_msg()
5190 (void) ddi_taskq_dispatch(vd->completionq, vd_serial_notify, in vd_process_msg()
5198 ddi_taskq_wait(vd->completionq); in vd_process_msg()
5204 vd_need_reset(vd, reset_ldc); in vd_process_msg()
5211 vd_enabled(vd_t *vd) in vd_enabled() argument
5215 mutex_enter(&vd->lock); in vd_enabled()
5216 enabled = vd->enabled; in vd_enabled()
5217 mutex_exit(&vd->lock); in vd_enabled()
5224 vd_t *vd = (vd_t *)arg; in vd_recv_msg() local
5227 ASSERT(vd != NULL); in vd_recv_msg()
5232 while (vd_enabled(vd) && status == 0) { in vd_recv_msg()
5239 vd_reset_if_needed(vd); /* can change vd->max_msglen */ in vd_recv_msg()
5244 status = ldc_status(vd->ldc_handle, &lstatus); in vd_recv_msg()
5251 ASSERT(vd->max_msglen != 0); in vd_recv_msg()
5253 msgsize = vd->max_msglen; /* stable copy for alloc/free */ in vd_recv_msg()
5256 status = recv_msg(vd->ldc_handle, vd->vio_msgp, &msglen); in vd_recv_msg()
5259 rv = vd_process_msg(vd, (void *)vd->vio_msgp, msglen); in vd_recv_msg()
5261 if (msgsize != vd->max_msglen) { in vd_recv_msg()
5263 msgsize, vd->max_msglen); in vd_recv_msg()
5264 kmem_free(vd->vio_msgp, msgsize); in vd_recv_msg()
5265 vd->vio_msgp = in vd_recv_msg()
5266 kmem_alloc(vd->max_msglen, KM_SLEEP); in vd_recv_msg()
5277 vd_need_reset(vd, B_FALSE); in vd_recv_msg()
5284 vd_need_reset(vd, B_TRUE); in vd_recv_msg()
5295 vd_t *vd = (vd_t *)(void *)arg; in vd_handle_ldc_events() local
5298 ASSERT(vd != NULL); in vd_handle_ldc_events()
5300 if (!vd_enabled(vd)) in vd_handle_ldc_events()
5306 vd_need_reset(vd, B_TRUE); in vd_handle_ldc_events()
5307 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, in vd_handle_ldc_events()
5311 vd_need_reset(vd, B_TRUE); in vd_handle_ldc_events()
5318 if (vd->state != VD_STATE_INIT) { in vd_handle_ldc_events()
5320 vd_need_reset(vd, B_FALSE); in vd_handle_ldc_events()
5321 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, in vd_handle_ldc_events()
5322 vd, DDI_SLEEP); in vd_handle_ldc_events()
5325 vd_need_reset(vd, B_TRUE); in vd_handle_ldc_events()
5331 (void) ldc_up(vd->ldc_handle); in vd_handle_ldc_events()
5340 vd_need_reset(vd, B_FALSE); in vd_handle_ldc_events()
5341 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, in vd_handle_ldc_events()
5342 vd, DDI_SLEEP); in vd_handle_ldc_events()
5345 vd_need_reset(vd, B_TRUE); in vd_handle_ldc_events()
5355 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, in vd_handle_ldc_events()
5360 vd_need_reset(vd, B_TRUE); in vd_handle_ldc_events()
5443 vd_dskimg_is_iso_image(vd_t *vd) in vd_dskimg_is_iso_image() argument
5449 ASSERT(VD_DSKIMG(vd)); in vd_dskimg_is_iso_image()
5455 if (vd->vdisk_media == VD_MEDIA_DVD || vd->vdisk_media == VD_MEDIA_CD) in vd_dskimg_is_iso_image()
5464 sec = (ISO_VOLDESC_SEC * ISO_SECTOR_SIZE) / vd->vdisk_bsize; in vd_dskimg_is_iso_image()
5465 rv = vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)iso_buf, in vd_dskimg_is_iso_image()
5493 vd_is_atapi_device(vd_t *vd) in vd_is_atapi_device() argument
5499 ASSERT(vd->ldi_handle[0] != NULL); in vd_is_atapi_device()
5500 ASSERT(!vd->file); in vd_is_atapi_device()
5502 rv = ldi_prop_lookup_string(vd->ldi_handle[0], in vd_is_atapi_device()
5505 PR0("'variant' property exists for %s", vd->device_path); in vd_is_atapi_device()
5511 rv = ldi_prop_exists(vd->ldi_handle[0], LDI_DEV_T_ANY, "atapi"); in vd_is_atapi_device()
5513 PR0("'atapi' property exists for %s", vd->device_path); in vd_is_atapi_device()
5521 vd_setup_full_disk(vd_t *vd) in vd_setup_full_disk() argument
5524 major_t major = getmajor(vd->dev[0]); in vd_setup_full_disk()
5525 minor_t minor = getminor(vd->dev[0]) - VD_ENTIRE_DISK_SLICE; in vd_setup_full_disk()
5527 ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK); in vd_setup_full_disk()
5530 status = vd_backend_check_size(vd); in vd_setup_full_disk()
5533 if (!vd->scsi) { in vd_setup_full_disk()
5536 vd->device_path, status); in vd_setup_full_disk()
5545 vd->vdisk_size = VD_SIZE_UNKNOWN; in vd_setup_full_disk()
5546 vd->vdisk_bsize = 0; in vd_setup_full_disk()
5547 vd->backend_bsize = 0; in vd_setup_full_disk()
5548 vd->vdisk_media = VD_MEDIA_FIXED; in vd_setup_full_disk()
5552 vd->dev[VD_ENTIRE_DISK_SLICE] = vd->dev[0]; in vd_setup_full_disk()
5553 vd->dev[0] = 0; in vd_setup_full_disk()
5554 vd->ldi_handle[VD_ENTIRE_DISK_SLICE] = vd->ldi_handle[0]; in vd_setup_full_disk()
5555 vd->ldi_handle[0] = NULL; in vd_setup_full_disk()
5558 for (int slice = 0; slice < vd->nslices; slice++) { in vd_setup_full_disk()
5565 ASSERT(vd->dev[slice] == 0); in vd_setup_full_disk()
5566 ASSERT(vd->ldi_handle[slice] == NULL); in vd_setup_full_disk()
5571 vd->dev[slice] = makedevice(major, (minor + slice)); in vd_setup_full_disk()
5595 status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK, in vd_setup_full_disk()
5596 vd->open_flags, kcred, &vd->ldi_handle[slice], in vd_setup_full_disk()
5597 vd->vds->ldi_ident); in vd_setup_full_disk()
5600 status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK, in vd_setup_full_disk()
5601 vd->open_flags | FNDELAY, kcred, in vd_setup_full_disk()
5602 &vd->ldi_handle[slice], vd->vds->ldi_ident); in vd_setup_full_disk()
5609 vd->ldi_handle[slice] = NULL; in vd_setup_full_disk()
5658 vd_setup_partition_vtoc(vd_t *vd) in vd_setup_partition_vtoc() argument
5660 char *device_path = vd->device_path; in vd_setup_partition_vtoc()
5665 if (vd->dk_geom.dkg_nsect == 0) { in vd_setup_partition_vtoc()
5669 if (vd->dk_geom.dkg_nhead == 0) { in vd_setup_partition_vtoc()
5675 csize = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect; in vd_setup_partition_vtoc()
5681 vd->dk_geom.dkg_ncyl = vd->vdisk_size / csize + 1; in vd_setup_partition_vtoc()
5684 vd->dk_geom.dkg_acyl = 2; in vd_setup_partition_vtoc()
5685 vd->dk_geom.dkg_pcyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl; in vd_setup_partition_vtoc()
5689 bzero(vd->vtoc.v_part, sizeof (vd->vtoc.v_part)); in vd_setup_partition_vtoc()
5690 vd->vtoc.v_part[0].p_tag = V_UNASSIGNED; in vd_setup_partition_vtoc()
5691 vd->vtoc.v_part[0].p_flag = 0; in vd_setup_partition_vtoc()
5696 vd->vtoc.v_part[0].p_start = csize; /* start on cylinder 1 */ in vd_setup_partition_vtoc()
5697 vd->vtoc.v_part[0].p_size = (vd->vdisk_size / csize) * csize; in vd_setup_partition_vtoc()
5700 vd->vtoc.v_nparts = 1; in vd_setup_partition_vtoc()
5701 bcopy(VD_ASCIILABEL, vd->vtoc.v_asciilabel, in vd_setup_partition_vtoc()
5703 sizeof (vd->vtoc.v_asciilabel))); in vd_setup_partition_vtoc()
5704 bcopy(VD_VOLUME_NAME, vd->vtoc.v_volume, in vd_setup_partition_vtoc()
5705 MIN(sizeof (VD_VOLUME_NAME), sizeof (vd->vtoc.v_volume))); in vd_setup_partition_vtoc()
5708 vd->nslices = V_NUMPAR; in vd_setup_partition_vtoc()
5709 vd->vtoc.v_nparts = V_NUMPAR; in vd_setup_partition_vtoc()
5712 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_tag = V_BACKUP; in vd_setup_partition_vtoc()
5713 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_flag = 0; in vd_setup_partition_vtoc()
5714 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_start = 0; in vd_setup_partition_vtoc()
5715 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_size = in vd_setup_partition_vtoc()
5716 vd->dk_geom.dkg_ncyl * csize; in vd_setup_partition_vtoc()
5718 vd_get_readable_size(vd->vdisk_size * vd->vdisk_bsize, in vd_setup_partition_vtoc()
5726 vd->dk_geom.dkg_bcyl = 0; in vd_setup_partition_vtoc()
5727 vd->dk_geom.dkg_intrlv = 1; in vd_setup_partition_vtoc()
5728 vd->dk_geom.dkg_write_reinstruct = 0; in vd_setup_partition_vtoc()
5729 vd->dk_geom.dkg_read_reinstruct = 0; in vd_setup_partition_vtoc()
5735 (void) snprintf(vd->vtoc.v_asciilabel, LEN_DKL_ASCII, in vd_setup_partition_vtoc()
5738 vd->dk_geom.dkg_ncyl, vd->dk_geom.dkg_acyl, in vd_setup_partition_vtoc()
5739 vd->dk_geom.dkg_nhead, vd->dk_geom.dkg_nsect); in vd_setup_partition_vtoc()
5740 bzero(vd->vtoc.v_volume, sizeof (vd->vtoc.v_volume)); in vd_setup_partition_vtoc()
5743 vd->flabel_limit = (uint_t)csize; in vd_setup_partition_vtoc()
5744 vd->flabel_size = VD_LABEL_VTOC_SIZE(vd->vdisk_bsize); in vd_setup_partition_vtoc()
5745 vd->flabel = kmem_zalloc(vd->flabel_size, KM_SLEEP); in vd_setup_partition_vtoc()
5746 vd_vtocgeom_to_label(&vd->vtoc, &vd->dk_geom, in vd_setup_partition_vtoc()
5747 VD_LABEL_VTOC(vd)); in vd_setup_partition_vtoc()
5751 vd->vdisk_size += csize * 3; in vd_setup_partition_vtoc()
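
When a slice or volume is dressed up as a VTOC disk, source lines 5675-5751 derive the fake geometry from the backend size: csize blocks per cylinder, slice 0 starting on cylinder 1 and covering whole cylinders, and the exported size grown by three extra cylinders (line 5751). A worked sketch of that cylinder arithmetic; the head/sector counts and backend size are made-up values:

    #include <stdio.h>

    int
    main(void)
    {
            /* Hypothetical backend size (512-byte blocks) and geometry. */
            unsigned long vdisk_size = 204800;      /* ~100 MB backend */
            unsigned nhead = 96, nsect = 768;

            unsigned long csize = (unsigned long)nhead * nsect; /* blocks/cyl */
            unsigned long ncyl = vdisk_size / csize + 1;
            unsigned acyl = 2;
            unsigned long pcyl = ncyl + acyl;

            /* Slice 0 starts on cylinder 1 and covers whole cylinders only. */
            unsigned long s0_start = csize;
            unsigned long s0_size = (vdisk_size / csize) * csize;

            printf("csize=%lu ncyl=%lu pcyl=%lu slice0=[%lu, %lu)\n",
                csize, ncyl, pcyl, s0_start, s0_start + s0_size);
            return (0);
    }
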
5801 vd_setup_partition_efi(vd_t *vd) in vd_setup_partition_efi() argument
5811 ASSERT(vd->vdisk_bsize > 0); in vd_setup_partition_efi()
5813 bsize = vd->vdisk_bsize; in vd_setup_partition_efi()
5819 vd->flabel_limit = (uint_t)first_u_lba; in vd_setup_partition_efi()
5820 vd->flabel_size = VD_LABEL_EFI_SIZE(bsize); in vd_setup_partition_efi()
5821 vd->flabel = kmem_zalloc(vd->flabel_size, KM_SLEEP); in vd_setup_partition_efi()
5822 gpt = VD_LABEL_EFI_GPT(vd, bsize); in vd_setup_partition_efi()
5823 gpe = VD_LABEL_EFI_GPE(vd, bsize); in vd_setup_partition_efi()
5829 vd->vdisk_size += first_u_lba; in vd_setup_partition_efi()
5831 s0_end = vd->vdisk_size - 1; in vd_setup_partition_efi()
5849 vd->nslices = V_NUMPAR; in vd_setup_partition_efi()
5860 vd->vdisk_size += EFI_MIN_RESV_SIZE; in vd_setup_partition_efi()
5863 gpt->efi_gpt_LastUsableLBA = LE_64(vd->vdisk_size - 1); in vd_setup_partition_efi()
5866 vd->vdisk_size += (EFI_MIN_ARRAY_SIZE / bsize) + 1; in vd_setup_partition_efi()
5867 gpt->efi_gpt_AlternateLBA = LE_64(vd->vdisk_size - 1); in vd_setup_partition_efi()
5884 vd_setup_backend_vnode(vd_t *vd) in vd_setup_backend_vnode() argument
5888 char *file_path = vd->device_path; in vd_setup_backend_vnode()
5892 ASSERT(!vd->volume); in vd_setup_backend_vnode()
5894 if ((status = vn_open(file_path, UIO_SYSSPACE, vd->open_flags | FOFFMAX, in vd_setup_backend_vnode()
5895 0, &vd->file_vnode, 0, 0)) != 0) { in vd_setup_backend_vnode()
5897 status == EROFS) && (!(vd->initialized & VD_SETUP_ERROR) && in vd_setup_backend_vnode()
5898 !(DEVI_IS_ATTACHING(vd->vds->dip)))) { in vd_setup_backend_vnode()
5908 vd->file = B_TRUE; in vd_setup_backend_vnode()
5910 vd->max_xfer_sz = maxphys / DEV_BSIZE; /* default transfer size */ in vd_setup_backend_vnode()
5915 dev = vd->file_vnode->v_vfsp->vfs_dev; in vd_setup_backend_vnode()
5920 vd->vds->ldi_ident); in vd_setup_backend_vnode()
5927 (intptr_t)&dk_cinfo, (vd->open_flags | FKIOCTL), kcred, in vd_setup_backend_vnode()
5936 vd->max_xfer_sz = dk_cinfo.dki_maxtransfer; in vd_setup_backend_vnode()
5944 file_path, getmajor(dev), getminor(dev), vd->max_xfer_sz); in vd_setup_backend_vnode()
5946 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) in vd_setup_backend_vnode()
5947 status = vd_setup_slice_image(vd); in vd_setup_backend_vnode()
5949 status = vd_setup_disk_image(vd); in vd_setup_backend_vnode()
5955 vd_setup_slice_image(vd_t *vd) in vd_setup_slice_image() argument
5960 if ((status = vd_backend_check_size(vd)) != 0) { in vd_setup_slice_image()
5962 vd->device_path, status); in vd_setup_slice_image()
5966 vd->vdisk_media = VD_MEDIA_FIXED; in vd_setup_slice_image()
5967 vd->vdisk_label = (vd_slice_label == VD_DISK_LABEL_UNK)? in vd_setup_slice_image()
5970 if (vd->vdisk_label == VD_DISK_LABEL_EFI || in vd_setup_slice_image()
5971 vd->dskimg_size >= 2 * ONE_TERABYTE) { in vd_setup_slice_image()
5972 status = vd_setup_partition_efi(vd); in vd_setup_slice_image()
5980 vd_build_default_label(vd->dskimg_size, vd->vdisk_bsize, in vd_setup_slice_image()
5982 vd_label_to_vtocgeom(&label, &vd->vtoc, &vd->dk_geom); in vd_setup_slice_image()
5983 status = vd_setup_partition_vtoc(vd); in vd_setup_slice_image()
5990 vd_setup_disk_image(vd_t *vd) in vd_setup_disk_image() argument
5993 char *backend_path = vd->device_path; in vd_setup_disk_image()
5995 if ((status = vd_backend_check_size(vd)) != 0) { in vd_setup_disk_image()
6002 if (vd->dskimg_size < sizeof (struct dk_label)) { in vd_setup_disk_image()
6011 status = vd_dskimg_validate_geometry(vd); in vd_setup_disk_image()
6017 if (vd_dskimg_is_iso_image(vd)) { in vd_setup_disk_image()
6023 if ((vd->vdisk_size * vd->vdisk_bsize) > ONE_GIGABYTE) in vd_setup_disk_image()
6024 vd->vdisk_media = VD_MEDIA_DVD; in vd_setup_disk_image()
6026 vd->vdisk_media = VD_MEDIA_CD; in vd_setup_disk_image()
6028 vd->vdisk_media = VD_MEDIA_FIXED; in vd_setup_disk_image()
6033 if (vd->vdisk_label != VD_DISK_LABEL_UNK) { in vd_setup_disk_image()
6035 status = vd_dskimg_read_devid(vd, &vd->dskimg_devid); in vd_setup_disk_image()
6049 vd->dskimg_devid = NULL; in vd_setup_disk_image()
6061 if (ddi_devid_init(vd->vds->dip, DEVID_FAB, NULL, 0, in vd_setup_disk_image()
6062 &vd->dskimg_devid) != DDI_SUCCESS) { in vd_setup_disk_image()
6064 vd->dskimg_devid = NULL; in vd_setup_disk_image()
6073 if (vd->vdisk_label != VD_DISK_LABEL_UNK) { in vd_setup_disk_image()
6074 if (vd_dskimg_write_devid(vd, vd->dskimg_devid) != 0) { in vd_setup_disk_image()
6076 ddi_devid_free(vd->dskimg_devid); in vd_setup_disk_image()
6077 vd->dskimg_devid = NULL; in vd_setup_disk_image()
6098 vd_open_using_ldi_by_name(vd_t *vd, int flags) in vd_open_using_ldi_by_name() argument
6101 char *device_path = vd->device_path; in vd_open_using_ldi_by_name()
6105 &vd->ldi_handle[0], vd->vds->ldi_ident); in vd_open_using_ldi_by_name()
6114 kcred, &vd->ldi_handle[0], vd->vds->ldi_ident); in vd_open_using_ldi_by_name()
6118 vd->ldi_handle[0] = NULL; in vd_open_using_ldi_by_name()
6131 vd_setup_backend_ldi(vd_t *vd) in vd_setup_backend_ldi() argument
6135 char *device_path = vd->device_path; in vd_setup_backend_ldi()
6138 ASSERT(vd->ldi_handle[0] != NULL); in vd_setup_backend_ldi()
6139 ASSERT(vd->dev[0] != NULL); in vd_setup_backend_ldi()
6141 vd->file = B_FALSE; in vd_setup_backend_ldi()
6144 if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCINFO, in vd_setup_backend_ldi()
6145 (intptr_t)&dk_cinfo, (vd->open_flags | FKIOCTL), kcred, in vd_setup_backend_ldi()
6169 vd->open_flags &= ~FWRITE; in vd_setup_backend_ldi()
6171 } else if (vd->open_flags & FWRITE) { in vd_setup_backend_ldi()
6173 (void) ldi_close(vd->ldi_handle[0], vd->open_flags & ~FWRITE, in vd_setup_backend_ldi()
6175 status = vd_open_using_ldi_by_name(vd, vd->open_flags); in vd_setup_backend_ldi()
6184 vd->max_xfer_sz = dk_cinfo.dki_maxtransfer; in vd_setup_backend_ldi()
6190 vd->is_atapi_dev = vd_is_atapi_device(vd); in vd_setup_backend_ldi()
6202 if (vd->vdisk_type == VD_DISK_TYPE_DISK) { in vd_setup_backend_ldi()
6204 if (vd->volume) { in vd_setup_backend_ldi()
6206 return (vd_setup_disk_image(vd)); in vd_setup_backend_ldi()
6211 ASSERT(!vd->volume); in vd_setup_backend_ldi()
6213 vd->scsi = B_TRUE; in vd_setup_backend_ldi()
6214 return (vd_setup_full_disk(vd)); in vd_setup_backend_ldi()
6227 return (vd_setup_single_slice_disk(vd)); in vd_setup_backend_ldi()
6231 vd_setup_single_slice_disk(vd_t *vd) in vd_setup_single_slice_disk() argument
6235 char *device_path = vd->device_path; in vd_setup_single_slice_disk()
6238 vd->vdisk_media = VD_MEDIA_FIXED; in vd_setup_single_slice_disk()
6240 if (vd->volume) { in vd_setup_single_slice_disk()
6241 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE); in vd_setup_single_slice_disk()
6248 vd->vdisk_type = VD_DISK_TYPE_SLICE; in vd_setup_single_slice_disk()
6249 vd->nslices = 1; in vd_setup_single_slice_disk()
6252 if ((status = vd_backend_check_size(vd)) != 0) { in vd_setup_single_slice_disk()
6267 vd->vdisk_size >= ONE_TERABYTE / vd->vdisk_bsize) { in vd_setup_single_slice_disk()
6268 vd->vdisk_label = VD_DISK_LABEL_EFI; in vd_setup_single_slice_disk()
6270 status = ldi_ioctl(vd->ldi_handle[0], DKIOCGEXTVTOC, in vd_setup_single_slice_disk()
6271 (intptr_t)&vd->vtoc, (vd->open_flags | FKIOCTL), in vd_setup_single_slice_disk()
6276 status = ldi_ioctl(vd->ldi_handle[0], DKIOCGVTOC, in vd_setup_single_slice_disk()
6277 (intptr_t)&vtoc, (vd->open_flags | FKIOCTL), in vd_setup_single_slice_disk()
6279 vtoctoextvtoc(vtoc, vd->vtoc); in vd_setup_single_slice_disk()
6283 status = ldi_ioctl(vd->ldi_handle[0], DKIOCGGEOM, in vd_setup_single_slice_disk()
6284 (intptr_t)&vd->dk_geom, (vd->open_flags | FKIOCTL), in vd_setup_single_slice_disk()
6292 vd->vdisk_label = VD_DISK_LABEL_VTOC; in vd_setup_single_slice_disk()
6296 vd->vdisk_label = VD_DISK_LABEL_VTOC; in vd_setup_single_slice_disk()
6297 vd_build_default_label(vd->vdisk_size * vd->vdisk_bsize, in vd_setup_single_slice_disk()
6298 vd->vdisk_bsize, &label); in vd_setup_single_slice_disk()
6299 vd_label_to_vtocgeom(&label, &vd->vtoc, &vd->dk_geom); in vd_setup_single_slice_disk()
6302 vd->vdisk_label = VD_DISK_LABEL_EFI; in vd_setup_single_slice_disk()
6306 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) { in vd_setup_single_slice_disk()
6308 status = vd_setup_partition_vtoc(vd); in vd_setup_single_slice_disk()
6312 status = vd_setup_partition_efi(vd); in vd_setup_single_slice_disk()
6354 vd_backend_check_size(vd_t *vd) in vd_backend_check_size() argument
6363 if (vd->file) { in vd_backend_check_size()
6367 rv = VOP_GETATTR(vd->file_vnode, &vattr, 0, kcred, NULL); in vd_backend_check_size()
6369 PR0("VOP_GETATTR(%s) = errno %d", vd->device_path, rv); in vd_backend_check_size()
6376 } else if (vd->volume) { in vd_backend_check_size()
6379 rv = ldi_get_size(vd->ldi_handle[0], &backend_size); in vd_backend_check_size()
6381 PR0("ldi_get_size() failed for %s", vd->device_path); in vd_backend_check_size()
6390 rv = ldi_ioctl(vd->ldi_handle[0], DKIOCGMEDIAINFO, in vd_backend_check_size()
6391 (intptr_t)&minfo, (vd->open_flags | FKIOCTL), in vd_backend_check_size()
6395 vd->device_path, rv); in vd_backend_check_size()
6399 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) { in vd_backend_check_size()
6400 rv = ldi_get_size(vd->ldi_handle[0], &backend_size); in vd_backend_check_size()
6403 vd->device_path); in vd_backend_check_size()
6407 ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK); in vd_backend_check_size()
6429 old_size = vd->vdisk_size; in vd_backend_check_size()
6434 vd->vdisk_bsize == vdisk_bsize) in vd_backend_check_size()
6446 vd->vio_bshift = nshift; in vd_backend_check_size()
6447 vd->vdisk_size = new_size; in vd_backend_check_size()
6448 vd->vdisk_bsize = vdisk_bsize; in vd_backend_check_size()
6449 vd->backend_bsize = backend_bsize; in vd_backend_check_size()
6451 if (vd->file || vd->volume) in vd_backend_check_size()
6452 vd->dskimg_size = backend_size; in vd_backend_check_size()
6460 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) { in vd_backend_check_size()
6462 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) { in vd_backend_check_size()
6463 rv = vd_setup_partition_vtoc(vd); in vd_backend_check_size()
6466 "(err = %d)", vd->device_path, rv); in vd_backend_check_size()
6470 rv = vd_setup_partition_efi(vd); in vd_backend_check_size()
6473 "(err = %d)", vd->device_path, rv); in vd_backend_check_size()
6478 } else if (!vd->file && !vd->volume) { in vd_backend_check_size()
6480 ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK); in vd_backend_check_size()
6481 vd->vdisk_media = media; in vd_backend_check_size()
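After the size probe (VOP_GETATTR for a file backend at ref 6367, ldi_get_size() for a volume or slice at refs 6379/6400, DKIOCGMEDIAINFO for a whole device at ref 6390), vd_backend_check_size() records the new geometry at refs 6446-6452: a block shift, the size in blocks, and the virtual/backend block sizes. A standalone sketch of that bookkeeping; the helper name, return convention, and power-of-two check are assumptions:

#include <stdint.h>

/*
 * The exported size is the backend size divided into vdisk_bsize-sized
 * blocks, and the block shift caches log2(vdisk_bsize) so later code can
 * convert between byte offsets and block numbers with shifts instead of
 * divisions.  Returns -1 if the block size is not a power of two.
 */
static int
vd_compute_geometry(uint64_t backend_size, uint32_t vdisk_bsize,
    uint64_t *vdisk_sizep, uint32_t *vio_bshiftp)
{
    uint32_t bsize = vdisk_bsize;
    uint32_t nshift = 0;

    if (bsize == 0 || (bsize & (bsize - 1)) != 0)
        return (-1);            /* block size must be a power of two */
    while ((bsize >>= 1) != 0)
        nshift++;

    *vio_bshiftp = nshift;
    *vdisk_sizep = backend_size / vdisk_bsize;
    return (0);
}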
6501 vd_identify_dev(vd_t *vd, int *dtype) in vd_identify_dev() argument
6504 char *device_path = vd->device_path; in vd_identify_dev()
6507 vds_t *vds = vd->vds; in vd_identify_dev()
6509 status = vd_open_using_ldi_by_name(vd, vd->open_flags & ~FWRITE); in vd_identify_dev()
6516 if ((status = ldi_get_dev(vd->ldi_handle[0], &vd->dev[0])) != 0) { in vd_identify_dev()
6526 drv_name = ddi_major_to_name(getmajor(vd->dev[0])); in vd_identify_dev()
6555 vd->zvol = B_TRUE; in vd_identify_dev()
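vd_identify_dev() (refs 6501-6555) opens the backend read-only, pulls its dev_t out of the LDI handle, and maps the major number back to the owning driver to decide whether the backend is a zvol. A kernel-context sketch of that probe; the helper name and the literal "zfs" driver-name comparison are assumptions of this sketch:

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunldi.h>

/*
 * Sketch of the backend-driver probe at refs 6509-6555: take the dev_t
 * from the already-open LDI handle and map its major number back to the
 * owning driver's name.
 */
static int
vd_backend_is_zvol(ldi_handle_t lh, boolean_t *is_zvol)
{
    dev_t dev;
    char *drv_name;
    int status;

    if ((status = ldi_get_dev(lh, &dev)) != 0)
        return (status);

    drv_name = ddi_major_to_name(getmajor(dev));
    *is_zvol = (drv_name != NULL && strcmp(drv_name, "zfs") == 0)
        ? B_TRUE : B_FALSE;
    return (0);
}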
6563 vd_setup_vd(vd_t *vd) in vd_setup_vd() argument
6568 char *path = vd->device_path; in vd_setup_vd()
6585 vd->volume = B_FALSE; in vd_setup_vd()
6586 status = vd_setup_backend_vnode(vd); in vd_setup_vd()
6625 if ((status = vd_identify_dev(vd, &drv_type)) != 0) { in vd_setup_vd()
6641 vd->volume = B_TRUE; in vd_setup_vd()
6653 if (vd->volume && vd_volume_force_slice) { in vd_setup_vd()
6654 vd->vdisk_type = VD_DISK_TYPE_SLICE; in vd_setup_vd()
6655 vd->nslices = 1; in vd_setup_vd()
6658 status = vd_setup_backend_ldi(vd); in vd_setup_vd()
6675 if (!(vd->initialized & VD_SETUP_ERROR) && in vd_setup_vd()
6676 !(DEVI_IS_ATTACHING(vd->vds->dip))) { in vd_setup_vd()
6685 vd->initialized |= VD_SETUP_ERROR; in vd_setup_vd()
6687 } else if (vd->initialized & VD_SETUP_ERROR) { in vd_setup_vd()
6690 vd->initialized &= ~VD_SETUP_ERROR; in vd_setup_vd()
6701 if ((vd->file || vd->zvol) && vd->ioq == NULL) { in vd_setup_vd()
6702 (void) snprintf(tq_name, sizeof (tq_name), "vd_ioq%lu", vd->id); in vd_setup_vd()
6704 if ((vd->ioq = ddi_taskq_create(vd->vds->dip, tq_name, in vd_setup_vd()
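vd_setup_vd() retries the backend setup, and refs 6675-6690 show the one-shot error reporting it uses: VD_SETUP_ERROR is latched on the first failure after attach so later retries stay quiet, and a subsequent success clears it (file and zvol backends then get their own vd_ioq task queue at refs 6701-6704). A standalone sketch of that latch; the flag value and function name are illustrative, and in the driver the bit lives in vd->initialized:

#define SETUP_ERROR 0x1     /* "failure already reported" latch */

/*
 * Record the outcome of one setup attempt: report the first failure that
 * happens outside of attach, then stay quiet until setup succeeds again.
 */
static void
note_setup_result(unsigned int *flags, int status, int attaching)
{
    if (status != 0) {
        if (!(*flags & SETUP_ERROR) && !attaching) {
            /* first failure after attach: report it, then latch */
            *flags |= SETUP_ERROR;
        }
    } else if (*flags & SETUP_ERROR) {
        /* setup finally succeeded: re-arm the one-shot report */
        *flags &= ~SETUP_ERROR;
    }
}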
6722 vd_t *vd; in vds_do_init_vd() local
6730 if ((vd = kmem_zalloc(sizeof (*vd), KM_NOSLEEP)) == NULL) { in vds_do_init_vd()
6734 *vdp = vd; /* assign here so vds_destroy_vd() can cleanup later */ in vds_do_init_vd()
6735 vd->id = id; in vds_do_init_vd()
6736 vd->vds = vds; in vds_do_init_vd()
6737 (void) strncpy(vd->device_path, device_path, MAXPATHLEN); in vds_do_init_vd()
6740 vd->open_flags = FREAD; in vds_do_init_vd()
6743 vd->open_flags |= FWRITE; in vds_do_init_vd()
6746 vd->open_flags |= FEXCL; in vds_do_init_vd()
6750 vd->vdisk_type = VD_DISK_TYPE_SLICE; in vds_do_init_vd()
6751 vd->nslices = 1; in vds_do_init_vd()
6753 vd->vdisk_type = VD_DISK_TYPE_DISK; in vds_do_init_vd()
6754 vd->nslices = V_NUMPAR; in vds_do_init_vd()
6758 vd->vdisk_label = VD_DISK_LABEL_UNK; in vds_do_init_vd()
6761 if ((status = vd_setup_vd(vd)) == 0) { in vds_do_init_vd()
6762 vd->initialized |= VD_DISK_READY; in vds_do_init_vd()
6764 ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR); in vds_do_init_vd()
6766 ((vd->vdisk_type == VD_DISK_TYPE_DISK) ? "disk" : "slice"), in vds_do_init_vd()
6767 (vd->volume ? "yes" : "no"), (vd->file ? "yes" : "no"), in vds_do_init_vd()
6768 vd->nslices); in vds_do_init_vd()
6781 mutex_init(&vd->lock, NULL, MUTEX_DRIVER, iblock); in vds_do_init_vd()
6782 vd->initialized |= VD_LOCKING; in vds_do_init_vd()
6788 if ((vd->startq = ddi_taskq_create(vds->dip, tq_name, 1, in vds_do_init_vd()
6795 if ((vd->completionq = ddi_taskq_create(vds->dip, tq_name, 1, in vds_do_init_vd()
6802 vd->max_msglen = sizeof (vio_msg_t); /* baseline vio message size */ in vds_do_init_vd()
6803 vd->vio_msgp = kmem_alloc(vd->max_msglen, KM_SLEEP); in vds_do_init_vd()
6805 vd->enabled = 1; /* before callback can dispatch to startq */ in vds_do_init_vd()
6813 if ((status = ldc_init(ldc_id, &ldc_attr, &vd->ldc_handle)) != 0) { in vds_do_init_vd()
6818 vd->initialized |= VD_LDC; in vds_do_init_vd()
6820 if ((status = ldc_reg_callback(vd->ldc_handle, vd_handle_ldc_events, in vds_do_init_vd()
6821 (caddr_t)vd)) != 0) { in vds_do_init_vd()
6827 if ((status = ldc_open(vd->ldc_handle)) != 0) { in vds_do_init_vd()
6833 if ((status = ldc_up(vd->ldc_handle)) != 0) { in vds_do_init_vd()
6838 status = ldc_mem_alloc_handle(vd->ldc_handle, &(vd->inband_task.mhdl)); in vds_do_init_vd()
6846 if (mod_hash_insert(vds->vd_table, (mod_hash_key_t)id, vd) != 0) { in vds_do_init_vd()
6852 vd->state = VD_STATE_INIT; in vds_do_init_vd()
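vds_do_init_vd() finishes with the LDC bring-up at refs 6813-6852: initialize the channel, register the event callback before opening it, open and bring the channel up, allocate a memory handle for mapping in-band descriptor buffers, then publish the vd in the server's hash table and mark it VD_STATE_INIT. A kernel-context sketch of that ordering; error handling is simplified (the driver treats some of these failures as non-fatal), the helper name is assumed, and the ldc_attr_t contents are assumed to be filled in elsewhere:

#include <sys/types.h>
#include <sys/ldc.h>

/*
 * Sketch of the LDC bring-up order: registering the callback before
 * ldc_open() ensures no channel state transition is missed.
 */
static int
vd_bringup_ldc(uint64_t ldc_id, ldc_attr_t *attr,
    uint_t (*cb)(uint64_t event, caddr_t arg), caddr_t cb_arg,
    ldc_handle_t *lhp, ldc_mem_handle_t *mhp)
{
    int status;

    if ((status = ldc_init(ldc_id, attr, lhp)) != 0)
        return (status);
    if ((status = ldc_reg_callback(*lhp, cb, cb_arg)) != 0)
        return (status);
    if ((status = ldc_open(*lhp)) != 0)
        return (status);
    if ((status = ldc_up(*lhp)) != 0)
        return (status);
    return (ldc_mem_alloc_handle(*lhp, mhp));
}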
6889 vd_t *vd = (vd_t *)arg; in vds_destroy_vd() local
6892 if (vd == NULL) in vds_destroy_vd()
6898 if (vd->initialized & VD_LOCKING) { in vds_destroy_vd()
6899 mutex_enter(&vd->lock); in vds_destroy_vd()
6900 vd->enabled = 0; in vds_destroy_vd()
6901 mutex_exit(&vd->lock); in vds_destroy_vd()
6905 if (vd->startq != NULL) in vds_destroy_vd()
6906 ddi_taskq_destroy(vd->startq); /* waits for queued tasks */ in vds_destroy_vd()
6909 if (vd->ioq != NULL) in vds_destroy_vd()
6910 ddi_taskq_destroy(vd->ioq); in vds_destroy_vd()
6913 if (vd->completionq != NULL) in vds_destroy_vd()
6914 ddi_taskq_destroy(vd->completionq); /* waits for tasks */ in vds_destroy_vd()
6916 vd_free_dring_task(vd); in vds_destroy_vd()
6919 (void) ldc_mem_free_handle(vd->inband_task.mhdl); in vds_destroy_vd()
6922 if (vd->initialized & VD_LDC) { in vds_destroy_vd()
6924 if (vd->initialized & VD_DRING) in vds_destroy_vd()
6925 (void) ldc_mem_dring_unmap(vd->dring_handle); in vds_destroy_vd()
6928 while ((rv = ldc_close(vd->ldc_handle)) == EAGAIN) { in vds_destroy_vd()
6936 (void) ldc_unreg_callback(vd->ldc_handle); in vds_destroy_vd()
6937 (void) ldc_fini(vd->ldc_handle); in vds_destroy_vd()
6946 (void) ldc_set_cb_mode(vd->ldc_handle, LDC_CB_DISABLE); in vds_destroy_vd()
6947 while (ldc_unreg_callback(vd->ldc_handle) == EAGAIN) in vds_destroy_vd()
6953 if (vd->vio_msgp != NULL) { in vds_destroy_vd()
6954 kmem_free(vd->vio_msgp, vd->max_msglen); in vds_destroy_vd()
6955 vd->vio_msgp = NULL; in vds_destroy_vd()
6959 if (vd->inband_task.msg != NULL) { in vds_destroy_vd()
6960 kmem_free(vd->inband_task.msg, vd->max_msglen); in vds_destroy_vd()
6961 vd->inband_task.msg = NULL; in vds_destroy_vd()
6964 if (vd->file) { in vds_destroy_vd()
6966 (void) VOP_CLOSE(vd->file_vnode, vd->open_flags, 1, in vds_destroy_vd()
6968 VN_RELE(vd->file_vnode); in vds_destroy_vd()
6972 if (vd->ldi_handle[slice] != NULL) { in vds_destroy_vd()
6974 (void) ldi_close(vd->ldi_handle[slice], in vds_destroy_vd()
6975 vd->open_flags, kcred); in vds_destroy_vd()
6981 if (vd->dskimg_devid != NULL) in vds_destroy_vd()
6982 ddi_devid_free(vd->dskimg_devid); in vds_destroy_vd()
6985 if (vd->flabel) { in vds_destroy_vd()
6986 kmem_free(vd->flabel, vd->flabel_size); in vds_destroy_vd()
6987 vd->flabel = NULL; in vds_destroy_vd()
6988 vd->flabel_size = 0; in vds_destroy_vd()
6992 if (vd->initialized & VD_LOCKING) in vds_destroy_vd()
6993 mutex_destroy(&vd->lock); in vds_destroy_vd()
6996 kmem_free(vd, sizeof (*vd)); in vds_destroy_vd()
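vds_destroy_vd() tears down in roughly the reverse order of construction, releasing only what the vd->initialized bit mask (VD_LOCKING, VD_LDC, VD_DRING, ...) says was actually acquired; that is what lets a single routine clean up after a failure at any point of vds_do_init_vd(). A standalone sketch of the idiom with illustrative flag, structure, and resource names:

#define R_LOCK  0x1     /* mutex was initialized */
#define R_QUEUE 0x2     /* task queue was created */
#define R_CHAN  0x4     /* channel was brought up */

struct resource_owner {
    unsigned int initialized;   /* one bit per acquired resource */
    /* ... the resources themselves would live here ... */
};

/*
 * Release only what was acquired, in roughly reverse order of
 * acquisition, so partially initialized owners can be destroyed safely.
 */
static void
resource_owner_destroy(struct resource_owner *ro)
{
    if (ro == NULL)
        return;                 /* tolerate a failed allocation */
    if (ro->initialized & R_CHAN) {
        /* close and finalize the channel, retrying transient failures */
    }
    if (ro->initialized & R_QUEUE) {
        /* destroying the queue waits for queued tasks to drain */
    }
    if (ro->initialized & R_LOCK) {
        /* destroy the lock last; earlier steps may still need it */
    }
    /* finally free the owner structure itself */
}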
7004 vd_t *vd = NULL; in vds_init_vd() local
7008 ldc_id, &vd)) != 0) in vds_init_vd()
7009 vds_destroy_vd(vd); in vds_init_vd()
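vds_init_vd() is the construct-or-destroy wrapper: combined with the early "*vdp = vd" assignment at ref 6734, a failure at any later step inside vds_do_init_vd() is handled by a single vds_destroy_vd() call on the partially built object. A standalone sketch of the pattern with illustrative names:

#include <stdlib.h>

typedef struct obj { int ready; } obj_t;

static void
obj_destroy(obj_t *op)
{
    if (op == NULL)
        return;                 /* tolerate a failed allocation */
    free(op);
}

static int
obj_do_init(obj_t **objp)
{
    /* assign the out pointer first so the caller can always destroy */
    if ((*objp = calloc(1, sizeof (obj_t))) == NULL)
        return (-1);
    /* ... further initialization steps may fail here ... */
    (*objp)->ready = 1;
    return (0);
}

static int
obj_init(obj_t **objp)
{
    int status;

    if ((status = obj_do_init(objp)) != 0)
        obj_destroy(*objp);     /* cleans up whatever was built */
    return (status);
}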