xref: /titanic_44/usr/src/uts/common/fs/zfs/zvol.c (revision 5fd03bc0f2e00e7ba02316c2e08f45d52aab15db)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  *
24  * Portions Copyright 2010 Robert Milkowski
25  *
26  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
27  * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
28  * Copyright (c) 2013, Joyent, Inc. All rights reserved.
29  */
30 
31 /*
32  * ZFS volume emulation driver.
33  *
34  * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
35  * Volumes are accessed through the symbolic links named:
36  *
37  * /dev/zvol/dsk/<pool_name>/<dataset_name>
38  * /dev/zvol/rdsk/<pool_name>/<dataset_name>
39  *
40  * These links are created by the /dev filesystem (sdev_zvolops.c).
41  * Volumes are persistent through reboot.  No user command needs to be
42  * run before opening and using a device.
43  */
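
/*
 * Example (illustrative; the pool and volume names below are hypothetical):
 * "zfs create -V 1g tank/vol" creates the volume dataset tank/vol, after
 * which it can be accessed as /dev/zvol/dsk/tank/vol (block device) and
 * /dev/zvol/rdsk/tank/vol (character/raw device).
 */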
44 
45 #include <sys/types.h>
46 #include <sys/param.h>
47 #include <sys/errno.h>
48 #include <sys/uio.h>
49 #include <sys/buf.h>
50 #include <sys/modctl.h>
51 #include <sys/open.h>
52 #include <sys/kmem.h>
53 #include <sys/conf.h>
54 #include <sys/cmn_err.h>
55 #include <sys/stat.h>
56 #include <sys/zap.h>
57 #include <sys/spa.h>
58 #include <sys/spa_impl.h>
59 #include <sys/zio.h>
60 #include <sys/dmu_traverse.h>
61 #include <sys/dnode.h>
62 #include <sys/dsl_dataset.h>
63 #include <sys/dsl_prop.h>
64 #include <sys/dkio.h>
65 #include <sys/efi_partition.h>
66 #include <sys/byteorder.h>
67 #include <sys/pathname.h>
68 #include <sys/ddi.h>
69 #include <sys/sunddi.h>
70 #include <sys/crc32.h>
71 #include <sys/dirent.h>
72 #include <sys/policy.h>
73 #include <sys/fs/zfs.h>
74 #include <sys/zfs_ioctl.h>
75 #include <sys/mkdev.h>
76 #include <sys/zil.h>
77 #include <sys/refcount.h>
78 #include <sys/zfs_znode.h>
79 #include <sys/zfs_rlock.h>
80 #include <sys/vdev_disk.h>
81 #include <sys/vdev_impl.h>
82 #include <sys/vdev_raidz.h>
83 #include <sys/zvol.h>
84 #include <sys/dumphdr.h>
85 #include <sys/zil_impl.h>
86 #include <sys/dbuf.h>
87 #include <sys/dmu_tx.h>
88 #include <sys/zfeature.h>
89 #include <sys/zio_checksum.h>
90 
91 #include "zfs_namecheck.h"
92 
93 void *zfsdev_state;
94 static char *zvol_tag = "zvol_tag";
95 
96 #define	ZVOL_DUMPSIZE		"dumpsize"
97 
98 /*
99  * This lock protects the zfsdev_state structure from being modified
100  * while it's being used, e.g. an open that comes in before a create
101  * finishes.  It also protects temporary opens of the dataset so that,
102  * e.g., an open doesn't get a spurious EBUSY.
103  */
104 kmutex_t zfsdev_state_lock;
105 static uint32_t zvol_minors;
106 
107 typedef struct zvol_extent {
108 	list_node_t	ze_node;
109 	dva_t		ze_dva;		/* dva associated with this extent */
110 	uint64_t	ze_nblks;	/* number of blocks in extent */
111 } zvol_extent_t;
112 
113 /*
114  * The in-core state of each volume.
115  */
116 typedef struct zvol_state {
117 	char		zv_name[MAXPATHLEN]; /* pool/dd name */
118 	uint64_t	zv_volsize;	/* amount of space we advertise */
119 	uint64_t	zv_volblocksize; /* volume block size */
120 	minor_t		zv_minor;	/* minor number */
121 	uint8_t		zv_min_bs;	/* minimum addressable block shift */
122 	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
123 	objset_t	*zv_objset;	/* objset handle */
124 	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
125 	uint32_t	zv_total_opens;	/* total open count */
126 	zilog_t		*zv_zilog;	/* ZIL handle */
127 	list_t		zv_extents;	/* List of extents for dump */
128 	znode_t		zv_znode;	/* for range locking */
129 	dmu_buf_t	*zv_dbuf;	/* bonus handle */
130 } zvol_state_t;
131 
132 /*
133  * zvol specific flags
134  */
135 #define	ZVOL_RDONLY	0x1
136 #define	ZVOL_DUMPIFIED	0x2
137 #define	ZVOL_EXCL	0x4
138 #define	ZVOL_WCE	0x8
139 
140 /*
141  * zvol maximum transfer in one DMU tx.
142  */
143 int zvol_maxphys = DMU_MAX_ACCESS/2;
144 
145 /*
146  * Toggle unmap functionality.
147  */
148 boolean_t zvol_unmap_enabled = B_TRUE;
149 
150 extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
151     nvlist_t *, nvlist_t *);
152 static int zvol_remove_zv(zvol_state_t *);
153 static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
154 static int zvol_dumpify(zvol_state_t *zv);
155 static int zvol_dump_fini(zvol_state_t *zv);
156 static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
157 
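/*
 * Record the new size in the in-core state and in the DDI "Size"/"Nblocks"
 * device properties, then have specfs invalidate its cached size for both
 * the block and character nodes so the new size is seen immediately.
 */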
158 static void
159 zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
160 {
161 	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);
162 
163 	zv->zv_volsize = volsize;
164 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
165 	    "Size", volsize) == DDI_SUCCESS);
166 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
167 	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);
168 
169 	/* Notify specfs to invalidate the cached size */
170 	spec_size_invalidate(dev, VBLK);
171 	spec_size_invalidate(dev, VCHR);
172 }
173 
174 int
175 zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
176 {
177 	if (volsize == 0)
178 		return (SET_ERROR(EINVAL));
179 
180 	if (volsize % blocksize != 0)
181 		return (SET_ERROR(EINVAL));
182 
183 #ifdef _ILP32
184 	if (volsize - 1 > SPEC_MAXOFFSET_T)
185 		return (SET_ERROR(EOVERFLOW));
186 #endif
187 	return (0);
188 }
189 
190 int
191 zvol_check_volblocksize(uint64_t volblocksize)
192 {
193 	if (volblocksize < SPA_MINBLOCKSIZE ||
194 	    volblocksize > SPA_OLD_MAXBLOCKSIZE ||
195 	    !ISP2(volblocksize))
196 		return (SET_ERROR(EDOM));
197 
198 	return (0);
199 }
200 
201 int
202 zvol_get_stats(objset_t *os, nvlist_t *nv)
203 {
204 	int error;
205 	dmu_object_info_t doi;
206 	uint64_t val;
207 
208 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
209 	if (error)
210 		return (error);
211 
212 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
213 
214 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
215 
216 	if (error == 0) {
217 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
218 		    doi.doi_data_block_size);
219 	}
220 
221 	return (error);
222 }
223 
224 static zvol_state_t *
225 zvol_minor_lookup(const char *name)
226 {
227 	minor_t minor;
228 	zvol_state_t *zv;
229 
230 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
231 
232 	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
233 		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
234 		if (zv == NULL)
235 			continue;
236 		if (strcmp(zv->zv_name, name) == 0)
237 			return (zv);
238 	}
239 
240 	return (NULL);
241 }
242 
243 /* extent mapping arg */
244 struct maparg {
245 	zvol_state_t	*ma_zv;
246 	uint64_t	ma_blks;
247 };
248 
249 /*ARGSUSED*/
250 static int
251 zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
252     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
253 {
254 	struct maparg *ma = arg;
255 	zvol_extent_t *ze;
256 	int bs = ma->ma_zv->zv_volblocksize;
257 
258 	if (BP_IS_HOLE(bp) ||
259 	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
260 		return (0);
261 
262 	VERIFY(!BP_IS_EMBEDDED(bp));
263 
264 	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
265 	ma->ma_blks++;
266 
267 	/* Abort immediately if we have encountered gang blocks */
268 	if (BP_IS_GANG(bp))
269 		return (SET_ERROR(EFRAGS));
270 
271 	/*
272 	 * See if the block is at the end of the previous extent.
273 	 */
274 	ze = list_tail(&ma->ma_zv->zv_extents);
275 	if (ze &&
276 	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
277 	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
278 	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
279 		ze->ze_nblks++;
280 		return (0);
281 	}
282 
283 	dprintf_bp(bp, "%s", "next blkptr:");
284 
285 	/* start a new extent */
286 	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
287 	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
288 	ze->ze_nblks = 1;
289 	list_insert_tail(&ma->ma_zv->zv_extents, ze);
290 	return (0);
291 }
292 
293 static void
294 zvol_free_extents(zvol_state_t *zv)
295 {
296 	zvol_extent_t *ze;
297 
298 	while (ze = list_head(&zv->zv_extents)) {
299 		list_remove(&zv->zv_extents, ze);
300 		kmem_free(ze, sizeof (zvol_extent_t));
301 	}
302 }
303 
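/*
 * Build the in-core extent list (DVAs) that backs the volume so that dump
 * I/O can go straight to the vdevs.  Fails if any gang blocks are
 * encountered or if the volume is not fully allocated.
 */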
304 static int
305 zvol_get_lbas(zvol_state_t *zv)
306 {
307 	objset_t *os = zv->zv_objset;
308 	struct maparg	ma;
309 	int		err;
310 
311 	ma.ma_zv = zv;
312 	ma.ma_blks = 0;
313 	zvol_free_extents(zv);
314 
315 	/* commit any in-flight changes before traversing the dataset */
316 	txg_wait_synced(dmu_objset_pool(os), 0);
317 	err = traverse_dataset(dmu_objset_ds(os), 0,
318 	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
319 	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
320 		zvol_free_extents(zv);
321 		return (err ? err : EIO);
322 	}
323 
324 	return (0);
325 }
326 
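/*
 * Dataset creation callback, called from the DMU with an assigned
 * transaction: strips volsize/volblocksize from the property list, claims
 * the data object and the property ZAP, and records the initial volume size.
 */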
327 /* ARGSUSED */
328 void
329 zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
330 {
331 	zfs_creat_t *zct = arg;
332 	nvlist_t *nvprops = zct->zct_props;
333 	int error;
334 	uint64_t volblocksize, volsize;
335 
336 	VERIFY(nvlist_lookup_uint64(nvprops,
337 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
338 	if (nvlist_lookup_uint64(nvprops,
339 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
340 		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
341 
342 	/*
343 	 * These properties must be removed from the list so the generic
344 	 * property setting step won't apply to them.
345 	 */
346 	VERIFY(nvlist_remove_all(nvprops,
347 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
348 	(void) nvlist_remove_all(nvprops,
349 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
350 
351 	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
352 	    DMU_OT_NONE, 0, tx);
353 	ASSERT(error == 0);
354 
355 	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
356 	    DMU_OT_NONE, 0, tx);
357 	ASSERT(error == 0);
358 
359 	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
360 	ASSERT(error == 0);
361 }
362 
363 /*
364  * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
365  * implement DKIOCFREE/free-long-range.
366  */
367 static int
368 zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
369 {
370 	uint64_t offset, length;
371 
372 	if (byteswap)
373 		byteswap_uint64_array(lr, sizeof (*lr));
374 
375 	offset = lr->lr_offset;
376 	length = lr->lr_length;
377 
378 	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
379 }
380 
381 /*
382  * Replay a TX_WRITE ZIL transaction that didn't get committed
383  * after a system failure
384  */
385 static int
386 zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
387 {
388 	objset_t *os = zv->zv_objset;
389 	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
390 	uint64_t offset, length;
391 	dmu_tx_t *tx;
392 	int error;
393 
394 	if (byteswap)
395 		byteswap_uint64_array(lr, sizeof (*lr));
396 
397 	offset = lr->lr_offset;
398 	length = lr->lr_length;
399 
400 	/* If it's a dmu_sync() block, write the whole block */
401 	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
402 		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
403 		if (length < blocksize) {
404 			offset -= offset % blocksize;
405 			length = blocksize;
406 		}
407 	}
408 
409 	tx = dmu_tx_create(os);
410 	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
411 	error = dmu_tx_assign(tx, TXG_WAIT);
412 	if (error) {
413 		dmu_tx_abort(tx);
414 	} else {
415 		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
416 		dmu_tx_commit(tx);
417 	}
418 
419 	return (error);
420 }
421 
422 /* ARGSUSED */
423 static int
424 zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
425 {
426 	return (SET_ERROR(ENOTSUP));
427 }
428 
429 /*
430  * Callback vectors for replaying records.
431  * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
432  */
433 zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
434 	zvol_replay_err,	/* 0 no such transaction type */
435 	zvol_replay_err,	/* TX_CREATE */
436 	zvol_replay_err,	/* TX_MKDIR */
437 	zvol_replay_err,	/* TX_MKXATTR */
438 	zvol_replay_err,	/* TX_SYMLINK */
439 	zvol_replay_err,	/* TX_REMOVE */
440 	zvol_replay_err,	/* TX_RMDIR */
441 	zvol_replay_err,	/* TX_LINK */
442 	zvol_replay_err,	/* TX_RENAME */
443 	zvol_replay_write,	/* TX_WRITE */
444 	zvol_replay_truncate,	/* TX_TRUNCATE */
445 	zvol_replay_err,	/* TX_SETATTR */
446 	zvol_replay_err,	/* TX_ACL */
447 	zvol_replay_err,	/* TX_CREATE_ACL */
448 	zvol_replay_err,	/* TX_CREATE_ATTR */
449 	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
450 	zvol_replay_err,	/* TX_MKDIR_ACL */
451 	zvol_replay_err,	/* TX_MKDIR_ATTR */
452 	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
453 	zvol_replay_err,	/* TX_WRITE2 */
454 };
455 
456 int
457 zvol_name2minor(const char *name, minor_t *minor)
458 {
459 	zvol_state_t *zv;
460 
461 	mutex_enter(&zfsdev_state_lock);
462 	zv = zvol_minor_lookup(name);
463 	if (minor && zv)
464 		*minor = zv->zv_minor;
465 	mutex_exit(&zfsdev_state_lock);
466 	return (zv ? 0 : -1);
467 }
468 
469 /*
470  * Create a minor node (plus a whole lot more) for the specified volume.
471  */
472 int
473 zvol_create_minor(const char *name)
474 {
475 	zfs_soft_state_t *zs;
476 	zvol_state_t *zv;
477 	objset_t *os;
478 	dmu_object_info_t doi;
479 	minor_t minor = 0;
480 	char chrbuf[30], blkbuf[30];
481 	int error;
482 
483 	mutex_enter(&zfsdev_state_lock);
484 
485 	if (zvol_minor_lookup(name) != NULL) {
486 		mutex_exit(&zfsdev_state_lock);
487 		return (SET_ERROR(EEXIST));
488 	}
489 
490 	/* lie and say we're read-only */
491 	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
492 
493 	if (error) {
494 		mutex_exit(&zfsdev_state_lock);
495 		return (error);
496 	}
497 
498 	if ((minor = zfsdev_minor_alloc()) == 0) {
499 		dmu_objset_disown(os, FTAG);
500 		mutex_exit(&zfsdev_state_lock);
501 		return (SET_ERROR(ENXIO));
502 	}
503 
504 	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
505 		dmu_objset_disown(os, FTAG);
506 		mutex_exit(&zfsdev_state_lock);
507 		return (SET_ERROR(EAGAIN));
508 	}
509 	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
510 	    (char *)name);
511 
512 	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);
513 
514 	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
515 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
516 		ddi_soft_state_free(zfsdev_state, minor);
517 		dmu_objset_disown(os, FTAG);
518 		mutex_exit(&zfsdev_state_lock);
519 		return (SET_ERROR(EAGAIN));
520 	}
521 
522 	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);
523 
524 	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
525 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
526 		ddi_remove_minor_node(zfs_dip, chrbuf);
527 		ddi_soft_state_free(zfsdev_state, minor);
528 		dmu_objset_disown(os, FTAG);
529 		mutex_exit(&zfsdev_state_lock);
530 		return (SET_ERROR(EAGAIN));
531 	}
532 
533 	zs = ddi_get_soft_state(zfsdev_state, minor);
534 	zs->zss_type = ZSST_ZVOL;
535 	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
536 	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
537 	zv->zv_min_bs = DEV_BSHIFT;
538 	zv->zv_minor = minor;
539 	zv->zv_objset = os;
540 	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
541 		zv->zv_flags |= ZVOL_RDONLY;
542 	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
543 	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
544 	    sizeof (rl_t), offsetof(rl_t, r_node));
545 	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
546 	    offsetof(zvol_extent_t, ze_node));
547 	/* get and cache the blocksize */
548 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
549 	ASSERT(error == 0);
550 	zv->zv_volblocksize = doi.doi_data_block_size;
551 
552 	if (spa_writeable(dmu_objset_spa(os))) {
553 		if (zil_replay_disable)
554 			zil_destroy(dmu_objset_zil(os), B_FALSE);
555 		else
556 			zil_replay(os, zv, zvol_replay_vector);
557 	}
558 	dmu_objset_disown(os, FTAG);
559 	zv->zv_objset = NULL;
560 
561 	zvol_minors++;
562 
563 	mutex_exit(&zfsdev_state_lock);
564 
565 	return (0);
566 }
567 
568 /*
569  * Remove minor node for the specified volume.
570  */
571 static int
572 zvol_remove_zv(zvol_state_t *zv)
573 {
574 	char nmbuf[20];
575 	minor_t minor = zv->zv_minor;
576 
577 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
578 	if (zv->zv_total_opens != 0)
579 		return (SET_ERROR(EBUSY));
580 
581 	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
582 	ddi_remove_minor_node(zfs_dip, nmbuf);
583 
584 	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
585 	ddi_remove_minor_node(zfs_dip, nmbuf);
586 
587 	avl_destroy(&zv->zv_znode.z_range_avl);
588 	mutex_destroy(&zv->zv_znode.z_range_lock);
589 
590 	kmem_free(zv, sizeof (zvol_state_t));
591 
592 	ddi_soft_state_free(zfsdev_state, minor);
593 
594 	zvol_minors--;
595 	return (0);
596 }
597 
598 int
599 zvol_remove_minor(const char *name)
600 {
601 	zvol_state_t *zv;
602 	int rc;
603 
604 	mutex_enter(&zfsdev_state_lock);
605 	if ((zv = zvol_minor_lookup(name)) == NULL) {
606 		mutex_exit(&zfsdev_state_lock);
607 		return (SET_ERROR(ENXIO));
608 	}
609 	rc = zvol_remove_zv(zv);
610 	mutex_exit(&zfsdev_state_lock);
611 	return (rc);
612 }
613 
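/*
 * First open of a volume: take a long-term hold on the objset, look up and
 * publish the current size, grab the bonus buffer, open the ZIL, and latch
 * the effective read-only state into zv_flags.
 */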
614 int
615 zvol_first_open(zvol_state_t *zv)
616 {
617 	objset_t *os;
618 	uint64_t volsize;
619 	int error;
620 	uint64_t readonly;
621 
622 	/* lie and say we're read-only */
623 	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
624 	    zvol_tag, &os);
625 	if (error)
626 		return (error);
627 
628 	zv->zv_objset = os;
629 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
630 	if (error) {
631 		ASSERT(error == 0);
632 		dmu_objset_disown(os, zvol_tag);
633 		return (error);
634 	}
635 
636 	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
637 	if (error) {
638 		dmu_objset_disown(os, zvol_tag);
639 		return (error);
640 	}
641 
642 	zvol_size_changed(zv, volsize);
643 	zv->zv_zilog = zil_open(os, zvol_get_data);
644 
645 	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
646 	    NULL) == 0);
647 	if (readonly || dmu_objset_is_snapshot(os) ||
648 	    !spa_writeable(dmu_objset_spa(os)))
649 		zv->zv_flags |= ZVOL_RDONLY;
650 	else
651 		zv->zv_flags &= ~ZVOL_RDONLY;
652 	return (error);
653 }
654 
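/*
 * Last close of a volume: undo zvol_first_open() by closing the ZIL,
 * releasing the bonus buffer, syncing out and evicting any cached dbufs,
 * and disowning the objset.
 */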
655 void
656 zvol_last_close(zvol_state_t *zv)
657 {
658 	zil_close(zv->zv_zilog);
659 	zv->zv_zilog = NULL;
660 
661 	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
662 	zv->zv_dbuf = NULL;
663 
664 	/*
665 	 * Evict cached data
666 	 */
667 	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
668 	    !(zv->zv_flags & ZVOL_RDONLY))
669 		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
670 	dmu_objset_evict_dbufs(zv->zv_objset);
671 
672 	dmu_objset_disown(zv->zv_objset, zvol_tag);
673 	zv->zv_objset = NULL;
674 }
675 
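/*
 * Preallocate every block of the volume (needed before it can be used as a
 * dump device).  If an allocation fails part way through, whatever was
 * preallocated so far is freed again before returning the error.
 */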
676 int
677 zvol_prealloc(zvol_state_t *zv)
678 {
679 	objset_t *os = zv->zv_objset;
680 	dmu_tx_t *tx;
681 	uint64_t refd, avail, usedobjs, availobjs;
682 	uint64_t resid = zv->zv_volsize;
683 	uint64_t off = 0;
684 
685 	/* Check the space usage before attempting to allocate the space */
686 	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
687 	if (avail < zv->zv_volsize)
688 		return (SET_ERROR(ENOSPC));
689 
690 	/* Free old extents if they exist */
691 	zvol_free_extents(zv);
692 
693 	while (resid != 0) {
694 		int error;
695 		uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE);
696 
697 		tx = dmu_tx_create(os);
698 		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
699 		error = dmu_tx_assign(tx, TXG_WAIT);
700 		if (error) {
701 			dmu_tx_abort(tx);
702 			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
703 			return (error);
704 		}
705 		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
706 		dmu_tx_commit(tx);
707 		off += bytes;
708 		resid -= bytes;
709 	}
710 	txg_wait_synced(dmu_objset_pool(os), 0);
711 
712 	return (0);
713 }
714 
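/*
 * Persist a new volume size: update the "size" entry in the property ZAP
 * and free any blocks that now lie beyond the end of the volume.
 */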
715 static int
716 zvol_update_volsize(objset_t *os, uint64_t volsize)
717 {
718 	dmu_tx_t *tx;
719 	int error;
720 
721 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
722 
723 	tx = dmu_tx_create(os);
724 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
725 	dmu_tx_mark_netfree(tx);
726 	error = dmu_tx_assign(tx, TXG_WAIT);
727 	if (error) {
728 		dmu_tx_abort(tx);
729 		return (error);
730 	}
731 
732 	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
733 	    &volsize, tx);
734 	dmu_tx_commit(tx);
735 
736 	if (error == 0)
737 		error = dmu_free_long_range(os,
738 		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
739 	return (error);
740 }
741 
742 void
743 zvol_remove_minors(const char *name)
744 {
745 	zvol_state_t *zv;
746 	char *namebuf;
747 	minor_t minor;
748 
749 	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
750 	(void) strncpy(namebuf, name, strlen(name));
751 	(void) strcat(namebuf, "/");
752 	mutex_enter(&zfsdev_state_lock);
753 	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
754 
755 		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
756 		if (zv == NULL)
757 			continue;
758 		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
759 			(void) zvol_remove_zv(zv);
760 	}
761 	kmem_free(namebuf, strlen(name) + 2);
762 
763 	mutex_exit(&zfsdev_state_lock);
764 }
765 
766 static int
767 zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
768 {
769 	uint64_t old_volsize = 0ULL;
770 	int error = 0;
771 
772 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
773 
774 	/*
775 	 * Reinitialize the dump area to the new size. If we
776 	 * failed to resize the dump area then restore it back to
777 	 * its original size.  We must set the new volsize prior
778 	 * to calling dumpvp_resize() to ensure that the device's
779 	 * new size(9P) is visible to the dump subsystem.
780 	 */
781 	old_volsize = zv->zv_volsize;
782 	zvol_size_changed(zv, volsize);
783 
784 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
785 		if ((error = zvol_dumpify(zv)) != 0 ||
786 		    (error = dumpvp_resize()) != 0) {
787 			int dumpify_error;
788 
789 			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
790 			zvol_size_changed(zv, old_volsize);
791 			dumpify_error = zvol_dumpify(zv);
792 			error = dumpify_error ? dumpify_error : error;
793 		}
794 	}
795 
796 	/*
797 	 * Generate a LUN expansion event.
798 	 */
799 	if (error == 0) {
800 		sysevent_id_t eid;
801 		nvlist_t *attr;
802 		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
803 
804 		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
805 		    zv->zv_minor);
806 
807 		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
808 		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
809 
810 		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
811 		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
812 
813 		nvlist_free(attr);
814 		kmem_free(physpath, MAXPATHLEN);
815 	}
816 	return (error);
817 }
818 
819 int
820 zvol_set_volsize(const char *name, uint64_t volsize)
821 {
822 	zvol_state_t *zv = NULL;
823 	objset_t *os;
824 	int error;
825 	dmu_object_info_t doi;
826 	uint64_t readonly;
827 	boolean_t owned = B_FALSE;
828 
829 	error = dsl_prop_get_integer(name,
830 	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
831 	if (error != 0)
832 		return (error);
833 	if (readonly)
834 		return (SET_ERROR(EROFS));
835 
836 	mutex_enter(&zfsdev_state_lock);
837 	zv = zvol_minor_lookup(name);
838 
839 	if (zv == NULL || zv->zv_objset == NULL) {
840 		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
841 		    FTAG, &os)) != 0) {
842 			mutex_exit(&zfsdev_state_lock);
843 			return (error);
844 		}
845 		owned = B_TRUE;
846 		if (zv != NULL)
847 			zv->zv_objset = os;
848 	} else {
849 		os = zv->zv_objset;
850 	}
851 
852 	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
853 	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
854 		goto out;
855 
856 	error = zvol_update_volsize(os, volsize);
857 
858 	if (error == 0 && zv != NULL)
859 		error = zvol_update_live_volsize(zv, volsize);
860 out:
861 	if (owned) {
862 		dmu_objset_disown(os, FTAG);
863 		if (zv != NULL)
864 			zv->zv_objset = NULL;
865 	}
866 	mutex_exit(&zfsdev_state_lock);
867 	return (error);
868 }
869 
870 /*ARGSUSED*/
871 int
872 zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
873 {
874 	zvol_state_t *zv;
875 	int err = 0;
876 
877 	mutex_enter(&zfsdev_state_lock);
878 
879 	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
880 	if (zv == NULL) {
881 		mutex_exit(&zfsdev_state_lock);
882 		return (SET_ERROR(ENXIO));
883 	}
884 
885 	if (zv->zv_total_opens == 0)
886 		err = zvol_first_open(zv);
887 	if (err) {
888 		mutex_exit(&zfsdev_state_lock);
889 		return (err);
890 	}
891 	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
892 		err = SET_ERROR(EROFS);
893 		goto out;
894 	}
895 	if (zv->zv_flags & ZVOL_EXCL) {
896 		err = SET_ERROR(EBUSY);
897 		goto out;
898 	}
899 	if (flag & FEXCL) {
900 		if (zv->zv_total_opens != 0) {
901 			err = SET_ERROR(EBUSY);
902 			goto out;
903 		}
904 		zv->zv_flags |= ZVOL_EXCL;
905 	}
906 
907 	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
908 		zv->zv_open_count[otyp]++;
909 		zv->zv_total_opens++;
910 	}
911 	mutex_exit(&zfsdev_state_lock);
912 
913 	return (err);
914 out:
915 	if (zv->zv_total_opens == 0)
916 		zvol_last_close(zv);
917 	mutex_exit(&zfsdev_state_lock);
918 	return (err);
919 }
920 
921 /*ARGSUSED*/
922 int
923 zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
924 {
925 	minor_t minor = getminor(dev);
926 	zvol_state_t *zv;
927 	int error = 0;
928 
929 	mutex_enter(&zfsdev_state_lock);
930 
931 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
932 	if (zv == NULL) {
933 		mutex_exit(&zfsdev_state_lock);
934 		return (SET_ERROR(ENXIO));
935 	}
936 
937 	if (zv->zv_flags & ZVOL_EXCL) {
938 		ASSERT(zv->zv_total_opens == 1);
939 		zv->zv_flags &= ~ZVOL_EXCL;
940 	}
941 
942 	/*
943 	 * If the open count is zero, this is a spurious close.
944 	 * That indicates a bug in the kernel / DDI framework.
945 	 */
946 	ASSERT(zv->zv_open_count[otyp] != 0);
947 	ASSERT(zv->zv_total_opens != 0);
948 
949 	/*
950 	 * You may get multiple opens, but only one close.
951 	 */
952 	zv->zv_open_count[otyp]--;
953 	zv->zv_total_opens--;
954 
955 	if (zv->zv_total_opens == 0)
956 		zvol_last_close(zv);
957 
958 	mutex_exit(&zfsdev_state_lock);
959 	return (error);
960 }
961 
962 static void
963 zvol_get_done(zgd_t *zgd, int error)
964 {
965 	if (zgd->zgd_db)
966 		dmu_buf_rele(zgd->zgd_db, zgd);
967 
968 	zfs_range_unlock(zgd->zgd_rl);
969 
970 	if (error == 0 && zgd->zgd_bp)
971 		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
972 
973 	kmem_free(zgd, sizeof (zgd_t));
974 }
975 
976 /*
977  * Get data to generate a TX_WRITE intent log record.
978  */
979 static int
980 zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
981 {
982 	zvol_state_t *zv = arg;
983 	objset_t *os = zv->zv_objset;
984 	uint64_t object = ZVOL_OBJ;
985 	uint64_t offset = lr->lr_offset;
986 	uint64_t size = lr->lr_length;	/* length of user data */
987 	blkptr_t *bp = &lr->lr_blkptr;
988 	dmu_buf_t *db;
989 	zgd_t *zgd;
990 	int error;
991 
992 	ASSERT(zio != NULL);
993 	ASSERT(size != 0);
994 
995 	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
996 	zgd->zgd_zilog = zv->zv_zilog;
997 	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
998 
999 	/*
1000 	 * Write records come in two flavors: immediate and indirect.
1001 	 * For small writes it's cheaper to store the data with the
1002 	 * log record (immediate); for large writes it's cheaper to
1003 	 * sync the data and get a pointer to it (indirect) so that
1004 	 * we don't have to write the data twice.
1005 	 */
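	/*
	 * For example (sizes purely illustrative): for a small 4K write the
	 * data is simply read back here into the caller-supplied log buffer
	 * (the immediate case), while a write covering a whole volume block
	 * is pushed to its final location with dmu_sync() and only the
	 * resulting block pointer is referenced from the log record (the
	 * indirect case).
	 */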
1006 	if (buf != NULL) {	/* immediate write */
1007 		error = dmu_read(os, object, offset, size, buf,
1008 		    DMU_READ_NO_PREFETCH);
1009 	} else {
1010 		size = zv->zv_volblocksize;
1011 		offset = P2ALIGN(offset, size);
1012 		error = dmu_buf_hold(os, object, offset, zgd, &db,
1013 		    DMU_READ_NO_PREFETCH);
1014 		if (error == 0) {
1015 			blkptr_t *obp = dmu_buf_get_blkptr(db);
1016 			if (obp) {
1017 				ASSERT(BP_IS_HOLE(bp));
1018 				*bp = *obp;
1019 			}
1020 
1021 			zgd->zgd_db = db;
1022 			zgd->zgd_bp = bp;
1023 
1024 			ASSERT(db->db_offset == offset);
1025 			ASSERT(db->db_size == size);
1026 
1027 			error = dmu_sync(zio, lr->lr_common.lrc_txg,
1028 			    zvol_get_done, zgd);
1029 
1030 			if (error == 0)
1031 				return (0);
1032 		}
1033 	}
1034 
1035 	zvol_get_done(zgd, error);
1036 
1037 	return (error);
1038 }
1039 
1040 /*
1041  * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
1042  *
1043  * We store data in the log buffers if it's small enough.
1044  * Otherwise we will later flush the data out via dmu_sync().
1045  */
1046 ssize_t zvol_immediate_write_sz = 32768;
1047 
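/*
 * Each chunk of a write below is logged in one of three ways: WR_INDIRECT
 * when the chunk is block-aligned, covers a whole block, the blocksize
 * exceeds the immediate-write threshold, and no slog is in use (the data
 * goes out via dmu_sync() and only the block pointer is logged); WR_COPIED
 * for synchronous writes small enough to embed the data in the itx itself
 * (falling back to WR_NEED_COPY if the copy fails); and WR_NEED_COPY
 * otherwise, deferring the copy until the itx is committed.
 */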
1048 static void
1049 zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
1050     boolean_t sync)
1051 {
1052 	uint32_t blocksize = zv->zv_volblocksize;
1053 	zilog_t *zilog = zv->zv_zilog;
1054 	boolean_t slogging;
1055 	ssize_t immediate_write_sz;
1056 
1057 	if (zil_replaying(zilog, tx))
1058 		return;
1059 
1060 	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
1061 	    ? 0 : zvol_immediate_write_sz;
1062 
1063 	slogging = spa_has_slogs(zilog->zl_spa) &&
1064 	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
1065 
1066 	while (resid) {
1067 		itx_t *itx;
1068 		lr_write_t *lr;
1069 		ssize_t len;
1070 		itx_wr_state_t write_state;
1071 
1072 		/*
1073 		 * Unlike zfs_log_write() we can be called with
1074 		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
1075 		 */
1076 		if (blocksize > immediate_write_sz && !slogging &&
1077 		    resid >= blocksize && off % blocksize == 0) {
1078 			write_state = WR_INDIRECT; /* uses dmu_sync */
1079 			len = blocksize;
1080 		} else if (sync) {
1081 			write_state = WR_COPIED;
1082 			len = MIN(ZIL_MAX_LOG_DATA, resid);
1083 		} else {
1084 			write_state = WR_NEED_COPY;
1085 			len = MIN(ZIL_MAX_LOG_DATA, resid);
1086 		}
1087 
1088 		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
1089 		    (write_state == WR_COPIED ? len : 0));
1090 		lr = (lr_write_t *)&itx->itx_lr;
1091 		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
1092 		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
1093 			zil_itx_destroy(itx);
1094 			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1095 			lr = (lr_write_t *)&itx->itx_lr;
1096 			write_state = WR_NEED_COPY;
1097 		}
1098 
1099 		itx->itx_wr_state = write_state;
1100 		if (write_state == WR_NEED_COPY)
1101 			itx->itx_sod += len;
1102 		lr->lr_foid = ZVOL_OBJ;
1103 		lr->lr_offset = off;
1104 		lr->lr_length = len;
1105 		lr->lr_blkoff = 0;
1106 		BP_ZERO(&lr->lr_blkptr);
1107 
1108 		itx->itx_private = zv;
1109 		itx->itx_sync = sync;
1110 
1111 		zil_itx_assign(zilog, itx, tx);
1112 
1113 		off += len;
1114 		resid -= len;
1115 	}
1116 }
1117 
1118 static int
1119 zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
1120     uint64_t size, boolean_t doread, boolean_t isdump)
1121 {
1122 	vdev_disk_t *dvd;
1123 	int c;
1124 	int numerrors = 0;
1125 
1126 	if (vd->vdev_ops == &vdev_mirror_ops ||
1127 	    vd->vdev_ops == &vdev_replacing_ops ||
1128 	    vd->vdev_ops == &vdev_spare_ops) {
1129 		for (c = 0; c < vd->vdev_children; c++) {
1130 			int err = zvol_dumpio_vdev(vd->vdev_child[c],
1131 			    addr, offset, origoffset, size, doread, isdump);
1132 			if (err != 0) {
1133 				numerrors++;
1134 			} else if (doread) {
1135 				break;
1136 			}
1137 		}
1138 	}
1139 
1140 	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
1141 		return (numerrors < vd->vdev_children ? 0 : EIO);
1142 
1143 	if (doread && !vdev_readable(vd))
1144 		return (SET_ERROR(EIO));
1145 	else if (!doread && !vdev_writeable(vd))
1146 		return (SET_ERROR(EIO));
1147 
1148 	if (vd->vdev_ops == &vdev_raidz_ops) {
1149 		return (vdev_raidz_physio(vd,
1150 		    addr, size, offset, origoffset, doread, isdump));
1151 	}
1152 
1153 	offset += VDEV_LABEL_START_SIZE;
1154 
1155 	if (ddi_in_panic() || isdump) {
1156 		ASSERT(!doread);
1157 		if (doread)
1158 			return (SET_ERROR(EIO));
1159 		dvd = vd->vdev_tsd;
1160 		ASSERT3P(dvd, !=, NULL);
1161 		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
1162 		    lbtodb(size)));
1163 	} else {
1164 		dvd = vd->vdev_tsd;
1165 		ASSERT3P(dvd, !=, NULL);
1166 		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
1167 		    offset, doread ? B_READ : B_WRITE));
1168 	}
1169 }
1170 
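/*
 * Dump-path I/O: translate the volume offset through the extent list built
 * by zvol_get_lbas() into a physical vdev offset and hand the transfer to
 * zvol_dumpio_vdev().  The request must be sector aligned and must not
 * cross a volblocksize boundary.
 */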
1171 static int
1172 zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
1173     boolean_t doread, boolean_t isdump)
1174 {
1175 	vdev_t *vd;
1176 	int error;
1177 	zvol_extent_t *ze;
1178 	spa_t *spa = dmu_objset_spa(zv->zv_objset);
1179 
1180 	/* Must be sector aligned, and not straddle a block boundary. */
1181 	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
1182 	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
1183 		return (SET_ERROR(EINVAL));
1184 	}
1185 	ASSERT(size <= zv->zv_volblocksize);
1186 
1187 	/* Locate the extent this belongs to */
1188 	ze = list_head(&zv->zv_extents);
1189 	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
1190 		offset -= ze->ze_nblks * zv->zv_volblocksize;
1191 		ze = list_next(&zv->zv_extents, ze);
1192 	}
1193 
1194 	if (ze == NULL)
1195 		return (SET_ERROR(EINVAL));
1196 
1197 	if (!ddi_in_panic())
1198 		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
1199 
1200 	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
1201 	offset += DVA_GET_OFFSET(&ze->ze_dva);
1202 	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
1203 	    size, doread, isdump);
1204 
1205 	if (!ddi_in_panic())
1206 		spa_config_exit(spa, SCL_STATE, FTAG);
1207 
1208 	return (error);
1209 }
1210 
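/*
 * buf(9S) strategy entry point.  Dumpified volumes bypass the DMU and do
 * their I/O directly against the vdevs via zvol_dumpio(); everything else
 * is split into chunks of at most zvol_maxphys and issued as DMU reads or
 * writes under a range lock, with writes logged to the ZIL and committed
 * before biodone() when synchronous semantics are required.
 */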
1211 int
1212 zvol_strategy(buf_t *bp)
1213 {
1214 	zfs_soft_state_t *zs = NULL;
1215 	zvol_state_t *zv;
1216 	uint64_t off, volsize;
1217 	size_t resid;
1218 	char *addr;
1219 	objset_t *os;
1220 	rl_t *rl;
1221 	int error = 0;
1222 	boolean_t doread = bp->b_flags & B_READ;
1223 	boolean_t is_dumpified;
1224 	boolean_t sync;
1225 
1226 	if (getminor(bp->b_edev) == 0) {
1227 		error = SET_ERROR(EINVAL);
1228 	} else {
1229 		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
1230 		if (zs == NULL)
1231 			error = SET_ERROR(ENXIO);
1232 		else if (zs->zss_type != ZSST_ZVOL)
1233 			error = SET_ERROR(EINVAL);
1234 	}
1235 
1236 	if (error) {
1237 		bioerror(bp, error);
1238 		biodone(bp);
1239 		return (0);
1240 	}
1241 
1242 	zv = zs->zss_data;
1243 
1244 	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
1245 		bioerror(bp, EROFS);
1246 		biodone(bp);
1247 		return (0);
1248 	}
1249 
1250 	off = ldbtob(bp->b_blkno);
1251 	volsize = zv->zv_volsize;
1252 
1253 	os = zv->zv_objset;
1254 	ASSERT(os != NULL);
1255 
1256 	bp_mapin(bp);
1257 	addr = bp->b_un.b_addr;
1258 	resid = bp->b_bcount;
1259 
1260 	if (resid > 0 && (off < 0 || off >= volsize)) {
1261 		bioerror(bp, EIO);
1262 		biodone(bp);
1263 		return (0);
1264 	}
1265 
1266 	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
1267 	sync = ((!(bp->b_flags & B_ASYNC) &&
1268 	    !(zv->zv_flags & ZVOL_WCE)) ||
1269 	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
1270 	    !doread && !is_dumpified;
1271 
1272 	/*
1273 	 * There must be no buffer changes when doing a dmu_sync() because
1274 	 * we can't change the data whilst calculating the checksum.
1275 	 */
1276 	rl = zfs_range_lock(&zv->zv_znode, off, resid,
1277 	    doread ? RL_READER : RL_WRITER);
1278 
1279 	while (resid != 0 && off < volsize) {
1280 		size_t size = MIN(resid, zvol_maxphys);
1281 		if (is_dumpified) {
1282 			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
1283 			error = zvol_dumpio(zv, addr, off, size,
1284 			    doread, B_FALSE);
1285 		} else if (doread) {
1286 			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
1287 			    DMU_READ_PREFETCH);
1288 		} else {
1289 			dmu_tx_t *tx = dmu_tx_create(os);
1290 			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
1291 			error = dmu_tx_assign(tx, TXG_WAIT);
1292 			if (error) {
1293 				dmu_tx_abort(tx);
1294 			} else {
1295 				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
1296 				zvol_log_write(zv, tx, off, size, sync);
1297 				dmu_tx_commit(tx);
1298 			}
1299 		}
1300 		if (error) {
1301 			/* convert checksum errors into IO errors */
1302 			if (error == ECKSUM)
1303 				error = SET_ERROR(EIO);
1304 			break;
1305 		}
1306 		off += size;
1307 		addr += size;
1308 		resid -= size;
1309 	}
1310 	zfs_range_unlock(rl);
1311 
1312 	if ((bp->b_resid = resid) == bp->b_bcount)
1313 		bioerror(bp, off > volsize ? EINVAL : error);
1314 
1315 	if (sync)
1316 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1317 	biodone(bp);
1318 
1319 	return (0);
1320 }
1321 
1322 /*
1323  * Set the buffer count to the zvol maximum transfer.
1324  * Using our own routine instead of the default minphys()
1325  * means that for larger writes we write bigger buffers on X86
1326  * (128K instead of 56K) and flush the disk write cache less often
1327  * (every zvol_maxphys - currently 1MB) instead of minphys (currently
1328  * 56K on X86 and 128K on sparc).
1329  */
1330 void
1331 zvol_minphys(struct buf *bp)
1332 {
1333 	if (bp->b_bcount > zvol_maxphys)
1334 		bp->b_bcount = zvol_maxphys;
1335 }
1336 
1337 int
1338 zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
1339 {
1340 	minor_t minor = getminor(dev);
1341 	zvol_state_t *zv;
1342 	int error = 0;
1343 	uint64_t size;
1344 	uint64_t boff;
1345 	uint64_t resid;
1346 
1347 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1348 	if (zv == NULL)
1349 		return (SET_ERROR(ENXIO));
1350 
1351 	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
1352 		return (SET_ERROR(EINVAL));
1353 
1354 	boff = ldbtob(blkno);
1355 	resid = ldbtob(nblocks);
1356 
1357 	VERIFY3U(boff + resid, <=, zv->zv_volsize);
1358 
1359 	while (resid) {
1360 		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
1361 		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
1362 		if (error)
1363 			break;
1364 		boff += size;
1365 		addr += size;
1366 		resid -= size;
1367 	}
1368 
1369 	return (error);
1370 }
1371 
1372 /*ARGSUSED*/
1373 int
1374 zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
1375 {
1376 	minor_t minor = getminor(dev);
1377 	zvol_state_t *zv;
1378 	uint64_t volsize;
1379 	rl_t *rl;
1380 	int error = 0;
1381 
1382 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1383 	if (zv == NULL)
1384 		return (SET_ERROR(ENXIO));
1385 
1386 	volsize = zv->zv_volsize;
1387 	if (uio->uio_resid > 0 &&
1388 	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1389 		return (SET_ERROR(EIO));
1390 
1391 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1392 		error = physio(zvol_strategy, NULL, dev, B_READ,
1393 		    zvol_minphys, uio);
1394 		return (error);
1395 	}
1396 
1397 	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1398 	    RL_READER);
1399 	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1400 		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1401 
1402 		/* don't read past the end */
1403 		if (bytes > volsize - uio->uio_loffset)
1404 			bytes = volsize - uio->uio_loffset;
1405 
1406 		error =  dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
1407 		if (error) {
1408 			/* convert checksum errors into IO errors */
1409 			if (error == ECKSUM)
1410 				error = SET_ERROR(EIO);
1411 			break;
1412 		}
1413 	}
1414 	zfs_range_unlock(rl);
1415 	return (error);
1416 }
1417 
1418 /*ARGSUSED*/
1419 int
1420 zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
1421 {
1422 	minor_t minor = getminor(dev);
1423 	zvol_state_t *zv;
1424 	uint64_t volsize;
1425 	rl_t *rl;
1426 	int error = 0;
1427 	boolean_t sync;
1428 
1429 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1430 	if (zv == NULL)
1431 		return (SET_ERROR(ENXIO));
1432 
1433 	volsize = zv->zv_volsize;
1434 	if (uio->uio_resid > 0 &&
1435 	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1436 		return (SET_ERROR(EIO));
1437 
1438 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1439 		error = physio(zvol_strategy, NULL, dev, B_WRITE,
1440 		    zvol_minphys, uio);
1441 		return (error);
1442 	}
1443 
1444 	sync = !(zv->zv_flags & ZVOL_WCE) ||
1445 	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
1446 
1447 	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1448 	    RL_WRITER);
1449 	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1450 		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1451 		uint64_t off = uio->uio_loffset;
1452 		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1453 
1454 		if (bytes > volsize - off)	/* don't write past the end */
1455 			bytes = volsize - off;
1456 
1457 		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
1458 		error = dmu_tx_assign(tx, TXG_WAIT);
1459 		if (error) {
1460 			dmu_tx_abort(tx);
1461 			break;
1462 		}
1463 		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
1464 		if (error == 0)
1465 			zvol_log_write(zv, tx, off, bytes, sync);
1466 		dmu_tx_commit(tx);
1467 
1468 		if (error)
1469 			break;
1470 	}
1471 	zfs_range_unlock(rl);
1472 	if (sync)
1473 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1474 	return (error);
1475 }
1476 
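/*
 * Fabricate a minimal EFI label for the volume: a GPT header at LBA 1 and a
 * single EFI_RESERVED partition entry at LBA 2 covering the usable range of
 * the device.  Only the portion the caller asked for (starting at dki_lba 1
 * or 2) is copied out.
 */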
1477 int
1478 zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
1479 {
1480 	struct uuid uuid = EFI_RESERVED;
1481 	efi_gpe_t gpe = { 0 };
1482 	uint32_t crc;
1483 	dk_efi_t efi;
1484 	int length;
1485 	char *ptr;
1486 
1487 	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
1488 		return (SET_ERROR(EFAULT));
1489 	ptr = (char *)(uintptr_t)efi.dki_data_64;
1490 	length = efi.dki_length;
1491 	/*
1492 	 * Some clients may attempt to request a PMBR for the
1493 	 * zvol.  Currently this interface will return EINVAL to
1494 	 * such requests.  These requests could be supported by
1495 	 * adding a check for lba == 0 and consing up an appropriate
1496 	 * PMBR.
1497 	 */
1498 	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
1499 		return (SET_ERROR(EINVAL));
1500 
1501 	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
1502 	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
1503 	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
1504 
1505 	if (efi.dki_lba == 1) {
1506 		efi_gpt_t gpt = { 0 };
1507 
1508 		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
1509 		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
1510 		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
1511 		gpt.efi_gpt_MyLBA = LE_64(1ULL);
1512 		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
1513 		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
1514 		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
1515 		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
1516 		gpt.efi_gpt_SizeOfPartitionEntry =
1517 		    LE_32(sizeof (efi_gpe_t));
1518 		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
1519 		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
1520 		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
1521 		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
1522 		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
1523 		    flag))
1524 			return (SET_ERROR(EFAULT));
1525 		ptr += sizeof (gpt);
1526 		length -= sizeof (gpt);
1527 	}
1528 	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
1529 	    length), flag))
1530 		return (SET_ERROR(EFAULT));
1531 	return (0);
1532 }
1533 
1534 /*
1535  * BEGIN entry points to allow external callers access to the volume.
1536  */
1537 /*
1538  * Return the volume parameters needed for access from an external caller.
1539  * These values are invariant as long as the volume is held open.
1540  */
1541 int
1542 zvol_get_volume_params(minor_t minor, uint64_t *blksize,
1543     uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
1544     void **rl_hdl, void **bonus_hdl)
1545 {
1546 	zvol_state_t *zv;
1547 
1548 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1549 	if (zv == NULL)
1550 		return (SET_ERROR(ENXIO));
1551 	if (zv->zv_flags & ZVOL_DUMPIFIED)
1552 		return (SET_ERROR(ENXIO));
1553 
1554 	ASSERT(blksize && max_xfer_len && minor_hdl &&
1555 	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);
1556 
1557 	*blksize = zv->zv_volblocksize;
1558 	*max_xfer_len = (uint64_t)zvol_maxphys;
1559 	*minor_hdl = zv;
1560 	*objset_hdl = zv->zv_objset;
1561 	*zil_hdl = zv->zv_zilog;
1562 	*rl_hdl = &zv->zv_znode;
1563 	*bonus_hdl = zv->zv_dbuf;
1564 	return (0);
1565 }
1566 
1567 /*
1568  * Return the current volume size to an external caller.
1569  * The size can change while the volume is open.
1570  */
1571 uint64_t
1572 zvol_get_volume_size(void *minor_hdl)
1573 {
1574 	zvol_state_t *zv = minor_hdl;
1575 
1576 	return (zv->zv_volsize);
1577 }
1578 
1579 /*
1580  * Return the current WCE setting to an external caller.
1581  * The WCE setting can change while the volume is open.
1582  */
1583 int
1584 zvol_get_volume_wce(void *minor_hdl)
1585 {
1586 	zvol_state_t *zv = minor_hdl;
1587 
1588 	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
1589 }
1590 
1591 /*
1592  * Entry point for external callers to zvol_log_write
1593  */
1594 void
1595 zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
1596     boolean_t sync)
1597 {
1598 	zvol_state_t *zv = minor_hdl;
1599 
1600 	zvol_log_write(zv, tx, off, resid, sync);
1601 }
1602 /*
1603  * END entry points to allow external callers access to the volume.
1604  */
1605 
1606 /*
1607  * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
1608  */
1609 static void
1610 zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
1611     boolean_t sync)
1612 {
1613 	itx_t *itx;
1614 	lr_truncate_t *lr;
1615 	zilog_t *zilog = zv->zv_zilog;
1616 
1617 	if (zil_replaying(zilog, tx))
1618 		return;
1619 
1620 	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1621 	lr = (lr_truncate_t *)&itx->itx_lr;
1622 	lr->lr_foid = ZVOL_OBJ;
1623 	lr->lr_offset = off;
1624 	lr->lr_length = len;
1625 
1626 	itx->itx_sync = sync;
1627 	zil_itx_assign(zilog, itx, tx);
1628 }
1629 
1630 /*
1631  * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
1632  * Also a dirtbag dkio ioctl for unmap/free-block functionality.
1633  */
1634 /*ARGSUSED*/
1635 int
1636 zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
1637 {
1638 	zvol_state_t *zv;
1639 	struct dk_callback *dkc;
1640 	int error = 0;
1641 	rl_t *rl;
1642 
1643 	mutex_enter(&zfsdev_state_lock);
1644 
1645 	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
1646 
1647 	if (zv == NULL) {
1648 		mutex_exit(&zfsdev_state_lock);
1649 		return (SET_ERROR(ENXIO));
1650 	}
1651 	ASSERT(zv->zv_total_opens > 0);
1652 
1653 	switch (cmd) {
1654 
1655 	case DKIOCINFO:
1656 	{
1657 		struct dk_cinfo dki;
1658 
1659 		bzero(&dki, sizeof (dki));
1660 		(void) strcpy(dki.dki_cname, "zvol");
1661 		(void) strcpy(dki.dki_dname, "zvol");
1662 		dki.dki_ctype = DKC_UNKNOWN;
1663 		dki.dki_unit = getminor(dev);
1664 		dki.dki_maxtransfer =
1665 		    1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs);
1666 		mutex_exit(&zfsdev_state_lock);
1667 		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
1668 			error = SET_ERROR(EFAULT);
1669 		return (error);
1670 	}
1671 
1672 	case DKIOCGMEDIAINFO:
1673 	{
1674 		struct dk_minfo dkm;
1675 
1676 		bzero(&dkm, sizeof (dkm));
1677 		dkm.dki_lbsize = 1U << zv->zv_min_bs;
1678 		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1679 		dkm.dki_media_type = DK_UNKNOWN;
1680 		mutex_exit(&zfsdev_state_lock);
1681 		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
1682 			error = SET_ERROR(EFAULT);
1683 		return (error);
1684 	}
1685 
1686 	case DKIOCGMEDIAINFOEXT:
1687 	{
1688 		struct dk_minfo_ext dkmext;
1689 
1690 		bzero(&dkmext, sizeof (dkmext));
1691 		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
1692 		dkmext.dki_pbsize = zv->zv_volblocksize;
1693 		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1694 		dkmext.dki_media_type = DK_UNKNOWN;
1695 		mutex_exit(&zfsdev_state_lock);
1696 		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
1697 			error = SET_ERROR(EFAULT);
1698 		return (error);
1699 	}
1700 
1701 	case DKIOCGETEFI:
1702 	{
1703 		uint64_t vs = zv->zv_volsize;
1704 		uint8_t bs = zv->zv_min_bs;
1705 
1706 		mutex_exit(&zfsdev_state_lock);
1707 		error = zvol_getefi((void *)arg, flag, vs, bs);
1708 		return (error);
1709 	}
1710 
1711 	case DKIOCFLUSHWRITECACHE:
1712 		dkc = (struct dk_callback *)arg;
1713 		mutex_exit(&zfsdev_state_lock);
1714 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1715 		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
1716 			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
1717 			error = 0;
1718 		}
1719 		return (error);
1720 
1721 	case DKIOCGETWCE:
1722 	{
1723 		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
1724 		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
1725 		    flag))
1726 			error = SET_ERROR(EFAULT);
1727 		break;
1728 	}
1729 	case DKIOCSETWCE:
1730 	{
1731 		int wce;
1732 		if (ddi_copyin((void *)arg, &wce, sizeof (int),
1733 		    flag)) {
1734 			error = SET_ERROR(EFAULT);
1735 			break;
1736 		}
1737 		if (wce) {
1738 			zv->zv_flags |= ZVOL_WCE;
1739 			mutex_exit(&zfsdev_state_lock);
1740 		} else {
1741 			zv->zv_flags &= ~ZVOL_WCE;
1742 			mutex_exit(&zfsdev_state_lock);
1743 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
1744 		}
1745 		return (0);
1746 	}
1747 
1748 	case DKIOCGGEOM:
1749 	case DKIOCGVTOC:
1750 		/*
1751 		 * commands using these (like prtvtoc) expect ENOTSUP
1752 		 * since we're emulating an EFI label
1753 		 */
1754 		error = SET_ERROR(ENOTSUP);
1755 		break;
1756 
1757 	case DKIOCDUMPINIT:
1758 		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
1759 		    RL_WRITER);
1760 		error = zvol_dumpify(zv);
1761 		zfs_range_unlock(rl);
1762 		break;
1763 
1764 	case DKIOCDUMPFINI:
1765 		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
1766 			break;
1767 		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
1768 		    RL_WRITER);
1769 		error = zvol_dump_fini(zv);
1770 		zfs_range_unlock(rl);
1771 		break;
1772 
1773 	case DKIOCFREE:
1774 	{
1775 		dkioc_free_t df;
1776 		dmu_tx_t *tx;
1777 
1778 		if (!zvol_unmap_enabled)
1779 			break;
1780 
1781 		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
1782 			error = SET_ERROR(EFAULT);
1783 			break;
1784 		}
1785 
1786 		/*
1787 		 * Apply Postel's Law to length-checking.  If they overshoot,
1788 		 * just blank out until the end, if there's a need to blank
1789 		 * out anything.
1790 		 */
1791 		if (df.df_start >= zv->zv_volsize)
1792 			break;	/* No need to do anything... */
1793 
1794 		mutex_exit(&zfsdev_state_lock);
1795 
1796 		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
1797 		    RL_WRITER);
1798 		tx = dmu_tx_create(zv->zv_objset);
1799 		dmu_tx_mark_netfree(tx);
1800 		error = dmu_tx_assign(tx, TXG_WAIT);
1801 		if (error != 0) {
1802 			dmu_tx_abort(tx);
1803 		} else {
1804 			zvol_log_truncate(zv, tx, df.df_start,
1805 			    df.df_length, B_TRUE);
1806 			dmu_tx_commit(tx);
1807 			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
1808 			    df.df_start, df.df_length);
1809 		}
1810 
1811 		zfs_range_unlock(rl);
1812 
1813 		if (error == 0) {
1814 			/*
1815 			 * If the write-cache is disabled or 'sync' property
1816 			 * is set to 'always' then treat this as a synchronous
1817 			 * operation (i.e. commit to zil).
1818 			 */
1819 			if (!(zv->zv_flags & ZVOL_WCE) ||
1820 			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
1821 				zil_commit(zv->zv_zilog, ZVOL_OBJ);
1822 
1823 			/*
1824 			 * If the caller really wants synchronous writes, and
1825 			 * can't wait for them, don't return until the write
1826 			 * is done.
1827 			 */
1828 			if (df.df_flags & DF_WAIT_SYNC) {
1829 				txg_wait_synced(
1830 				    dmu_objset_pool(zv->zv_objset), 0);
1831 			}
1832 		}
1833 		return (error);
1834 	}
1835 
1836 	default:
1837 		error = SET_ERROR(ENOTTY);
1838 		break;
1839 
1840 	}
1841 	mutex_exit(&zfsdev_state_lock);
1842 	return (error);
1843 }
1844 
1845 int
1846 zvol_busy(void)
1847 {
1848 	return (zvol_minors != 0);
1849 }
1850 
1851 void
1852 zvol_init(void)
1853 {
1854 	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
1855 	    1) == 0);
1856 	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
1857 }
1858 
1859 void
1860 zvol_fini(void)
1861 {
1862 	mutex_destroy(&zfsdev_state_lock);
1863 	ddi_soft_state_fini(&zfsdev_state);
1864 }
1865 
1866 /*ARGSUSED*/
1867 static int
1868 zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
1869 {
1870 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1871 
1872 	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1873 		return (1);
1874 	return (0);
1875 }
1876 
1877 /*ARGSUSED*/
1878 static void
1879 zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
1880 {
1881 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1882 
1883 	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
1884 }
1885 
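/*
 * Prepare the on-disk state needed for dumpification.  The existing data is
 * freed and, if the pool layout requires it, MULTI_VDEV_CRASH_DUMP is
 * activated (failing with ENOTSUP if that feature is not enabled).  On
 * first-time setup the original compression, checksum, refreservation,
 * volblocksize (and dedup) values are stashed in the ZAP so that
 * zvol_dump_fini() can restore them, and the volume is switched to
 * dump-friendly settings (large fixed blocksize, no compression, no
 * refreservation, a dump-capable checksum); on a resize only the saved
 * refreservation is refreshed.  Finally the entire volume is preallocated.
 */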
1886 static int
1887 zvol_dump_init(zvol_state_t *zv, boolean_t resize)
1888 {
1889 	dmu_tx_t *tx;
1890 	int error;
1891 	objset_t *os = zv->zv_objset;
1892 	spa_t *spa = dmu_objset_spa(os);
1893 	vdev_t *vd = spa->spa_root_vdev;
1894 	nvlist_t *nv = NULL;
1895 	uint64_t version = spa_version(spa);
1896 	enum zio_checksum checksum;
1897 
1898 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
1899 	ASSERT(vd->vdev_ops == &vdev_root_ops);
1900 
1901 	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
1902 	    DMU_OBJECT_END);
1903 	/* wait for dmu_free_long_range to actually free the blocks */
1904 	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
1905 
1906 	/*
1907 	 * If the pool on which the dump device is being initialized has more
1908 	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
1909 	 * enabled.  If so, bump that feature's counter to indicate that the
1910 	 * feature is active. We also check the vdev type to handle the
1911 	 * following case:
1912 	 *   # zpool create test raidz disk1 disk2 disk3
1913 	 *   Now spa_root_vdev->vdev_children == 1 (the raidz vdev), but
1914 	 *   the raidz vdev itself has 3 children.
1915 	 */
1916 	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
1917 		if (!spa_feature_is_enabled(spa,
1918 		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1919 			return (SET_ERROR(ENOTSUP));
1920 		(void) dsl_sync_task(spa_name(spa),
1921 		    zfs_mvdev_dump_feature_check,
1922 		    zfs_mvdev_dump_activate_feature_sync, NULL,
1923 		    2, ZFS_SPACE_CHECK_RESERVED);
1924 	}
1925 
1926 	tx = dmu_tx_create(os);
1927 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
1928 	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
1929 	error = dmu_tx_assign(tx, TXG_WAIT);
1930 	if (error) {
1931 		dmu_tx_abort(tx);
1932 		return (error);
1933 	}
1934 
1935 	/*
1936 	 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
1937 	 * function.  Otherwise, use the old default -- OFF.
1938 	 */
1939 	checksum = spa_feature_is_active(spa,
1940 	    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
1941 	    ZIO_CHECKSUM_OFF;
1942 
1943 	/*
1944 	 * If we are resizing the dump device then we only need to
1945 	 * update the refreservation to match the newly updated
1946 	 * zvol size.  Otherwise, we save off the zvol's original properties
1947 	 * so that we can restore them if the zvol is ever undumpified.
1948 	 */
1949 	if (resize) {
1950 		error = zap_update(os, ZVOL_ZAP_OBJ,
1951 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
1952 		    &zv->zv_volsize, tx);
1953 	} else {
1954 		uint64_t checksum, compress, refresrv, vbs, dedup;
1955 
1956 		error = dsl_prop_get_integer(zv->zv_name,
1957 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
1958 		error = error ? error : dsl_prop_get_integer(zv->zv_name,
1959 		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
1960 		error = error ? error : dsl_prop_get_integer(zv->zv_name,
1961 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
1962 		error = error ? error : dsl_prop_get_integer(zv->zv_name,
1963 		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
1964 		if (version >= SPA_VERSION_DEDUP) {
1965 			error = error ? error :
1966 			    dsl_prop_get_integer(zv->zv_name,
1967 			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
1968 		}
1969 
1970 		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1971 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
1972 		    &compress, tx);
1973 		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1974 		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
1975 		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1976 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
1977 		    &refresrv, tx);
1978 		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1979 		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
1980 		    &vbs, tx);
1981 		error = error ? error : dmu_object_set_blocksize(
1982 		    os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx);
1983 		if (version >= SPA_VERSION_DEDUP) {
1984 			error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1985 			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
1986 			    &dedup, tx);
1987 		}
1988 		if (error == 0)
1989 			zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE;
1990 	}
1991 	dmu_tx_commit(tx);
1992 
1993 	/*
1994 	 * We only need update the zvol's property if we are initializing
1995 	 * the dump area for the first time.
1996 	 * We only need to update the zvol's properties if we are initializing
1997 	if (!resize) {
1998 		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1999 		VERIFY(nvlist_add_uint64(nv,
2000 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2001 		VERIFY(nvlist_add_uint64(nv,
2002 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2003 		    ZIO_COMPRESS_OFF) == 0);
2004 		VERIFY(nvlist_add_uint64(nv,
2005 		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2006 		    checksum) == 0);
2007 		if (version >= SPA_VERSION_DEDUP) {
2008 			VERIFY(nvlist_add_uint64(nv,
2009 			    zfs_prop_to_name(ZFS_PROP_DEDUP),
2010 			    ZIO_CHECKSUM_OFF) == 0);
2011 		}
2012 
2013 		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2014 		    nv, NULL);
2015 		nvlist_free(nv);
2016 
2017 		if (error)
2018 			return (error);
2019 	}
2020 
2021 	/* Allocate the space for the dump */
2022 	error = zvol_prealloc(zv);
2023 	return (error);
2024 }
2025 
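/*
 * Make the volume usable as a dump device: run zvol_dump_init() if the
 * recorded dump size is missing or stale, rebuild the LBA-to-DVA extent
 * map, then set ZVOL_DUMPIFIED and record ZVOL_DUMPSIZE in the ZAP.
 */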
2026 static int
2027 zvol_dumpify(zvol_state_t *zv)
2028 {
2029 	int error = 0;
2030 	uint64_t dumpsize = 0;
2031 	dmu_tx_t *tx;
2032 	objset_t *os = zv->zv_objset;
2033 
2034 	if (zv->zv_flags & ZVOL_RDONLY)
2035 		return (SET_ERROR(EROFS));
2036 
2037 	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2038 	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
2039 		boolean_t resize = (dumpsize > 0);
2040 
2041 		if ((error = zvol_dump_init(zv, resize)) != 0) {
2042 			(void) zvol_dump_fini(zv);
2043 			return (error);
2044 		}
2045 	}
2046 
2047 	/*
2048 	 * Build up our lba mapping.
2049 	 */
2050 	error = zvol_get_lbas(zv);
2051 	if (error) {
2052 		(void) zvol_dump_fini(zv);
2053 		return (error);
2054 	}
2055 
2056 	tx = dmu_tx_create(os);
2057 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2058 	error = dmu_tx_assign(tx, TXG_WAIT);
2059 	if (error) {
2060 		dmu_tx_abort(tx);
2061 		(void) zvol_dump_fini(zv);
2062 		return (error);
2063 	}
2064 
2065 	zv->zv_flags |= ZVOL_DUMPIFIED;
2066 	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2067 	    &zv->zv_volsize, tx);
2068 	dmu_tx_commit(tx);
2069 
2070 	if (error) {
2071 		(void) zvol_dump_fini(zv);
2072 		return (error);
2073 	}
2074 
2075 	txg_wait_synced(dmu_objset_pool(os), 0);
2076 	return (0);
2077 }
2078 
2079 static int
2080 zvol_dump_fini(zvol_state_t *zv)
2081 {
2082 	dmu_tx_t *tx;
2083 	objset_t *os = zv->zv_objset;
2084 	nvlist_t *nv;
2085 	int error = 0;
2086 	uint64_t checksum, compress, refresrv, vbs, dedup;
2087 	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2088 
2089 	/*
2090 	 * Attempt to restore the zvol back to its pre-dumpified state.
2091 	 * This is a best-effort attempt as it's possible that not all
2092 	 * of these properties were initialized during the dumpify process
2093 	 * (i.e. error during zvol_dump_init).
2094 	 * (e.g. an error during zvol_dump_init).
2095 
2096 	tx = dmu_tx_create(os);
2097 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2098 	error = dmu_tx_assign(tx, TXG_WAIT);
2099 	if (error) {
2100 		dmu_tx_abort(tx);
2101 		return (error);
2102 	}
2103 	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2104 	dmu_tx_commit(tx);
2105 
2106 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2107 	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2108 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2109 	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2110 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2111 	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
2112 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2113 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2114 
2115 	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2116 	(void) nvlist_add_uint64(nv,
2117 	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2118 	(void) nvlist_add_uint64(nv,
2119 	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2120 	(void) nvlist_add_uint64(nv,
2121 	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
2122 	if (version >= SPA_VERSION_DEDUP &&
2123 	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2124 	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
2125 		(void) nvlist_add_uint64(nv,
2126 		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
2127 	}
2128 	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2129 	    nv, NULL);
2130 	nvlist_free(nv);
2131 
2132 	zvol_free_extents(zv);
2133 	zv->zv_flags &= ~ZVOL_DUMPIFIED;
2134 	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2135 	/* wait for dmu_free_long_range to actually free the blocks */
2136 	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2137 	tx = dmu_tx_create(os);
2138 	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2139 	error = dmu_tx_assign(tx, TXG_WAIT);
2140 	if (error) {
2141 		dmu_tx_abort(tx);
2142 		return (error);
2143 	}
2144 	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2145 		zv->zv_volblocksize = vbs;
2146 	dmu_tx_commit(tx);
2147 
2148 	return (0);
2149 }
2150