xref: /illumos-gate/usr/src/uts/common/fs/zfs/zvol.c (revision 088d69f878cf3fb57556357236ef8e1c8f9d893e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  *
24  * Portions Copyright 2010 Robert Milkowski
25  *
26  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
27  * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
28  * Copyright (c) 2013, Joyent, Inc. All rights reserved.
29  * Copyright (c) 2014 Integros [integros.com]
30  */
31 
32 /*
33  * ZFS volume emulation driver.
34  *
35  * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
36  * Volumes are accessed through the symbolic links named:
37  *
38  * /dev/zvol/dsk/<pool_name>/<dataset_name>
39  * /dev/zvol/rdsk/<pool_name>/<dataset_name>
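 *
 * For example, a (hypothetical) volume named "tank/vol" would appear as
 * /dev/zvol/dsk/tank/vol and /dev/zvol/rdsk/tank/vol.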
40  *
41  * These links are created by the /dev filesystem (sdev_zvolops.c).
42  * Volumes are persistent through reboot.  No user command needs to be
43  * run before opening and using a device.
44  */
45 
46 #include <sys/types.h>
47 #include <sys/param.h>
48 #include <sys/errno.h>
49 #include <sys/uio.h>
50 #include <sys/buf.h>
51 #include <sys/modctl.h>
52 #include <sys/open.h>
53 #include <sys/kmem.h>
54 #include <sys/conf.h>
55 #include <sys/cmn_err.h>
56 #include <sys/stat.h>
57 #include <sys/zap.h>
58 #include <sys/spa.h>
59 #include <sys/spa_impl.h>
60 #include <sys/zio.h>
61 #include <sys/dmu_traverse.h>
62 #include <sys/dnode.h>
63 #include <sys/dsl_dataset.h>
64 #include <sys/dsl_prop.h>
65 #include <sys/dkio.h>
66 #include <sys/efi_partition.h>
67 #include <sys/byteorder.h>
68 #include <sys/pathname.h>
69 #include <sys/ddi.h>
70 #include <sys/sunddi.h>
71 #include <sys/crc32.h>
72 #include <sys/dirent.h>
73 #include <sys/policy.h>
74 #include <sys/fs/zfs.h>
75 #include <sys/zfs_ioctl.h>
76 #include <sys/mkdev.h>
77 #include <sys/zil.h>
78 #include <sys/refcount.h>
79 #include <sys/zfs_znode.h>
80 #include <sys/zfs_rlock.h>
81 #include <sys/vdev_disk.h>
82 #include <sys/vdev_impl.h>
83 #include <sys/vdev_raidz.h>
84 #include <sys/zvol.h>
85 #include <sys/dumphdr.h>
86 #include <sys/zil_impl.h>
87 #include <sys/dbuf.h>
88 #include <sys/dmu_tx.h>
89 #include <sys/zfeature.h>
90 #include <sys/zio_checksum.h>
91 
92 #include "zfs_namecheck.h"
93 
94 void *zfsdev_state;
95 static char *zvol_tag = "zvol_tag";
96 
97 #define	ZVOL_DUMPSIZE		"dumpsize"
98 
99 /*
100  * This lock protects the zfsdev_state structure from being modified
101  * while it's being used, e.g. an open that comes in before a create
102  * finishes.  It also protects temporary opens of the dataset so that,
103  * e.g., an open doesn't get a spurious EBUSY.
104  */
105 kmutex_t zfsdev_state_lock;
106 static uint32_t zvol_minors;
107 
108 typedef struct zvol_extent {
109 	list_node_t	ze_node;
110 	dva_t		ze_dva;		/* dva associated with this extent */
111 	uint64_t	ze_nblks;	/* number of blocks in extent */
112 } zvol_extent_t;
113 
114 /*
115  * The in-core state of each volume.
116  */
117 typedef struct zvol_state {
118 	char		zv_name[MAXPATHLEN]; /* pool/dataset name */
119 	uint64_t	zv_volsize;	/* amount of space we advertise */
120 	uint64_t	zv_volblocksize; /* volume block size */
121 	minor_t		zv_minor;	/* minor number */
122 	uint8_t		zv_min_bs;	/* minimum addressable block shift */
123 	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
124 	objset_t	*zv_objset;	/* objset handle */
125 	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
126 	uint32_t	zv_total_opens;	/* total open count */
127 	zilog_t		*zv_zilog;	/* ZIL handle */
128 	list_t		zv_extents;	/* List of extents for dump */
129 	znode_t		zv_znode;	/* for range locking */
130 	dmu_buf_t	*zv_dbuf;	/* bonus handle */
131 } zvol_state_t;
132 
133 /*
134  * zvol specific flags
135  */
136 #define	ZVOL_RDONLY	0x1
137 #define	ZVOL_DUMPIFIED	0x2
138 #define	ZVOL_EXCL	0x4
139 #define	ZVOL_WCE	0x8
140 
141 /*
142  * zvol maximum transfer in one DMU tx.
143  */
144 int zvol_maxphys = DMU_MAX_ACCESS/2;
145 
146 /*
147  * Toggle unmap functionality.
148  */
149 boolean_t zvol_unmap_enabled = B_TRUE;
150 
151 /*
152  * If true, unmaps requested as synchronous are executed synchronously,
153  * otherwise all unmaps are asynchronous.
154  */
155 boolean_t zvol_unmap_sync_enabled = B_FALSE;
156 
157 extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
158     nvlist_t *, nvlist_t *);
159 static int zvol_remove_zv(zvol_state_t *);
160 static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
161 static int zvol_dumpify(zvol_state_t *zv);
162 static int zvol_dump_fini(zvol_state_t *zv);
163 static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
164 
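/*
 * Propagate a new volume size: record it in the soft state, refresh the
 * "Size" and "Nblocks" devinfo properties, and invalidate specfs's cached
 * size for both the block and character device nodes.
 */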
165 static void
166 zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
167 {
168 	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);
169 
170 	zv->zv_volsize = volsize;
171 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
172 	    "Size", volsize) == DDI_SUCCESS);
173 	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
174 	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);
175 
176 	/* Notify specfs to invalidate the cached size */
177 	spec_size_invalidate(dev, VBLK);
178 	spec_size_invalidate(dev, VCHR);
179 }
180 
181 int
182 zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
183 {
184 	if (volsize == 0)
185 		return (SET_ERROR(EINVAL));
186 
187 	if (volsize % blocksize != 0)
188 		return (SET_ERROR(EINVAL));
189 
190 #ifdef _ILP32
191 	if (volsize - 1 > SPEC_MAXOFFSET_T)
192 		return (SET_ERROR(EOVERFLOW));
193 #endif
194 	return (0);
195 }
196 
197 int
198 zvol_check_volblocksize(uint64_t volblocksize)
199 {
200 	if (volblocksize < SPA_MINBLOCKSIZE ||
201 	    volblocksize > SPA_OLD_MAXBLOCKSIZE ||
202 	    !ISP2(volblocksize))
203 		return (SET_ERROR(EDOM));
204 
205 	return (0);
206 }
207 
208 int
209 zvol_get_stats(objset_t *os, nvlist_t *nv)
210 {
211 	int error;
212 	dmu_object_info_t doi;
213 	uint64_t val;
214 
215 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
216 	if (error)
217 		return (error);
218 
219 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
220 
221 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
222 
223 	if (error == 0) {
224 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
225 		    doi.doi_data_block_size);
226 	}
227 
228 	return (error);
229 }
230 
231 static zvol_state_t *
232 zvol_minor_lookup(const char *name)
233 {
234 	minor_t minor;
235 	zvol_state_t *zv;
236 
237 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
238 
239 	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
240 		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
241 		if (zv == NULL)
242 			continue;
243 		if (strcmp(zv->zv_name, name) == 0)
244 			return (zv);
245 	}
246 
247 	return (NULL);
248 }
249 
250 /* extent mapping arg */
251 struct maparg {
252 	zvol_state_t	*ma_zv;
253 	uint64_t	ma_blks;
254 };
255 
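/*
 * traverse_dataset() callback used by zvol_get_lbas(): record each level-0
 * block of the zvol as a run of physically contiguous blocks (zvol_extent_t)
 * so that dump I/O can later be issued straight to the vdevs, bypassing the
 * DMU.  Gang blocks cannot be mapped this way, so they abort the traversal.
 */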
256 /*ARGSUSED*/
257 static int
258 zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
259     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
260 {
261 	struct maparg *ma = arg;
262 	zvol_extent_t *ze;
263 	int bs = ma->ma_zv->zv_volblocksize;
264 
265 	if (bp == NULL || BP_IS_HOLE(bp) ||
266 	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
267 		return (0);
268 
269 	VERIFY(!BP_IS_EMBEDDED(bp));
270 
271 	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
272 	ma->ma_blks++;
273 
274 	/* Abort immediately if we have encountered gang blocks */
275 	if (BP_IS_GANG(bp))
276 		return (SET_ERROR(EFRAGS));
277 
278 	/*
279 	 * See if the block is at the end of the previous extent.
280 	 */
281 	ze = list_tail(&ma->ma_zv->zv_extents);
282 	if (ze &&
283 	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
284 	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
285 	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
286 		ze->ze_nblks++;
287 		return (0);
288 	}
289 
290 	dprintf_bp(bp, "%s", "next blkptr:");
291 
292 	/* start a new extent */
293 	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
294 	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
295 	ze->ze_nblks = 1;
296 	list_insert_tail(&ma->ma_zv->zv_extents, ze);
297 	return (0);
298 }
299 
300 static void
301 zvol_free_extents(zvol_state_t *zv)
302 {
303 	zvol_extent_t *ze;
304 
305 	while (ze = list_head(&zv->zv_extents)) {
306 		list_remove(&zv->zv_extents, ze);
307 		kmem_free(ze, sizeof (zvol_extent_t));
308 	}
309 }
310 
311 static int
312 zvol_get_lbas(zvol_state_t *zv)
313 {
314 	objset_t *os = zv->zv_objset;
315 	struct maparg	ma;
316 	int		err;
317 
318 	ma.ma_zv = zv;
319 	ma.ma_blks = 0;
320 	zvol_free_extents(zv);
321 
322 	/* commit any in-flight changes before traversing the dataset */
323 	txg_wait_synced(dmu_objset_pool(os), 0);
324 	err = traverse_dataset(dmu_objset_ds(os), 0,
325 	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
326 	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
327 		zvol_free_extents(zv);
328 		return (err ? err : EIO);
329 	}
330 
331 	return (0);
332 }
333 
334 /* ARGSUSED */
335 void
336 zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
337 {
338 	zfs_creat_t *zct = arg;
339 	nvlist_t *nvprops = zct->zct_props;
340 	int error;
341 	uint64_t volblocksize, volsize;
342 
343 	VERIFY(nvlist_lookup_uint64(nvprops,
344 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
345 	if (nvlist_lookup_uint64(nvprops,
346 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
347 		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
348 
349 	/*
350 	 * These properties must be removed from the list so the generic
351 	 * property setting step won't apply to them.
352 	 */
353 	VERIFY(nvlist_remove_all(nvprops,
354 	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
355 	(void) nvlist_remove_all(nvprops,
356 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
357 
358 	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
359 	    DMU_OT_NONE, 0, tx);
360 	ASSERT(error == 0);
361 
362 	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
363 	    DMU_OT_NONE, 0, tx);
364 	ASSERT(error == 0);
365 
366 	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
367 	ASSERT(error == 0);
368 }
369 
370 /*
371  * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
372  * implement DKIOCFREE/free-long-range.
373  */
374 static int
375 zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
376 {
377 	uint64_t offset, length;
378 
379 	if (byteswap)
380 		byteswap_uint64_array(lr, sizeof (*lr));
381 
382 	offset = lr->lr_offset;
383 	length = lr->lr_length;
384 
385 	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
386 }
387 
388 /*
389  * Replay a TX_WRITE ZIL transaction that didn't get committed
390  * after a system failure
391  */
392 static int
393 zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
394 {
395 	objset_t *os = zv->zv_objset;
396 	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
397 	uint64_t offset, length;
398 	dmu_tx_t *tx;
399 	int error;
400 
401 	if (byteswap)
402 		byteswap_uint64_array(lr, sizeof (*lr));
403 
404 	offset = lr->lr_offset;
405 	length = lr->lr_length;
406 
407 	/* If it's a dmu_sync() block, write the whole block */
408 	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
409 		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
410 		if (length < blocksize) {
411 			offset -= offset % blocksize;
412 			length = blocksize;
413 		}
414 	}
415 
416 	tx = dmu_tx_create(os);
417 	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
418 	error = dmu_tx_assign(tx, TXG_WAIT);
419 	if (error) {
420 		dmu_tx_abort(tx);
421 	} else {
422 		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
423 		dmu_tx_commit(tx);
424 	}
425 
426 	return (error);
427 }
428 
429 /* ARGSUSED */
430 static int
431 zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
432 {
433 	return (SET_ERROR(ENOTSUP));
434 }
435 
436 /*
437  * Callback vectors for replaying records.
438  * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
439  */
440 zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
441 	zvol_replay_err,	/* 0 no such transaction type */
442 	zvol_replay_err,	/* TX_CREATE */
443 	zvol_replay_err,	/* TX_MKDIR */
444 	zvol_replay_err,	/* TX_MKXATTR */
445 	zvol_replay_err,	/* TX_SYMLINK */
446 	zvol_replay_err,	/* TX_REMOVE */
447 	zvol_replay_err,	/* TX_RMDIR */
448 	zvol_replay_err,	/* TX_LINK */
449 	zvol_replay_err,	/* TX_RENAME */
450 	zvol_replay_write,	/* TX_WRITE */
451 	zvol_replay_truncate,	/* TX_TRUNCATE */
452 	zvol_replay_err,	/* TX_SETATTR */
453 	zvol_replay_err,	/* TX_ACL */
454 	zvol_replay_err,	/* TX_CREATE_ACL */
455 	zvol_replay_err,	/* TX_CREATE_ATTR */
456 	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
457 	zvol_replay_err,	/* TX_MKDIR_ACL */
458 	zvol_replay_err,	/* TX_MKDIR_ATTR */
459 	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
460 	zvol_replay_err,	/* TX_WRITE2 */
461 };
462 
463 int
464 zvol_name2minor(const char *name, minor_t *minor)
465 {
466 	zvol_state_t *zv;
467 
468 	mutex_enter(&zfsdev_state_lock);
469 	zv = zvol_minor_lookup(name);
470 	if (minor && zv)
471 		*minor = zv->zv_minor;
472 	mutex_exit(&zfsdev_state_lock);
473 	return (zv ? 0 : -1);
474 }
475 
476 /*
477  * Create a minor node (plus a whole lot more) for the specified volume.
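 *
 * This allocates a minor number and the per-volume soft state, creates the
 * raw and block minor nodes, caches the volume block size, and replays (or
 * discards) the volume's ZIL before releasing the objset.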
478  */
479 int
480 zvol_create_minor(const char *name)
481 {
482 	zfs_soft_state_t *zs;
483 	zvol_state_t *zv;
484 	objset_t *os;
485 	dmu_object_info_t doi;
486 	minor_t minor = 0;
487 	char chrbuf[30], blkbuf[30];
488 	int error;
489 
490 	mutex_enter(&zfsdev_state_lock);
491 
492 	if (zvol_minor_lookup(name) != NULL) {
493 		mutex_exit(&zfsdev_state_lock);
494 		return (SET_ERROR(EEXIST));
495 	}
496 
497 	/* lie and say we're read-only */
498 	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
499 
500 	if (error) {
501 		mutex_exit(&zfsdev_state_lock);
502 		return (error);
503 	}
504 
505 	if ((minor = zfsdev_minor_alloc()) == 0) {
506 		dmu_objset_disown(os, FTAG);
507 		mutex_exit(&zfsdev_state_lock);
508 		return (SET_ERROR(ENXIO));
509 	}
510 
511 	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
512 		dmu_objset_disown(os, FTAG);
513 		mutex_exit(&zfsdev_state_lock);
514 		return (SET_ERROR(EAGAIN));
515 	}
516 	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
517 	    (char *)name);
518 
519 	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);
520 
521 	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
522 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
523 		ddi_soft_state_free(zfsdev_state, minor);
524 		dmu_objset_disown(os, FTAG);
525 		mutex_exit(&zfsdev_state_lock);
526 		return (SET_ERROR(EAGAIN));
527 	}
528 
529 	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);
530 
531 	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
532 	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
533 		ddi_remove_minor_node(zfs_dip, chrbuf);
534 		ddi_soft_state_free(zfsdev_state, minor);
535 		dmu_objset_disown(os, FTAG);
536 		mutex_exit(&zfsdev_state_lock);
537 		return (SET_ERROR(EAGAIN));
538 	}
539 
540 	zs = ddi_get_soft_state(zfsdev_state, minor);
541 	zs->zss_type = ZSST_ZVOL;
542 	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
543 	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
544 	zv->zv_min_bs = DEV_BSHIFT;
545 	zv->zv_minor = minor;
546 	zv->zv_objset = os;
547 	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
548 		zv->zv_flags |= ZVOL_RDONLY;
549 	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
550 	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
551 	    sizeof (rl_t), offsetof(rl_t, r_node));
552 	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
553 	    offsetof(zvol_extent_t, ze_node));
554 	/* get and cache the blocksize */
555 	error = dmu_object_info(os, ZVOL_OBJ, &doi);
556 	ASSERT(error == 0);
557 	zv->zv_volblocksize = doi.doi_data_block_size;
558 
559 	if (spa_writeable(dmu_objset_spa(os))) {
560 		if (zil_replay_disable)
561 			zil_destroy(dmu_objset_zil(os), B_FALSE);
562 		else
563 			zil_replay(os, zv, zvol_replay_vector);
564 	}
565 	dmu_objset_disown(os, FTAG);
566 	zv->zv_objset = NULL;
567 
568 	zvol_minors++;
569 
570 	mutex_exit(&zfsdev_state_lock);
571 
572 	return (0);
573 }
574 
575 /*
576  * Remove minor node for the specified volume.
577  */
578 static int
579 zvol_remove_zv(zvol_state_t *zv)
580 {
581 	char nmbuf[20];
582 	minor_t minor = zv->zv_minor;
583 
584 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
585 	if (zv->zv_total_opens != 0)
586 		return (SET_ERROR(EBUSY));
587 
588 	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
589 	ddi_remove_minor_node(zfs_dip, nmbuf);
590 
591 	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
592 	ddi_remove_minor_node(zfs_dip, nmbuf);
593 
594 	avl_destroy(&zv->zv_znode.z_range_avl);
595 	mutex_destroy(&zv->zv_znode.z_range_lock);
596 
597 	kmem_free(zv, sizeof (zvol_state_t));
598 
599 	ddi_soft_state_free(zfsdev_state, minor);
600 
601 	zvol_minors--;
602 	return (0);
603 }
604 
605 int
606 zvol_remove_minor(const char *name)
607 {
608 	zvol_state_t *zv;
609 	int rc;
610 
611 	mutex_enter(&zfsdev_state_lock);
612 	if ((zv = zvol_minor_lookup(name)) == NULL) {
613 		mutex_exit(&zfsdev_state_lock);
614 		return (SET_ERROR(ENXIO));
615 	}
616 	rc = zvol_remove_zv(zv);
617 	mutex_exit(&zfsdev_state_lock);
618 	return (rc);
619 }
620 
621 int
622 zvol_first_open(zvol_state_t *zv)
623 {
624 	objset_t *os;
625 	uint64_t volsize;
626 	int error;
627 	uint64_t readonly;
628 
629 	/* lie and say we're read-only */
630 	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
631 	    zvol_tag, &os);
632 	if (error)
633 		return (error);
634 
635 	zv->zv_objset = os;
636 	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
637 	if (error) {
638 		ASSERT(error == 0);
639 		dmu_objset_disown(os, zvol_tag);
640 		return (error);
641 	}
642 
643 	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
644 	if (error) {
645 		dmu_objset_disown(os, zvol_tag);
646 		return (error);
647 	}
648 
649 	zvol_size_changed(zv, volsize);
650 	zv->zv_zilog = zil_open(os, zvol_get_data);
651 
652 	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
653 	    NULL) == 0);
654 	if (readonly || dmu_objset_is_snapshot(os) ||
655 	    !spa_writeable(dmu_objset_spa(os)))
656 		zv->zv_flags |= ZVOL_RDONLY;
657 	else
658 		zv->zv_flags &= ~ZVOL_RDONLY;
659 	return (error);
660 }
661 
662 void
663 zvol_last_close(zvol_state_t *zv)
664 {
665 	zil_close(zv->zv_zilog);
666 	zv->zv_zilog = NULL;
667 
668 	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
669 	zv->zv_dbuf = NULL;
670 
671 	/*
672 	 * Evict cached data
673 	 */
674 	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
675 	    !(zv->zv_flags & ZVOL_RDONLY))
676 		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
677 	dmu_objset_evict_dbufs(zv->zv_objset);
678 
679 	dmu_objset_disown(zv->zv_objset, zvol_tag);
680 	zv->zv_objset = NULL;
681 }
682 
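/*
 * Preallocate the entire volume (used when dumpifying it) so that dump I/O
 * never has to allocate space.  On failure, free whatever was preallocated.
 */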
683 int
684 zvol_prealloc(zvol_state_t *zv)
685 {
686 	objset_t *os = zv->zv_objset;
687 	dmu_tx_t *tx;
688 	uint64_t refd, avail, usedobjs, availobjs;
689 	uint64_t resid = zv->zv_volsize;
690 	uint64_t off = 0;
691 
692 	/* Check the space usage before attempting to allocate the space */
693 	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
694 	if (avail < zv->zv_volsize)
695 		return (SET_ERROR(ENOSPC));
696 
697 	/* Free old extents if they exist */
698 	zvol_free_extents(zv);
699 
700 	while (resid != 0) {
701 		int error;
702 		uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE);
703 
704 		tx = dmu_tx_create(os);
705 		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
706 		error = dmu_tx_assign(tx, TXG_WAIT);
707 		if (error) {
708 			dmu_tx_abort(tx);
709 			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
710 			return (error);
711 		}
712 		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
713 		dmu_tx_commit(tx);
714 		off += bytes;
715 		resid -= bytes;
716 	}
717 	txg_wait_synced(dmu_objset_pool(os), 0);
718 
719 	return (0);
720 }
721 
722 static int
723 zvol_update_volsize(objset_t *os, uint64_t volsize)
724 {
725 	dmu_tx_t *tx;
726 	int error;
727 
728 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
729 
730 	tx = dmu_tx_create(os);
731 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
732 	dmu_tx_mark_netfree(tx);
733 	error = dmu_tx_assign(tx, TXG_WAIT);
734 	if (error) {
735 		dmu_tx_abort(tx);
736 		return (error);
737 	}
738 
739 	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
740 	    &volsize, tx);
741 	dmu_tx_commit(tx);
742 
743 	if (error == 0)
744 		error = dmu_free_long_range(os,
745 		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
746 	return (error);
747 }
748 
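/*
 * Remove the minor nodes of every volume below the given name, i.e. any
 * zvol whose name begins with "<name>/".
 */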
749 void
750 zvol_remove_minors(const char *name)
751 {
752 	zvol_state_t *zv;
753 	char *namebuf;
754 	minor_t minor;
755 
756 	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
757 	(void) strncpy(namebuf, name, strlen(name));
758 	(void) strcat(namebuf, "/");
759 	mutex_enter(&zfsdev_state_lock);
760 	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
761 
762 		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
763 		if (zv == NULL)
764 			continue;
765 		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
766 			(void) zvol_remove_zv(zv);
767 	}
768 	kmem_free(namebuf, strlen(name) + 2);
769 
770 	mutex_exit(&zfsdev_state_lock);
771 }
772 
773 static int
774 zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
775 {
776 	uint64_t old_volsize = 0ULL;
777 	int error = 0;
778 
779 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
780 
781 	/*
782 	 * Reinitialize the dump area to the new size.  If we fail to
783 	 * resize the dump area, restore it to its original size.  We
784 	 * must set the new volsize prior to calling dumpvp_resize() so
785 	 * that the devices' new size(9P) is visible to the dump
786 	 * subsystem.
787 	 */
788 	old_volsize = zv->zv_volsize;
789 	zvol_size_changed(zv, volsize);
790 
791 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
792 		if ((error = zvol_dumpify(zv)) != 0 ||
793 		    (error = dumpvp_resize()) != 0) {
794 			int dumpify_error;
795 
796 			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
797 			zvol_size_changed(zv, old_volsize);
798 			dumpify_error = zvol_dumpify(zv);
799 			error = dumpify_error ? dumpify_error : error;
800 		}
801 	}
802 
803 	/*
804 	 * Generate a LUN expansion event.
805 	 */
806 	if (error == 0) {
807 		sysevent_id_t eid;
808 		nvlist_t *attr;
809 		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
810 
811 		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
812 		    zv->zv_minor);
813 
814 		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
815 		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
816 
817 		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
818 		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
819 
820 		nvlist_free(attr);
821 		kmem_free(physpath, MAXPATHLEN);
822 	}
823 	return (error);
824 }
825 
826 int
827 zvol_set_volsize(const char *name, uint64_t volsize)
828 {
829 	zvol_state_t *zv = NULL;
830 	objset_t *os;
831 	int error;
832 	dmu_object_info_t doi;
833 	uint64_t readonly;
834 	boolean_t owned = B_FALSE;
835 
836 	error = dsl_prop_get_integer(name,
837 	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
838 	if (error != 0)
839 		return (error);
840 	if (readonly)
841 		return (SET_ERROR(EROFS));
842 
843 	mutex_enter(&zfsdev_state_lock);
844 	zv = zvol_minor_lookup(name);
845 
846 	if (zv == NULL || zv->zv_objset == NULL) {
847 		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
848 		    FTAG, &os)) != 0) {
849 			mutex_exit(&zfsdev_state_lock);
850 			return (error);
851 		}
852 		owned = B_TRUE;
853 		if (zv != NULL)
854 			zv->zv_objset = os;
855 	} else {
856 		os = zv->zv_objset;
857 	}
858 
859 	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
860 	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
861 		goto out;
862 
863 	error = zvol_update_volsize(os, volsize);
864 
865 	if (error == 0 && zv != NULL)
866 		error = zvol_update_live_volsize(zv, volsize);
867 out:
868 	if (owned) {
869 		dmu_objset_disown(os, FTAG);
870 		if (zv != NULL)
871 			zv->zv_objset = NULL;
872 	}
873 	mutex_exit(&zfsdev_state_lock);
874 	return (error);
875 }
876 
877 /*ARGSUSED*/
878 int
879 zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
880 {
881 	zvol_state_t *zv;
882 	int err = 0;
883 
884 	mutex_enter(&zfsdev_state_lock);
885 
886 	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
887 	if (zv == NULL) {
888 		mutex_exit(&zfsdev_state_lock);
889 		return (SET_ERROR(ENXIO));
890 	}
891 
892 	if (zv->zv_total_opens == 0)
893 		err = zvol_first_open(zv);
894 	if (err) {
895 		mutex_exit(&zfsdev_state_lock);
896 		return (err);
897 	}
898 	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
899 		err = SET_ERROR(EROFS);
900 		goto out;
901 	}
902 	if (zv->zv_flags & ZVOL_EXCL) {
903 		err = SET_ERROR(EBUSY);
904 		goto out;
905 	}
906 	if (flag & FEXCL) {
907 		if (zv->zv_total_opens != 0) {
908 			err = SET_ERROR(EBUSY);
909 			goto out;
910 		}
911 		zv->zv_flags |= ZVOL_EXCL;
912 	}
913 
914 	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
915 		zv->zv_open_count[otyp]++;
916 		zv->zv_total_opens++;
917 	}
918 	mutex_exit(&zfsdev_state_lock);
919 
920 	return (err);
921 out:
922 	if (zv->zv_total_opens == 0)
923 		zvol_last_close(zv);
924 	mutex_exit(&zfsdev_state_lock);
925 	return (err);
926 }
927 
928 /*ARGSUSED*/
929 int
930 zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
931 {
932 	minor_t minor = getminor(dev);
933 	zvol_state_t *zv;
934 	int error = 0;
935 
936 	mutex_enter(&zfsdev_state_lock);
937 
938 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
939 	if (zv == NULL) {
940 		mutex_exit(&zfsdev_state_lock);
941 		return (SET_ERROR(ENXIO));
942 	}
943 
944 	if (zv->zv_flags & ZVOL_EXCL) {
945 		ASSERT(zv->zv_total_opens == 1);
946 		zv->zv_flags &= ~ZVOL_EXCL;
947 	}
948 
949 	/*
950 	 * If the open count is zero, this is a spurious close.
951 	 * That indicates a bug in the kernel / DDI framework.
952 	 */
953 	ASSERT(zv->zv_open_count[otyp] != 0);
954 	ASSERT(zv->zv_total_opens != 0);
955 
956 	/*
957 	 * You may get multiple opens, but only one close.
958 	 */
959 	zv->zv_open_count[otyp]--;
960 	zv->zv_total_opens--;
961 
962 	if (zv->zv_total_opens == 0)
963 		zvol_last_close(zv);
964 
965 	mutex_exit(&zfsdev_state_lock);
966 	return (error);
967 }
968 
969 static void
970 zvol_get_done(zgd_t *zgd, int error)
971 {
972 	if (zgd->zgd_db)
973 		dmu_buf_rele(zgd->zgd_db, zgd);
974 
975 	zfs_range_unlock(zgd->zgd_rl);
976 
977 	if (error == 0 && zgd->zgd_bp)
978 		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
979 
980 	kmem_free(zgd, sizeof (zgd_t));
981 }
982 
983 /*
984  * Get data to generate a TX_WRITE intent log record.
985  */
986 static int
987 zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
988 {
989 	zvol_state_t *zv = arg;
990 	objset_t *os = zv->zv_objset;
991 	uint64_t object = ZVOL_OBJ;
992 	uint64_t offset = lr->lr_offset;
993 	uint64_t size = lr->lr_length;	/* length of user data */
994 	dmu_buf_t *db;
995 	zgd_t *zgd;
996 	int error;
997 
998 	ASSERT(zio != NULL);
999 	ASSERT(size != 0);
1000 
1001 	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
1002 	zgd->zgd_zilog = zv->zv_zilog;
1003 	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
1004 
1005 	/*
1006 	 * Write records come in two flavors: immediate and indirect.
1007 	 * For small writes it's cheaper to store the data with the
1008 	 * log record (immediate); for large writes it's cheaper to
1009 	 * sync the data and get a pointer to it (indirect) so that
1010 	 * we don't have to write the data twice.
1011 	 */
1012 	if (buf != NULL) {	/* immediate write */
1013 		error = dmu_read(os, object, offset, size, buf,
1014 		    DMU_READ_NO_PREFETCH);
1015 	} else {
1016 		size = zv->zv_volblocksize;
1017 		offset = P2ALIGN(offset, size);
1018 		error = dmu_buf_hold(os, object, offset, zgd, &db,
1019 		    DMU_READ_NO_PREFETCH);
1020 		if (error == 0) {
1021 			blkptr_t *bp = &lr->lr_blkptr;
1022 
1023 			zgd->zgd_db = db;
1024 			zgd->zgd_bp = bp;
1025 
1026 			ASSERT(db->db_offset == offset);
1027 			ASSERT(db->db_size == size);
1028 
1029 			error = dmu_sync(zio, lr->lr_common.lrc_txg,
1030 			    zvol_get_done, zgd);
1031 
1032 			if (error == 0)
1033 				return (0);
1034 		}
1035 	}
1036 
1037 	zvol_get_done(zgd, error);
1038 
1039 	return (error);
1040 }
1041 
1042 /*
1043  * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
1044  *
1045  * We store data in the log buffers if it's small enough.
1046  * Otherwise we will later flush the data out via dmu_sync().
1047  */
1048 ssize_t zvol_immediate_write_sz = 32768;
1049 
1050 static void
1051 zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
1052     boolean_t sync)
1053 {
1054 	uint32_t blocksize = zv->zv_volblocksize;
1055 	zilog_t *zilog = zv->zv_zilog;
1056 	itx_wr_state_t write_state;
1057 
1058 	if (zil_replaying(zilog, tx))
1059 		return;
1060 
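	/*
	 * Decide how the data will reach the log:
	 *   WR_INDIRECT  - sync the data via dmu_sync() and log only a
	 *                  block pointer (logbias=throughput, or full-block
	 *                  writes when the block size is large and the pool
	 *                  has no separate log device).
	 *   WR_COPIED    - copy the data into the itx now (synchronous
	 *                  writes small enough to embed; larger ones are
	 *                  downgraded to WR_NEED_COPY below).
	 *   WR_NEED_COPY - defer the copy until the itx is committed.
	 */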
1061 	if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
1062 		write_state = WR_INDIRECT;
1063 	else if (!spa_has_slogs(zilog->zl_spa) &&
1064 	    resid >= blocksize && blocksize > zvol_immediate_write_sz)
1065 		write_state = WR_INDIRECT;
1066 	else if (sync)
1067 		write_state = WR_COPIED;
1068 	else
1069 		write_state = WR_NEED_COPY;
1070 
1071 	while (resid) {
1072 		itx_t *itx;
1073 		lr_write_t *lr;
1074 		itx_wr_state_t wr_state = write_state;
1075 		ssize_t len = resid;
1076 
1077 		if (wr_state == WR_COPIED && resid > ZIL_MAX_COPIED_DATA)
1078 			wr_state = WR_NEED_COPY;
1079 		else if (wr_state == WR_INDIRECT)
1080 			len = MIN(blocksize - P2PHASE(off, blocksize), resid);
1081 
1082 		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
1083 		    (wr_state == WR_COPIED ? len : 0));
1084 		lr = (lr_write_t *)&itx->itx_lr;
1085 		if (wr_state == WR_COPIED && dmu_read(zv->zv_objset,
1086 		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
1087 			zil_itx_destroy(itx);
1088 			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1089 			lr = (lr_write_t *)&itx->itx_lr;
1090 			wr_state = WR_NEED_COPY;
1091 		}
1092 
1093 		itx->itx_wr_state = wr_state;
1094 		lr->lr_foid = ZVOL_OBJ;
1095 		lr->lr_offset = off;
1096 		lr->lr_length = len;
1097 		lr->lr_blkoff = 0;
1098 		BP_ZERO(&lr->lr_blkptr);
1099 
1100 		itx->itx_private = zv;
1101 		itx->itx_sync = sync;
1102 
1103 		zil_itx_assign(zilog, itx, tx);
1104 
1105 		off += len;
1106 		resid -= len;
1107 	}
1108 }
1109 
1110 static int
1111 zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
1112     uint64_t size, boolean_t doread, boolean_t isdump)
1113 {
1114 	vdev_disk_t *dvd;
1115 	int c;
1116 	int numerrors = 0;
1117 
1118 	if (vd->vdev_ops == &vdev_mirror_ops ||
1119 	    vd->vdev_ops == &vdev_replacing_ops ||
1120 	    vd->vdev_ops == &vdev_spare_ops) {
1121 		for (c = 0; c < vd->vdev_children; c++) {
1122 			int err = zvol_dumpio_vdev(vd->vdev_child[c],
1123 			    addr, offset, origoffset, size, doread, isdump);
1124 			if (err != 0) {
1125 				numerrors++;
1126 			} else if (doread) {
1127 				break;
1128 			}
1129 		}
1130 	}
1131 
1132 	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
1133 		return (numerrors < vd->vdev_children ? 0 : EIO);
1134 
1135 	if (doread && !vdev_readable(vd))
1136 		return (SET_ERROR(EIO));
1137 	else if (!doread && !vdev_writeable(vd))
1138 		return (SET_ERROR(EIO));
1139 
1140 	if (vd->vdev_ops == &vdev_raidz_ops) {
1141 		return (vdev_raidz_physio(vd,
1142 		    addr, size, offset, origoffset, doread, isdump));
1143 	}
1144 
1145 	offset += VDEV_LABEL_START_SIZE;
1146 
1147 	if (ddi_in_panic() || isdump) {
1148 		ASSERT(!doread);
1149 		if (doread)
1150 			return (SET_ERROR(EIO));
1151 		dvd = vd->vdev_tsd;
1152 		ASSERT3P(dvd, !=, NULL);
1153 		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
1154 		    lbtodb(size)));
1155 	} else {
1156 		dvd = vd->vdev_tsd;
1157 		ASSERT3P(dvd, !=, NULL);
1158 		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
1159 		    offset, doread ? B_READ : B_WRITE));
1160 	}
1161 }
1162 
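/*
 * Perform dump I/O directly against the physical vdevs: translate the
 * logical zvol offset into a vdev offset using the extent list built by
 * zvol_get_lbas(), then issue the request through zvol_dumpio_vdev().
 */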
1163 static int
1164 zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
1165     boolean_t doread, boolean_t isdump)
1166 {
1167 	vdev_t *vd;
1168 	int error;
1169 	zvol_extent_t *ze;
1170 	spa_t *spa = dmu_objset_spa(zv->zv_objset);
1171 
1172 	/* Must be sector aligned, and not straddle a block boundary. */
1173 	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
1174 	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
1175 		return (SET_ERROR(EINVAL));
1176 	}
1177 	ASSERT(size <= zv->zv_volblocksize);
1178 
1179 	/* Locate the extent this belongs to */
1180 	ze = list_head(&zv->zv_extents);
1181 	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
1182 		offset -= ze->ze_nblks * zv->zv_volblocksize;
1183 		ze = list_next(&zv->zv_extents, ze);
1184 	}
1185 
1186 	if (ze == NULL)
1187 		return (SET_ERROR(EINVAL));
1188 
1189 	if (!ddi_in_panic())
1190 		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
1191 
1192 	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
1193 	offset += DVA_GET_OFFSET(&ze->ze_dva);
1194 	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
1195 	    size, doread, isdump);
1196 
1197 	if (!ddi_in_panic())
1198 		spa_config_exit(spa, SCL_STATE, FTAG);
1199 
1200 	return (error);
1201 }
1202 
1203 int
1204 zvol_strategy(buf_t *bp)
1205 {
1206 	zfs_soft_state_t *zs = NULL;
1207 	zvol_state_t *zv;
1208 	uint64_t off, volsize;
1209 	size_t resid;
1210 	char *addr;
1211 	objset_t *os;
1212 	rl_t *rl;
1213 	int error = 0;
1214 	boolean_t doread = bp->b_flags & B_READ;
1215 	boolean_t is_dumpified;
1216 	boolean_t sync;
1217 
1218 	if (getminor(bp->b_edev) == 0) {
1219 		error = SET_ERROR(EINVAL);
1220 	} else {
1221 		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
1222 		if (zs == NULL)
1223 			error = SET_ERROR(ENXIO);
1224 		else if (zs->zss_type != ZSST_ZVOL)
1225 			error = SET_ERROR(EINVAL);
1226 	}
1227 
1228 	if (error) {
1229 		bioerror(bp, error);
1230 		biodone(bp);
1231 		return (0);
1232 	}
1233 
1234 	zv = zs->zss_data;
1235 
1236 	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
1237 		bioerror(bp, EROFS);
1238 		biodone(bp);
1239 		return (0);
1240 	}
1241 
1242 	off = ldbtob(bp->b_blkno);
1243 	volsize = zv->zv_volsize;
1244 
1245 	os = zv->zv_objset;
1246 	ASSERT(os != NULL);
1247 
1248 	bp_mapin(bp);
1249 	addr = bp->b_un.b_addr;
1250 	resid = bp->b_bcount;
1251 
1252 	if (resid > 0 && (off < 0 || off >= volsize)) {
1253 		bioerror(bp, EIO);
1254 		biodone(bp);
1255 		return (0);
1256 	}
1257 
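	/*
	 * A write must be committed to the ZIL before biodone() when the
	 * caller did not request async I/O and the write cache is disabled,
	 * or when the dataset has sync=always.  Reads and dumpified volumes
	 * never require a ZIL commit.
	 */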
1258 	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
1259 	sync = ((!(bp->b_flags & B_ASYNC) &&
1260 	    !(zv->zv_flags & ZVOL_WCE)) ||
1261 	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
1262 	    !doread && !is_dumpified;
1263 
1264 	/*
1265 	 * There must be no buffer changes when doing a dmu_sync() because
1266 	 * we can't change the data whilst calculating the checksum.
1267 	 */
1268 	rl = zfs_range_lock(&zv->zv_znode, off, resid,
1269 	    doread ? RL_READER : RL_WRITER);
1270 
1271 	while (resid != 0 && off < volsize) {
1272 		size_t size = MIN(resid, zvol_maxphys);
1273 		if (is_dumpified) {
1274 			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
1275 			error = zvol_dumpio(zv, addr, off, size,
1276 			    doread, B_FALSE);
1277 		} else if (doread) {
1278 			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
1279 			    DMU_READ_PREFETCH);
1280 		} else {
1281 			dmu_tx_t *tx = dmu_tx_create(os);
1282 			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
1283 			error = dmu_tx_assign(tx, TXG_WAIT);
1284 			if (error) {
1285 				dmu_tx_abort(tx);
1286 			} else {
1287 				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
1288 				zvol_log_write(zv, tx, off, size, sync);
1289 				dmu_tx_commit(tx);
1290 			}
1291 		}
1292 		if (error) {
1293 			/* convert checksum errors into IO errors */
1294 			if (error == ECKSUM)
1295 				error = SET_ERROR(EIO);
1296 			break;
1297 		}
1298 		off += size;
1299 		addr += size;
1300 		resid -= size;
1301 	}
1302 	zfs_range_unlock(rl);
1303 
1304 	if ((bp->b_resid = resid) == bp->b_bcount)
1305 		bioerror(bp, off > volsize ? EINVAL : error);
1306 
1307 	if (sync)
1308 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1309 	biodone(bp);
1310 
1311 	return (0);
1312 }
1313 
1314 /*
1315  * Set the buffer count to the zvol maximum transfer.
1316  * Using our own routine instead of the default minphys()
1317  * means that for larger writes we write bigger buffers on X86
1318  * (128K instead of 56K) and flush the disk write cache less often:
1319  * every zvol_maxphys (currently 1MB) instead of every minphys
1320  * (currently 56K on X86 and 128K on sparc).
1321  */
1322 void
1323 zvol_minphys(struct buf *bp)
1324 {
1325 	if (bp->b_bcount > zvol_maxphys)
1326 		bp->b_bcount = zvol_maxphys;
1327 }
1328 
1329 int
1330 zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
1331 {
1332 	minor_t minor = getminor(dev);
1333 	zvol_state_t *zv;
1334 	int error = 0;
1335 	uint64_t size;
1336 	uint64_t boff;
1337 	uint64_t resid;
1338 
1339 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1340 	if (zv == NULL)
1341 		return (SET_ERROR(ENXIO));
1342 
1343 	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
1344 		return (SET_ERROR(EINVAL));
1345 
1346 	boff = ldbtob(blkno);
1347 	resid = ldbtob(nblocks);
1348 
1349 	VERIFY3U(boff + resid, <=, zv->zv_volsize);
1350 
1351 	while (resid) {
1352 		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
1353 		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
1354 		if (error)
1355 			break;
1356 		boff += size;
1357 		addr += size;
1358 		resid -= size;
1359 	}
1360 
1361 	return (error);
1362 }
1363 
1364 /*ARGSUSED*/
1365 int
1366 zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
1367 {
1368 	minor_t minor = getminor(dev);
1369 	zvol_state_t *zv;
1370 	uint64_t volsize;
1371 	rl_t *rl;
1372 	int error = 0;
1373 
1374 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1375 	if (zv == NULL)
1376 		return (SET_ERROR(ENXIO));
1377 
1378 	volsize = zv->zv_volsize;
1379 	if (uio->uio_resid > 0 &&
1380 	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1381 		return (SET_ERROR(EIO));
1382 
1383 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1384 		error = physio(zvol_strategy, NULL, dev, B_READ,
1385 		    zvol_minphys, uio);
1386 		return (error);
1387 	}
1388 
1389 	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1390 	    RL_READER);
1391 	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1392 		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1393 
1394 		/* don't read past the end */
1395 		if (bytes > volsize - uio->uio_loffset)
1396 			bytes = volsize - uio->uio_loffset;
1397 
1398 		error =  dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
1399 		if (error) {
1400 			/* convert checksum errors into IO errors */
1401 			if (error == ECKSUM)
1402 				error = SET_ERROR(EIO);
1403 			break;
1404 		}
1405 	}
1406 	zfs_range_unlock(rl);
1407 	return (error);
1408 }
1409 
1410 /*ARGSUSED*/
1411 int
1412 zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
1413 {
1414 	minor_t minor = getminor(dev);
1415 	zvol_state_t *zv;
1416 	uint64_t volsize;
1417 	rl_t *rl;
1418 	int error = 0;
1419 	boolean_t sync;
1420 
1421 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1422 	if (zv == NULL)
1423 		return (SET_ERROR(ENXIO));
1424 
1425 	volsize = zv->zv_volsize;
1426 	if (uio->uio_resid > 0 &&
1427 	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1428 		return (SET_ERROR(EIO));
1429 
1430 	if (zv->zv_flags & ZVOL_DUMPIFIED) {
1431 		error = physio(zvol_strategy, NULL, dev, B_WRITE,
1432 		    zvol_minphys, uio);
1433 		return (error);
1434 	}
1435 
1436 	sync = !(zv->zv_flags & ZVOL_WCE) ||
1437 	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
1438 
1439 	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1440 	    RL_WRITER);
1441 	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1442 		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1443 		uint64_t off = uio->uio_loffset;
1444 		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1445 
1446 		if (bytes > volsize - off)	/* don't write past the end */
1447 			bytes = volsize - off;
1448 
1449 		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
1450 		error = dmu_tx_assign(tx, TXG_WAIT);
1451 		if (error) {
1452 			dmu_tx_abort(tx);
1453 			break;
1454 		}
1455 		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
1456 		if (error == 0)
1457 			zvol_log_write(zv, tx, off, bytes, sync);
1458 		dmu_tx_commit(tx);
1459 
1460 		if (error)
1461 			break;
1462 	}
1463 	zfs_range_unlock(rl);
1464 	if (sync)
1465 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1466 	return (error);
1467 }
1468 
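/*
 * Synthesize a minimal EFI label for the volume: a GPT header at LBA 1 and
 * a single EFI_RESERVED partition entry at LBA 2 covering LBAs 34 through
 * (vs >> bs) - 1.  The caller asks for LBA 1 (the GPT, followed by the GPE
 * if there is room) or LBA 2 (the GPE alone).
 */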
1469 int
1470 zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
1471 {
1472 	struct uuid uuid = EFI_RESERVED;
1473 	efi_gpe_t gpe = { 0 };
1474 	uint32_t crc;
1475 	dk_efi_t efi;
1476 	int length;
1477 	char *ptr;
1478 
1479 	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
1480 		return (SET_ERROR(EFAULT));
1481 	ptr = (char *)(uintptr_t)efi.dki_data_64;
1482 	length = efi.dki_length;
1483 	/*
1484 	 * Some clients may attempt to request a PMBR for the
1485 	 * zvol.  Currently this interface will return EINVAL to
1486 	 * such requests.  These requests could be supported by
1487 	 * adding a check for lba == 0 and consing up an appropriate
1488 	 * PMBR.
1489 	 */
1490 	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
1491 		return (SET_ERROR(EINVAL));
1492 
1493 	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
1494 	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
1495 	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
1496 
1497 	if (efi.dki_lba == 1) {
1498 		efi_gpt_t gpt = { 0 };
1499 
1500 		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
1501 		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
1502 		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
1503 		gpt.efi_gpt_MyLBA = LE_64(1ULL);
1504 		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
1505 		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
1506 		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
1507 		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
1508 		gpt.efi_gpt_SizeOfPartitionEntry =
1509 		    LE_32(sizeof (efi_gpe_t));
1510 		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
1511 		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
1512 		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
1513 		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
1514 		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
1515 		    flag))
1516 			return (SET_ERROR(EFAULT));
1517 		ptr += sizeof (gpt);
1518 		length -= sizeof (gpt);
1519 	}
1520 	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
1521 	    length), flag))
1522 		return (SET_ERROR(EFAULT));
1523 	return (0);
1524 }
1525 
1526 /*
1527  * BEGIN entry points to allow external callers access to the volume.
1528  */
1529 /*
1530  * Return the volume parameters needed for access from an external caller.
1531  * These values are invariant as long as the volume is held open.
1532  */
1533 int
1534 zvol_get_volume_params(minor_t minor, uint64_t *blksize,
1535     uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
1536     void **rl_hdl, void **bonus_hdl)
1537 {
1538 	zvol_state_t *zv;
1539 
1540 	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1541 	if (zv == NULL)
1542 		return (SET_ERROR(ENXIO));
1543 	if (zv->zv_flags & ZVOL_DUMPIFIED)
1544 		return (SET_ERROR(ENXIO));
1545 
1546 	ASSERT(blksize && max_xfer_len && minor_hdl &&
1547 	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);
1548 
1549 	*blksize = zv->zv_volblocksize;
1550 	*max_xfer_len = (uint64_t)zvol_maxphys;
1551 	*minor_hdl = zv;
1552 	*objset_hdl = zv->zv_objset;
1553 	*zil_hdl = zv->zv_zilog;
1554 	*rl_hdl = &zv->zv_znode;
1555 	*bonus_hdl = zv->zv_dbuf;
1556 	return (0);
1557 }
1558 
1559 /*
1560  * Return the current volume size to an external caller.
1561  * The size can change while the volume is open.
1562  */
1563 uint64_t
1564 zvol_get_volume_size(void *minor_hdl)
1565 {
1566 	zvol_state_t *zv = minor_hdl;
1567 
1568 	return (zv->zv_volsize);
1569 }
1570 
1571 /*
1572  * Return the current WCE setting to an external caller.
1573  * The WCE setting can change while the volume is open.
1574  */
1575 int
1576 zvol_get_volume_wce(void *minor_hdl)
1577 {
1578 	zvol_state_t *zv = minor_hdl;
1579 
1580 	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
1581 }
1582 
1583 /*
1584  * Entry point for external callers to zvol_log_write
1585  */
1586 void
1587 zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
1588     boolean_t sync)
1589 {
1590 	zvol_state_t *zv = minor_hdl;
1591 
1592 	zvol_log_write(zv, tx, off, resid, sync);
1593 }
1594 /*
1595  * END entry points to allow external callers access to the volume.
1596  */
1597 
1598 /*
1599  * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
1600  */
1601 static void
1602 zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
1603     boolean_t sync)
1604 {
1605 	itx_t *itx;
1606 	lr_truncate_t *lr;
1607 	zilog_t *zilog = zv->zv_zilog;
1608 
1609 	if (zil_replaying(zilog, tx))
1610 		return;
1611 
1612 	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1613 	lr = (lr_truncate_t *)&itx->itx_lr;
1614 	lr->lr_foid = ZVOL_OBJ;
1615 	lr->lr_offset = off;
1616 	lr->lr_length = len;
1617 
1618 	itx->itx_sync = sync;
1619 	zil_itx_assign(zilog, itx, tx);
1620 }
1621 
1622 /*
1623  * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
1624  * Also a dirtbag dkio ioctl for unmap/free-block functionality.
1625  */
1626 /*ARGSUSED*/
1627 int
1628 zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
1629 {
1630 	zvol_state_t *zv;
1631 	struct dk_callback *dkc;
1632 	int error = 0;
1633 	rl_t *rl;
1634 
1635 	mutex_enter(&zfsdev_state_lock);
1636 
1637 	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
1638 
1639 	if (zv == NULL) {
1640 		mutex_exit(&zfsdev_state_lock);
1641 		return (SET_ERROR(ENXIO));
1642 	}
1643 	ASSERT(zv->zv_total_opens > 0);
1644 
1645 	switch (cmd) {
1646 
1647 	case DKIOCINFO:
1648 	{
1649 		struct dk_cinfo dki;
1650 
1651 		bzero(&dki, sizeof (dki));
1652 		(void) strcpy(dki.dki_cname, "zvol");
1653 		(void) strcpy(dki.dki_dname, "zvol");
1654 		dki.dki_ctype = DKC_UNKNOWN;
1655 		dki.dki_unit = getminor(dev);
1656 		dki.dki_maxtransfer =
1657 		    1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs);
1658 		mutex_exit(&zfsdev_state_lock);
1659 		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
1660 			error = SET_ERROR(EFAULT);
1661 		return (error);
1662 	}
1663 
1664 	case DKIOCGMEDIAINFO:
1665 	{
1666 		struct dk_minfo dkm;
1667 
1668 		bzero(&dkm, sizeof (dkm));
1669 		dkm.dki_lbsize = 1U << zv->zv_min_bs;
1670 		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1671 		dkm.dki_media_type = DK_UNKNOWN;
1672 		mutex_exit(&zfsdev_state_lock);
1673 		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
1674 			error = SET_ERROR(EFAULT);
1675 		return (error);
1676 	}
1677 
1678 	case DKIOCGMEDIAINFOEXT:
1679 	{
1680 		struct dk_minfo_ext dkmext;
1681 
1682 		bzero(&dkmext, sizeof (dkmext));
1683 		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
1684 		dkmext.dki_pbsize = zv->zv_volblocksize;
1685 		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1686 		dkmext.dki_media_type = DK_UNKNOWN;
1687 		mutex_exit(&zfsdev_state_lock);
1688 		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
1689 			error = SET_ERROR(EFAULT);
1690 		return (error);
1691 	}
1692 
1693 	case DKIOCGETEFI:
1694 	{
1695 		uint64_t vs = zv->zv_volsize;
1696 		uint8_t bs = zv->zv_min_bs;
1697 
1698 		mutex_exit(&zfsdev_state_lock);
1699 		error = zvol_getefi((void *)arg, flag, vs, bs);
1700 		return (error);
1701 	}
1702 
1703 	case DKIOCFLUSHWRITECACHE:
1704 		dkc = (struct dk_callback *)arg;
1705 		mutex_exit(&zfsdev_state_lock);
1706 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
1707 		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
1708 			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
1709 			error = 0;
1710 		}
1711 		return (error);
1712 
1713 	case DKIOCGETWCE:
1714 	{
1715 		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
1716 		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
1717 		    flag))
1718 			error = SET_ERROR(EFAULT);
1719 		break;
1720 	}
1721 	case DKIOCSETWCE:
1722 	{
1723 		int wce;
1724 		if (ddi_copyin((void *)arg, &wce, sizeof (int),
1725 		    flag)) {
1726 			error = SET_ERROR(EFAULT);
1727 			break;
1728 		}
1729 		if (wce) {
1730 			zv->zv_flags |= ZVOL_WCE;
1731 			mutex_exit(&zfsdev_state_lock);
1732 		} else {
1733 			zv->zv_flags &= ~ZVOL_WCE;
1734 			mutex_exit(&zfsdev_state_lock);
1735 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
1736 		}
1737 		return (0);
1738 	}
1739 
1740 	case DKIOCGGEOM:
1741 	case DKIOCGVTOC:
1742 		/*
1743 		 * commands using these (like prtvtoc) expect ENOTSUP
1744 		 * since we're emulating an EFI label
1745 		 */
1746 		error = SET_ERROR(ENOTSUP);
1747 		break;
1748 
1749 	case DKIOCDUMPINIT:
1750 		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
1751 		    RL_WRITER);
1752 		error = zvol_dumpify(zv);
1753 		zfs_range_unlock(rl);
1754 		break;
1755 
1756 	case DKIOCDUMPFINI:
1757 		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
1758 			break;
1759 		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
1760 		    RL_WRITER);
1761 		error = zvol_dump_fini(zv);
1762 		zfs_range_unlock(rl);
1763 		break;
1764 
1765 	case DKIOCFREE:
1766 	{
1767 		dkioc_free_t df;
1768 		dmu_tx_t *tx;
1769 
1770 		if (!zvol_unmap_enabled)
1771 			break;
1772 
1773 		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
1774 			error = SET_ERROR(EFAULT);
1775 			break;
1776 		}
1777 
1778 		/*
1779 		 * Apply Postel's Law to length-checking.  If they overshoot,
1780 		 * just blank out until the end, if there's a need to blank
1781 		 * out anything.
1782 		 */
1783 		if (df.df_start >= zv->zv_volsize)
1784 			break;	/* No need to do anything... */
1785 
1786 		mutex_exit(&zfsdev_state_lock);
1787 
1788 		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
1789 		    RL_WRITER);
1790 		tx = dmu_tx_create(zv->zv_objset);
1791 		dmu_tx_mark_netfree(tx);
1792 		error = dmu_tx_assign(tx, TXG_WAIT);
1793 		if (error != 0) {
1794 			dmu_tx_abort(tx);
1795 		} else {
1796 			zvol_log_truncate(zv, tx, df.df_start,
1797 			    df.df_length, B_TRUE);
1798 			dmu_tx_commit(tx);
1799 			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
1800 			    df.df_start, df.df_length);
1801 		}
1802 
1803 		zfs_range_unlock(rl);
1804 
1805 		/*
1806 		 * If the write cache is disabled, the 'sync' property is set
1807 		 * to 'always', or the caller asked for a synchronous free,
1808 		 * commit this operation to the zil.  This also syncs any
1809 		 * previously uncommitted writes to the zvol object.
1810 		 * The commit is only issued when the zvol_unmap_sync_enabled
1811 		 * tunable is set.
1812 		 */
1813 		if ((error == 0) && zvol_unmap_sync_enabled &&
1814 		    (!(zv->zv_flags & ZVOL_WCE) ||
1815 		    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS) ||
1816 		    (df.df_flags & DF_WAIT_SYNC))) {
1817 			zil_commit(zv->zv_zilog, ZVOL_OBJ);
1818 		}
1819 
1820 		return (error);
1821 	}
1822 
1823 	default:
1824 		error = SET_ERROR(ENOTTY);
1825 		break;
1826 
1827 	}
1828 	mutex_exit(&zfsdev_state_lock);
1829 	return (error);
1830 }
1831 
1832 int
1833 zvol_busy(void)
1834 {
1835 	return (zvol_minors != 0);
1836 }
1837 
1838 void
1839 zvol_init(void)
1840 {
1841 	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
1842 	    1) == 0);
1843 	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
1844 }
1845 
1846 void
1847 zvol_fini(void)
1848 {
1849 	mutex_destroy(&zfsdev_state_lock);
1850 	ddi_soft_state_fini(&zfsdev_state);
1851 }
1852 
1853 /*ARGSUSED*/
1854 static int
1855 zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
1856 {
1857 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1858 
1859 	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1860 		return (1);
1861 	return (0);
1862 }
1863 
1864 /*ARGSUSED*/
1865 static void
1866 zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
1867 {
1868 	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1869 
1870 	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
1871 }
1872 
1873 static int
1874 zvol_dump_init(zvol_state_t *zv, boolean_t resize)
1875 {
1876 	dmu_tx_t *tx;
1877 	int error;
1878 	objset_t *os = zv->zv_objset;
1879 	spa_t *spa = dmu_objset_spa(os);
1880 	vdev_t *vd = spa->spa_root_vdev;
1881 	nvlist_t *nv = NULL;
1882 	uint64_t version = spa_version(spa);
1883 	uint64_t checksum, compress, refresrv, vbs, dedup;
1884 
1885 	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
1886 	ASSERT(vd->vdev_ops == &vdev_root_ops);
1887 
1888 	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
1889 	    DMU_OBJECT_END);
1890 	if (error != 0)
1891 		return (error);
1892 	/* wait for dmu_free_long_range to actually free the blocks */
1893 	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
1894 
1895 	/*
1896 	 * If the pool on which the dump device is being initialized has more
1897 	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
1898 	 * enabled.  If so, bump that feature's counter to indicate that the
1899 	 * feature is active. We also check the vdev type to handle the
1900 	 * following case:
1901 	 *   # zpool create test raidz disk1 disk2 disk3
1902 	 *   Here spa_root_vdev->vdev_children == 1 (the raidz vdev), but
1903 	 *   the raidz vdev itself has 3 children.
1904 	 */
1905 	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
1906 		if (!spa_feature_is_enabled(spa,
1907 		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1908 			return (SET_ERROR(ENOTSUP));
1909 		(void) dsl_sync_task(spa_name(spa),
1910 		    zfs_mvdev_dump_feature_check,
1911 		    zfs_mvdev_dump_activate_feature_sync, NULL,
1912 		    2, ZFS_SPACE_CHECK_RESERVED);
1913 	}
1914 
1915 	if (!resize) {
1916 		error = dsl_prop_get_integer(zv->zv_name,
1917 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
1918 		if (error == 0) {
1919 			error = dsl_prop_get_integer(zv->zv_name,
1920 			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum,
1921 			    NULL);
1922 		}
1923 		if (error == 0) {
1924 			error = dsl_prop_get_integer(zv->zv_name,
1925 			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
1926 			    &refresrv, NULL);
1927 		}
1928 		if (error == 0) {
1929 			error = dsl_prop_get_integer(zv->zv_name,
1930 			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs,
1931 			    NULL);
1932 		}
1933 		if (version >= SPA_VERSION_DEDUP && error == 0) {
1934 			error = dsl_prop_get_integer(zv->zv_name,
1935 			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
1936 		}
1937 	}
1938 	if (error != 0)
1939 		return (error);
1940 
1941 	tx = dmu_tx_create(os);
1942 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
1943 	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
1944 	error = dmu_tx_assign(tx, TXG_WAIT);
1945 	if (error != 0) {
1946 		dmu_tx_abort(tx);
1947 		return (error);
1948 	}
1949 
1950 	/*
1951 	 * If we are resizing the dump device, we only need to update the
1952 	 * refreservation to match the new volsize.  Otherwise, we save off
1953 	 * the zvol's original properties so that we can restore them if the
1954 	 * zvol is ever undumpified.
1955 	 */
1956 	if (resize) {
1957 		error = zap_update(os, ZVOL_ZAP_OBJ,
1958 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
1959 		    &zv->zv_volsize, tx);
1960 	} else {
1961 		error = zap_update(os, ZVOL_ZAP_OBJ,
1962 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
1963 		    &compress, tx);
1964 		if (error == 0) {
1965 			error = zap_update(os, ZVOL_ZAP_OBJ,
1966 			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1,
1967 			    &checksum, tx);
1968 		}
1969 		if (error == 0) {
1970 			error = zap_update(os, ZVOL_ZAP_OBJ,
1971 			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
1972 			    &refresrv, tx);
1973 		}
1974 		if (error == 0) {
1975 			error = zap_update(os, ZVOL_ZAP_OBJ,
1976 			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
1977 			    &vbs, tx);
1978 		}
1979 		if (error == 0) {
1980 			error = dmu_object_set_blocksize(
1981 			    os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx);
1982 		}
1983 		if (version >= SPA_VERSION_DEDUP && error == 0) {
1984 			error = zap_update(os, ZVOL_ZAP_OBJ,
1985 			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
1986 			    &dedup, tx);
1987 		}
1988 		if (error == 0)
1989 			zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE;
1990 	}
1991 	dmu_tx_commit(tx);
1992 
1993 	/*
1994 	 * We only need to update the zvol's properties if we are initializing
1995 	 * the dump area for the first time.
1996 	 */
1997 	if (error == 0 && !resize) {
1998 		/*
1999 		 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2000 		 * function.  Otherwise, use the old default -- OFF.
2001 		 */
2002 		checksum = spa_feature_is_active(spa,
2003 		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
2004 		    ZIO_CHECKSUM_OFF;
2005 
2006 		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2007 		VERIFY(nvlist_add_uint64(nv,
2008 		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2009 		VERIFY(nvlist_add_uint64(nv,
2010 		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2011 		    ZIO_COMPRESS_OFF) == 0);
2012 		VERIFY(nvlist_add_uint64(nv,
2013 		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2014 		    checksum) == 0);
2015 		if (version >= SPA_VERSION_DEDUP) {
2016 			VERIFY(nvlist_add_uint64(nv,
2017 			    zfs_prop_to_name(ZFS_PROP_DEDUP),
2018 			    ZIO_CHECKSUM_OFF) == 0);
2019 		}
2020 
2021 		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2022 		    nv, NULL);
2023 		nvlist_free(nv);
2024 	}
2025 
2026 	/* Allocate the space for the dump */
2027 	if (error == 0)
2028 		error = zvol_prealloc(zv);
2029 	return (error);
2030 }
2031 
2032 static int
2033 zvol_dumpify(zvol_state_t *zv)
2034 {
2035 	int error = 0;
2036 	uint64_t dumpsize = 0;
2037 	dmu_tx_t *tx;
2038 	objset_t *os = zv->zv_objset;
2039 
2040 	if (zv->zv_flags & ZVOL_RDONLY)
2041 		return (SET_ERROR(EROFS));
2042 
2043 	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2044 	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
2045 		boolean_t resize = (dumpsize > 0);
2046 
2047 		if ((error = zvol_dump_init(zv, resize)) != 0) {
2048 			(void) zvol_dump_fini(zv);
2049 			return (error);
2050 		}
2051 	}
2052 
2053 	/*
2054 	 * Build up our lba mapping.
2055 	 */
2056 	error = zvol_get_lbas(zv);
2057 	if (error) {
2058 		(void) zvol_dump_fini(zv);
2059 		return (error);
2060 	}
2061 
2062 	tx = dmu_tx_create(os);
2063 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2064 	error = dmu_tx_assign(tx, TXG_WAIT);
2065 	if (error) {
2066 		dmu_tx_abort(tx);
2067 		(void) zvol_dump_fini(zv);
2068 		return (error);
2069 	}
2070 
2071 	zv->zv_flags |= ZVOL_DUMPIFIED;
2072 	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2073 	    &zv->zv_volsize, tx);
2074 	dmu_tx_commit(tx);
2075 
2076 	if (error) {
2077 		(void) zvol_dump_fini(zv);
2078 		return (error);
2079 	}
2080 
2081 	txg_wait_synced(dmu_objset_pool(os), 0);
2082 	return (0);
2083 }
2084 
2085 static int
2086 zvol_dump_fini(zvol_state_t *zv)
2087 {
2088 	dmu_tx_t *tx;
2089 	objset_t *os = zv->zv_objset;
2090 	nvlist_t *nv;
2091 	int error = 0;
2092 	uint64_t checksum, compress, refresrv, vbs, dedup;
2093 	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2094 
2095 	/*
2096 	 * Attempt to restore the zvol to its pre-dumpified state.
2097 	 * This is a best-effort attempt, as it's possible that not all
2098 	 * of these properties were saved during the dumpify process
2099 	 * (e.g. an error during zvol_dump_init).
2100 	 */
2101 
2102 	tx = dmu_tx_create(os);
2103 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2104 	error = dmu_tx_assign(tx, TXG_WAIT);
2105 	if (error) {
2106 		dmu_tx_abort(tx);
2107 		return (error);
2108 	}
2109 	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2110 	dmu_tx_commit(tx);
2111 
2112 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2113 	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2114 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2115 	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2116 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2117 	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
2118 	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2119 	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2120 
2121 	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2122 	(void) nvlist_add_uint64(nv,
2123 	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2124 	(void) nvlist_add_uint64(nv,
2125 	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2126 	(void) nvlist_add_uint64(nv,
2127 	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
2128 	if (version >= SPA_VERSION_DEDUP &&
2129 	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2130 	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
2131 		(void) nvlist_add_uint64(nv,
2132 		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
2133 	}
2134 	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2135 	    nv, NULL);
2136 	nvlist_free(nv);
2137 
2138 	zvol_free_extents(zv);
2139 	zv->zv_flags &= ~ZVOL_DUMPIFIED;
2140 	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2141 	/* wait for dmu_free_long_range to actually free the blocks */
2142 	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2143 	tx = dmu_tx_create(os);
2144 	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2145 	error = dmu_tx_assign(tx, TXG_WAIT);
2146 	if (error) {
2147 		dmu_tx_abort(tx);
2148 		return (error);
2149 	}
2150 	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2151 		zv->zv_volblocksize = vbs;
2152 	dmu_tx_commit(tx);
2153 
2154 	return (0);
2155 }
2156