/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Portions Copyright 2010 Robert Milkowski
 *
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 * /dev/zvol/dsk/<pool_name>/<dataset_name>
 * /dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 */
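
/*
 * For example (hypothetical pool and dataset names), a volume created
 * with "zfs create -V 1g tank/vol" appears as /dev/zvol/dsk/tank/vol
 * and /dev/zvol/rdsk/tank/vol and can be used like any other block or
 * character disk device.
 */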

#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dkio.h>
#include <sys/efi_partition.h>
#include <sys/byteorder.h>
#include <sys/pathname.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/crc32.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/mkdev.h>
#include <sys/zil.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/zvol.h>
#include <sys/dumphdr.h>
#include <sys/zil_impl.h>
#include <sys/dbuf.h>
#include <sys/zfs_events.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>

#include "zfs_namecheck.h"

void *zfsdev_state;
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE		"dumpsize"

/*
 * This lock protects the zfsdev_state structure from being modified
 * while it's being used, e.g. an open that comes in before a create
 * finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
kmutex_t zfsdev_state_lock;
static uint32_t zvol_minors;

typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;
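
/*
 * The extent list records the physical (DVA) layout of a dumpified
 * volume so that dump I/O can go straight to the vdevs, bypassing the
 * DMU; see zvol_get_lbas() and zvol_dumpio() below.
 */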

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char		zv_name[MAXPATHLEN]; /* pool/dataset name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	minor_t		zv_minor;	/* minor number */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
} zvol_state_t;

/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;

/*
 * Toggle unmap functionality.
 */
boolean_t zvol_unmap_enabled = B_TRUE;

extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t *);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static void
zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
{
	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);

	zv->zv_volsize = volsize;
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
}

int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (SET_ERROR(EINVAL));

	if (volsize % blocksize != 0)
		return (SET_ERROR(EINVAL));

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (SET_ERROR(EOVERFLOW));
#endif
	return (0);
}

int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_OLD_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}

int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}

static zvol_state_t *
zvol_minor_lookup(const char *name)
{
	minor_t minor;
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strcmp(zv->zv_name, name) == 0)
			return (zv);
	}

	return (NULL);
}

/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (bp == NULL || BP_IS_HOLE(bp) ||
	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY(!BP_IS_EMBEDDED(bp));

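	/*
	 * A dumpified zvol is fully preallocated, so the traversal should
	 * visit every L0 block in strictly increasing blkid order with no
	 * gaps; verify that before recording the extent.
	 */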
	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (SET_ERROR(EFRAGS));

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}

static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while ((ze = list_head(&zv->zv_extents)) != NULL) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}

static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg	ma;
	int		err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}

/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
{
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}

/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};

int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&zfsdev_state_lock);
	return (zv ? 0 : -1);
}

/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
	zfs_soft_state_t *zs;
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t doi;
	minor_t minor = 0;
	char chrbuf[30], blkbuf[30];
	int error;

	mutex_enter(&zfsdev_state_lock);

	if (zvol_minor_lookup(name) != NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EEXIST));
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);

	if (error) {
		mutex_exit(&zfsdev_state_lock);
		return (error);
	}

	if ((minor = zfsdev_minor_alloc()) == 0) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}
	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	zs = ddi_get_soft_state(zfsdev_state, minor);
	zs->zss_type = ZSST_ZVOL;
	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	zv->zv_min_bs = DEV_BSHIFT;
	zv->zv_minor = minor;
	zv->zv_objset = os;
	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
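	/*
	 * The objset was held only for ZIL replay; drop it here.
	 * zvol_first_open() will own it again when the device is
	 * actually opened.
	 */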
	dmu_objset_disown(os, FTAG);
	zv->zv_objset = NULL;

	zvol_minors++;

	mutex_exit(&zfsdev_state_lock);

	return (0);
}

/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
	char nmbuf[20];
	minor_t minor = zv->zv_minor;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	if (zv->zv_total_opens != 0)
		return (SET_ERROR(EBUSY));

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	kmem_free(zv, sizeof (zvol_state_t));

	ddi_soft_state_free(zfsdev_state, minor);

	zvol_minors--;
	return (0);
}

int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	int rc;

	mutex_enter(&zfsdev_state_lock);
	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	rc = zvol_remove_zv(zv);
	mutex_exit(&zfsdev_state_lock);
	return (rc);
}

int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int error;
	uint64_t readonly;

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
	    zvol_tag, &os);
	if (error)
		return (error);

	zv->zv_objset = os;
	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		ASSERT(error == 0);
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	zvol_size_changed(zv, volsize);
	zv->zv_zilog = zil_open(os, zvol_get_data);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
	return (error);
}

void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}

int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (SET_ERROR(ENOSPC));

	/* Free old extents if they exist */
	zvol_free_extents(zv);

	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
			return (error);
		}
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}

static int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
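	/*
	 * A shrinking volume frees blocks, so mark the tx net-free;
	 * this allows the assignment to succeed even when the pool
	 * is close to full.
	 */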
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	return (error);
}

void
zvol_remove_minors(const char *name)
{
	zvol_state_t *zv;
	char *namebuf;
	minor_t minor;

	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
	(void) strncpy(namebuf, name, strlen(name));
	(void) strcat(namebuf, "/");
	mutex_enter(&zfsdev_state_lock);
	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
			(void) zvol_remove_zv(zv);
	}
	kmem_free(namebuf, strlen(name) + 2);

	mutex_exit(&zfsdev_state_lock);
}

static int
zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
{
	uint64_t old_volsize = 0ULL;
	int error = 0;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	/*
	 * Reinitialize the dump area to the new size.  If we
	 * fail to resize the dump area then restore it back to
	 * its original size.  We must set the new volsize prior
	 * to calling dumpvp_resize() so that the device's stale
	 * size(9P) is not visible to the dump subsystem.
	 */
	old_volsize = zv->zv_volsize;
	zvol_size_changed(zv, volsize);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		if ((error = zvol_dumpify(zv)) != 0 ||
		    (error = dumpvp_resize()) != 0) {
			int dumpify_error;

			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
			zvol_size_changed(zv, old_volsize);
			dumpify_error = zvol_dumpify(zv);
			error = dumpify_error ? dumpify_error : error;
		}
	}

	/*
	 * Generate a LUN expansion event.
	 */
	if (error == 0) {
		sysevent_id_t eid;
		nvlist_t *attr;
		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
		    zv->zv_minor);

		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

		nvlist_free(attr);
		kmem_free(physpath, MAXPATHLEN);
	}
	return (error);
}

int
zvol_set_volsize(const char *name, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os;
	int error;
	dmu_object_info_t doi;
	uint64_t readonly;
	boolean_t owned = B_FALSE;

	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
	if (error != 0)
		return (error);
	if (readonly)
		return (SET_ERROR(EROFS));

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);

	if (zv == NULL || zv->zv_objset == NULL) {
		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
		    FTAG, &os)) != 0) {
			mutex_exit(&zfsdev_state_lock);
			return (error);
		}
		owned = B_TRUE;
		if (zv != NULL)
			zv->zv_objset = os;
	} else {
		os = zv->zv_objset;
	}

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
		goto out;

	error = zvol_update_volsize(os, volsize);

	if (error == 0 && zv != NULL)
		error = zvol_update_live_volsize(zv, volsize);
out:
	if (owned) {
		dmu_objset_disown(os, FTAG);
		if (zv != NULL)
			zv->zv_objset = NULL;
	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}

/*ARGSUSED*/
int
zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
{
	zvol_state_t *zv;
	int err = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_total_opens == 0)
		err = zvol_first_open(zv);
	if (err) {
		mutex_exit(&zfsdev_state_lock);
		return (err);
	}
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = SET_ERROR(EROFS);
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = SET_ERROR(EBUSY);
		goto out;
	}
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = SET_ERROR(EBUSY);
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}

	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
		zv->zv_open_count[otyp]++;
		zv->zv_total_opens++;
	}
	mutex_exit(&zfsdev_state_lock);

	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
	mutex_exit(&zfsdev_state_lock);
	return (err);
}

/*ARGSUSED*/
int
zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_open_count[otyp] != 0);
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_open_count[otyp]--;
	zv->zv_total_opens--;

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

	mutex_exit(&zfsdev_state_lock);
	return (error);
}

static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;	/* length of user data */
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) {	/* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN(offset, size);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;
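
/*
 * The loop below picks one of three itx write states per chunk:
 *
 *   WR_INDIRECT - full, block-aligned chunks when the volume block size
 *	exceeds the immediate-write threshold and the log is not going to
 *	a separate log device; the data is synced via dmu_sync() and the
 *	log records only a block pointer.
 *   WR_COPIED - synchronous writes small enough to copy into the log
 *	record itself.
 *   WR_NEED_COPY - everything else; the data is copied into the log
 *	buffer only if and when the itx is actually committed.
 */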

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;

	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    resid >= blocksize && off % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		zil_itx_assign(zilog, itx, tx);

		rw_enter(&rz_zev_rwlock, RW_READER);
		if (rz_zev_callbacks && rz_zev_callbacks->rz_zev_zvol_write)
			rz_zev_callbacks->rz_zev_zvol_write(zv->zv_name,
			    zv->zv_objset, tx, off, len);
		rw_exit(&rz_zev_rwlock);

		off += len;
		resid -= len;
	}
}

static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
    uint64_t size, boolean_t doread, boolean_t isdump)
{
	vdev_disk_t *dvd;
	int c;
	int numerrors = 0;

	if (vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops) {
		for (c = 0; c < vd->vdev_children; c++) {
			int err = zvol_dumpio_vdev(vd->vdev_child[c],
			    addr, offset, origoffset, size, doread, isdump);
			if (err != 0) {
				numerrors++;
			} else if (doread) {
				break;
			}
		}
	}

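	/*
	 * For mirror-like vdevs a read is satisfied by the first child
	 * that succeeds, while a write is sent to every child; the I/O
	 * fails only if every child failed.
	 */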
	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
		return (SET_ERROR(EIO));
	else if (!doread && !vdev_writeable(vd))
		return (SET_ERROR(EIO));

	if (vd->vdev_ops == &vdev_raidz_ops) {
		return (vdev_raidz_physio(vd,
		    addr, size, offset, origoffset, doread, isdump));
	}

	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		ASSERT(!doread);
		if (doread)
			return (SET_ERROR(EIO));
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	} else {
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
		    offset, doread ? B_READ : B_WRITE));
	}
}

static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_t *vd;
	int error;
	zvol_extent_t *ze;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (SET_ERROR(EINVAL));
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}

	if (ze == NULL)
		return (SET_ERROR(EINVAL));

	if (!ddi_in_panic())
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
	    size, doread, isdump);

	if (!ddi_in_panic())
		spa_config_exit(spa, SCL_STATE, FTAG);

	return (error);
}

int
zvol_strategy(buf_t *bp)
{
	zfs_soft_state_t *zs = NULL;
	zvol_state_t *zv;
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
	boolean_t doread = bp->b_flags & B_READ;
	boolean_t is_dumpified;
	boolean_t sync;

	if (getminor(bp->b_edev) == 0) {
		error = SET_ERROR(EINVAL);
	} else {
		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
		if (zs == NULL)
			error = SET_ERROR(ENXIO);
		else if (zs->zss_type != ZSST_ZVOL)
			error = SET_ERROR(EINVAL);
	}

	if (error) {
		bioerror(bp, error);
		biodone(bp);
		return (0);
	}

	zv = zs->zss_data;

	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	off = ldbtob(bp->b_blkno);
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		bioerror(bp, EIO);
		biodone(bp);
		return (0);
	}

	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
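	/*
	 * A write is synchronous when the caller didn't ask for async
	 * (B_ASYNC) and the write cache is disabled, or when the dataset
	 * is set to sync=always; reads and dumpified I/O never are.
	 */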
	sync = ((!(bp->b_flags & B_ASYNC) &&
	    !(zv->zv_flags & ZVOL_WCE)) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
	    !doread && !is_dumpified;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
		if (is_dumpified) {
			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
			error = zvol_dumpio(zv, addr, off, size,
			    doread, B_FALSE);
		} else if (doread) {
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
	zfs_range_unlock(rl);

	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	biodone(bp);

	return (0);
}

/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on x86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys bytes rather than every minphys-sized buffer,
 * currently 56K on x86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}

int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
		return (SET_ERROR(EINVAL));

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (SET_ERROR(EIO));

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}

/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;
	boolean_t sync;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (SET_ERROR(EIO));

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}

	sync = !(zv->zv_flags & ZVOL_WCE) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (error);
}

int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	int length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (SET_ERROR(EFAULT));
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (SET_ERROR(EINVAL));

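	/*
	 * We emulate a minimal EFI label: LBA 1 holds the GPT header and
	 * LBA 2 holds a single reserved partition entry covering LBA 34
	 * through the last usable LBA of the volume.
	 */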
	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (SET_ERROR(EFAULT));
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (SET_ERROR(EFAULT));
	return (0);
}

/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
	zvol_state_t *zv;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
	if (zv->zv_flags & ZVOL_DUMPIFIED)
		return (SET_ERROR(ENXIO));

	ASSERT(blksize && max_xfer_len && minor_hdl &&
	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

	*blksize = zv->zv_volblocksize;
	*max_xfer_len = (uint64_t)zvol_maxphys;
	*minor_hdl = zv;
	*objset_hdl = zv->zv_objset;
	*zil_hdl = zv->zv_zilog;
	*rl_hdl = &zv->zv_znode;
	*bonus_hdl = zv->zv_dbuf;
	return (0);
}

/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return (zv->zv_volsize);
}

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}

/*
 * Entry point for external callers to zvol_log_write
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	zvol_state_t *zv = minor_hdl;

	zvol_log_write(zv, tx, off, resid, sync);
}
/*
 * END entry points to allow external callers access to the volume.
 */

/*
 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
 */
static void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
    boolean_t sync)
{
	itx_t *itx;
	lr_truncate_t *lr;
	zilog_t *zilog = zv->zv_zilog;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = ZVOL_OBJ;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = sync;
	zil_itx_assign(zilog, itx, tx);

	rw_enter(&rz_zev_rwlock, RW_READER);
	if (rz_zev_callbacks && rz_zev_callbacks->rz_zev_zvol_truncate)
		rz_zev_callbacks->rz_zev_zvol_truncate(zv->zv_name,
		    zv->zv_objset, tx, off, len);
	rw_exit(&rz_zev_rwlock);
}

/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_callback *dkc;
	int error = 0;
	rl_t *rl;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);

	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	ASSERT(zv->zv_total_opens > 0);

	switch (cmd) {

	case DKIOCINFO:
	{
		struct dk_cinfo dki;

		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_unit = getminor(dev);
		dki.dki_maxtransfer =
		    1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFO:
	{
		struct dk_minfo dkm;

		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFOEXT:
	{
		struct dk_minfo_ext dkmext;

		bzero(&dkmext, sizeof (dkmext));
		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
		dkmext.dki_pbsize = zv->zv_volblocksize;
		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkmext.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGETEFI:
	{
		uint64_t vs = zv->zv_volsize;
		uint8_t bs = zv->zv_min_bs;

		mutex_exit(&zfsdev_state_lock);
		error = zvol_getefi((void *)arg, flag, vs, bs);
		return (error);
	}

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		mutex_exit(&zfsdev_state_lock);
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		return (error);

	case DKIOCGETWCE:
	{
		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
		    flag))
			error = SET_ERROR(EFAULT);
		break;
	}
	case DKIOCSETWCE:
	{
		int wce;
		if (ddi_copyin((void *)arg, &wce, sizeof (int),
		    flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}
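		/*
		 * When enabling the write cache we just set the flag;
		 * when disabling it we must also commit the ZIL so that
		 * writes already acknowledged from the cache reach
		 * stable storage before the new setting takes effect.
		 */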
		if (wce) {
			zv->zv_flags |= ZVOL_WCE;
			mutex_exit(&zfsdev_state_lock);
		} else {
			zv->zv_flags &= ~ZVOL_WCE;
			mutex_exit(&zfsdev_state_lock);
			zil_commit(zv->zv_zilog, ZVOL_OBJ);
		}
		return (0);
	}

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/*
		 * commands using these (like prtvtoc) expect ENOTSUP
		 * since we're emulating an EFI label
		 */
		error = SET_ERROR(ENOTSUP);
		break;

	case DKIOCDUMPINIT:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dumpify(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCDUMPFINI:
		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
			break;
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dump_fini(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCFREE:
	{
		dkioc_free_t df;
		dmu_tx_t *tx;

		if (!zvol_unmap_enabled)
			break;

		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}

		/*
		 * Apply Postel's Law to length-checking.  If they overshoot,
		 * just free to the end of the volume, if there's anything
		 * that needs freeing at all.
		 */
		if (df.df_start >= zv->zv_volsize)
			break;	/* No need to do anything... */

		mutex_exit(&zfsdev_state_lock);

		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
		    RL_WRITER);
		tx = dmu_tx_create(zv->zv_objset);
		dmu_tx_mark_netfree(tx);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, df.df_start,
			    df.df_length, B_TRUE);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    df.df_start, df.df_length);
		}

		zfs_range_unlock(rl);

		if (error == 0) {
			/*
			 * If the write cache is disabled or the 'sync'
			 * property is set to 'always' then treat this as
			 * a synchronous operation (i.e. commit to the ZIL).
			 */
			if (!(zv->zv_flags & ZVOL_WCE) ||
			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
				zil_commit(zv->zv_zilog, ZVOL_OBJ);

			/*
			 * If the caller wants the free to be on stable
			 * storage before returning (DF_WAIT_SYNC), wait
			 * for the containing txg to sync.
			 */
			if (df.df_flags & DF_WAIT_SYNC) {
				txg_wait_synced(
				    dmu_objset_pool(zv->zv_objset), 0);
			}
		}
		return (error);
	}

	default:
		error = SET_ERROR(ENOTTY);
		break;

	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}

int
zvol_busy(void)
{
	return (zvol_minors != 0);
}

void
zvol_init(void)
{
	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
	    1) == 0);
	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
}

void
zvol_fini(void)
{
	mutex_destroy(&zfsdev_state_lock);
	ddi_soft_state_fini(&zfsdev_state);
}

/*ARGSUSED*/
static int
zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
		return (1);
	return (0);
}

/*ARGSUSED*/
static void
zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
}

static int
zvol_dump_init(zvol_state_t *zv, boolean_t resize)
{
	dmu_tx_t *tx;
	int error;
	objset_t *os = zv->zv_objset;
	spa_t *spa = dmu_objset_spa(os);
	vdev_t *vd = spa->spa_root_vdev;
	nvlist_t *nv = NULL;
	uint64_t version = spa_version(spa);
	uint64_t checksum, compress, refresrv, vbs, dedup;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	ASSERT(vd->vdev_ops == &vdev_root_ops);

	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
	    DMU_OBJECT_END);
	if (error != 0)
		return (error);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

	/*
	 * If the pool on which the dump device is being initialized has more
	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
	 * enabled.  If so, bump that feature's counter to indicate that the
	 * feature is active.  We also check the vdev type to handle the
	 * following case:
	 *   # zpool create test raidz disk1 disk2 disk3
	 *   Here spa_root_vdev->vdev_children == 1 (the raidz vdev), but
	 *   the raidz vdev itself has 3 children.
	 */
	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
		if (!spa_feature_is_enabled(spa,
		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
			return (SET_ERROR(ENOTSUP));
		(void) dsl_sync_task(spa_name(spa),
		    zfs_mvdev_dump_feature_check,
		    zfs_mvdev_dump_activate_feature_sync, NULL,
		    2, ZFS_SPACE_CHECK_RESERVED);
	}

	if (!resize) {
		error = dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
		if (error == 0) {
			error = dsl_prop_get_integer(zv->zv_name,
			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum,
			    NULL);
		}
		if (error == 0) {
			error = dsl_prop_get_integer(zv->zv_name,
			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
			    &refresrv, NULL);
		}
		if (error == 0) {
			error = dsl_prop_get_integer(zv->zv_name,
			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs,
			    NULL);
		}
		if (version >= SPA_VERSION_DEDUP && error == 0) {
			error = dsl_prop_get_integer(zv->zv_name,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
		}
	}
	if (error != 0)
		return (error);

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
		return (error);
	}

	/*
	 * If we are resizing the dump device then we only need to
	 * update the refreservation to match the newly updated
	 * volsize.  Otherwise, we save off the original properties of
	 * the zvol so that we can restore them if the zvol is ever
	 * undumpified.
	 */
	if (resize) {
		error = zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &zv->zv_volsize, tx);
	} else {
		error = zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
		    &compress, tx);
		if (error == 0) {
			error = zap_update(os, ZVOL_ZAP_OBJ,
			    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1,
			    &checksum, tx);
		}
		if (error == 0) {
			error = zap_update(os, ZVOL_ZAP_OBJ,
			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
			    &refresrv, tx);
		}
		if (error == 0) {
			error = zap_update(os, ZVOL_ZAP_OBJ,
			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
			    &vbs, tx);
		}
		if (error == 0) {
			error = dmu_object_set_blocksize(
			    os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx);
		}
		if (version >= SPA_VERSION_DEDUP && error == 0) {
			error = zap_update(os, ZVOL_ZAP_OBJ,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
			    &dedup, tx);
		}
		if (error == 0)
			zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE;
	}
	dmu_tx_commit(tx);

	/*
	 * We only need to update the zvol's properties if we are
	 * initializing the dump area for the first time.
	 */
	if (error == 0 && !resize) {
		/*
		 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
		 * function.  Otherwise, use the old default -- OFF.
		 */
		checksum = spa_feature_is_active(spa,
		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
		    ZIO_CHECKSUM_OFF;

		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
		    ZIO_COMPRESS_OFF) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
		    checksum) == 0);
		if (version >= SPA_VERSION_DEDUP) {
			VERIFY(nvlist_add_uint64(nv,
			    zfs_prop_to_name(ZFS_PROP_DEDUP),
			    ZIO_CHECKSUM_OFF) == 0);
		}

		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
		    nv, NULL);
		nvlist_free(nv);
	}

	/* Allocate the space for the dump */
	if (error == 0)
		error = zvol_prealloc(zv);
	return (error);
}

static int
zvol_dumpify(zvol_state_t *zv)
{
	int error = 0;
	uint64_t dumpsize = 0;
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;

	if (zv->zv_flags & ZVOL_RDONLY)
		return (SET_ERROR(EROFS));

	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
		boolean_t resize = (dumpsize > 0);

		if ((error = zvol_dump_init(zv, resize)) != 0) {
			(void) zvol_dump_fini(zv);
			return (error);
		}
	}

	/*
	 * Build up our lba mapping.
	 */
	error = zvol_get_lbas(zv);
	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		(void) zvol_dump_fini(zv);
		return (error);
	}

	zv->zv_flags |= ZVOL_DUMPIFIED;
	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
	    &zv->zv_volsize, tx);
	dmu_tx_commit(tx);

	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

static int
zvol_dump_fini(zvol_state_t *zv)
{
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv;
	int error = 0;
	uint64_t checksum, compress, refresrv, vbs, dedup;
	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

	/*
	 * Attempt to restore the zvol back to its pre-dumpified state.
	 * This is a best-effort attempt as it's possible that not all
	 * of these properties were initialized during the dumpify process
	 * (e.g. an error during zvol_dump_init).
	 */

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
	dmu_tx_commit(tx);

	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);

	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
	if (version >= SPA_VERSION_DEDUP &&
	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
		(void) nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
	}
	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
	    nv, NULL);
	nvlist_free(nv);

	zvol_free_extents(zv);
	zv->zv_flags &= ~ZVOL_DUMPIFIED;
	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
		zv->zv_volblocksize = vbs;
	dmu_tx_commit(tx);

	return (0);
}