xref: /freebsd/sys/contrib/openzfs/module/zfs/zvol.c (revision 79ac3c12a714bcd3f2354c52d948aed9575c46d6)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 *
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 * /dev/<pool_name>/<dataset_name>
 *
 * Volumes are persistent through reboot and module load.  No user command
 * needs to be run before opening and using a device.
 *
 * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 */

/*
 * Note on locking of zvol state structures.
 *
 * These structures are used to maintain the internal state used to emulate
 * block devices on top of zvols. In particular, management of device minor
 * number operations - create, remove, rename, and set_snapdev - involves
 * access to these structures. The zvol_state_lock is primarily used to
 * protect the zvol_state_list. The zv->zv_state_lock is used to protect the
 * contents of the zvol_state_t structures, as well as to make sure that when
 * the time comes to remove the structure from the list, it is not in use and
 * can therefore be taken off zvol_state_list and freed.
 *
 * The zv_suspend_lock was introduced to allow for suspending I/O to a zvol,
 * e.g. for the duration of receive and rollback operations. This lock can be
 * held for significant periods of time. Given that it is undesirable to hold
 * mutexes for long periods of time, the following lock ordering applies:
 * - take zvol_state_lock if necessary, to protect zvol_state_list
 * - take zv_suspend_lock if necessary, by the code path in question
 * - take zv_state_lock to protect zvol_state_t
 *
 * Minor operations are issued to spa->spa_zvol_taskq queues, which are
 * single-threaded (to preserve the order of minor operations), and are
 * executed through the zvol_task_cb that dispatches the specific operations.
 * Therefore, these operations are serialized per pool. Consequently, we can
 * be certain that for a given zvol, there is only one operation in progress
 * at a time. This guarantees that the zvol_state_t for a given zvol is first
 * allocated and placed on zvol_state_list, and that subsequent minor
 * operations for this zvol proceed in the order they were issued.
 */
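
/*
 * A minimal sketch of the lock ordering described above, as a hypothetical
 * lookup-and-modify caller would follow it (illustrative only, not a
 * function in this file):
 *
 *	rw_enter(&zvol_state_lock, RW_READER);	 // 1. protect the list
 *	// ... locate the zvol_state_t *zv of interest ...
 *	rw_enter(&zv->zv_suspend_lock, RW_READER); // 2. hold off suspend
 *	mutex_enter(&zv->zv_state_lock);	 // 3. protect zv contents
 *	rw_exit(&zvol_state_lock);		 // list lock no longer needed
 *	// ... operate on *zv ...
 *	mutex_exit(&zv->zv_state_lock);
 *	rw_exit(&zv->zv_suspend_lock);
 */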

#include <sys/dataset_kstats.h>
#include <sys/dbuf.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zio.h>
#include <sys/zfs_rlock.h>
#include <sys/spa_impl.h>
#include <sys/zvol.h>

#include <sys/zvol_impl.h>

unsigned int zvol_inhibit_dev = 0;
unsigned int zvol_volmode = ZFS_VOLMODE_GEOM;

struct hlist_head *zvol_htable;
list_t zvol_state_list;
krwlock_t zvol_state_lock;
const zvol_platform_ops_t *ops;

typedef enum {
	ZVOL_ASYNC_REMOVE_MINORS,
	ZVOL_ASYNC_RENAME_MINORS,
	ZVOL_ASYNC_SET_SNAPDEV,
	ZVOL_ASYNC_SET_VOLMODE,
	ZVOL_ASYNC_MAX
} zvol_async_op_t;

typedef struct {
	zvol_async_op_t op;
	char pool[MAXNAMELEN];
	char name1[MAXNAMELEN];
	char name2[MAXNAMELEN];
	zprop_source_t source;
	uint64_t value;
} zvol_task_t;

uint64_t
zvol_name_hash(const char *name)
{
	int i;
	uint64_t crc = -1ULL;
	const uint8_t *p = (const uint8_t *)name;
	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	for (i = 0; i < MAXNAMELEN - 1 && *p; i++, p++) {
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (*p)) & 0xFF];
	}
	return (crc);
}

/*
 * Find a zvol_state_t given the name and hash generated by zvol_name_hash.
 * If found, return with zv_suspend_lock and zv_state_lock taken, otherwise,
 * return (NULL) without taking the locks. The zv_suspend_lock is always
 * taken before zv_state_lock. The mode argument indicates the mode
 * (including none) in which zv_suspend_lock is to be taken.
 */
zvol_state_t *
zvol_find_by_name_hash(const char *name, uint64_t hash, int mode)
{
	zvol_state_t *zv;
	struct hlist_node *p = NULL;

	rw_enter(&zvol_state_lock, RW_READER);
	hlist_for_each(p, ZVOL_HT_HEAD(hash)) {
		zv = hlist_entry(p, zvol_state_t, zv_hlink);
		mutex_enter(&zv->zv_state_lock);
		if (zv->zv_hash == hash &&
		    strncmp(zv->zv_name, name, MAXNAMELEN) == 0) {
			/*
			 * This is the right zvol; take the locks in the
			 * right order.
			 */
			if (mode != RW_NONE &&
			    !rw_tryenter(&zv->zv_suspend_lock, mode)) {
				mutex_exit(&zv->zv_state_lock);
				rw_enter(&zv->zv_suspend_lock, mode);
				mutex_enter(&zv->zv_state_lock);
				/*
				 * The zvol cannot be renamed as we continue
				 * to hold zvol_state_lock.
				 */
				ASSERT(zv->zv_hash == hash &&
				    strncmp(zv->zv_name, name, MAXNAMELEN)
				    == 0);
			}
			rw_exit(&zvol_state_lock);
			return (zv);
		}
		mutex_exit(&zv->zv_state_lock);
	}
	rw_exit(&zvol_state_lock);

	return (NULL);
}

/*
 * Find a zvol_state_t given the name.
 * If found, return with zv_suspend_lock and zv_state_lock taken, otherwise,
 * return (NULL) without taking the locks. The zv_suspend_lock is always
 * taken before zv_state_lock. The mode argument indicates the mode
 * (including none) in which zv_suspend_lock is to be taken.
 */
static zvol_state_t *
zvol_find_by_name(const char *name, int mode)
{
	return (zvol_find_by_name_hash(name, zvol_name_hash(name), mode));
}
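
/*
 * Illustrative (hypothetical) caller of zvol_find_by_name(): on success
 * both locks are held and must be dropped by the caller, e.g.:
 *
 *	zvol_state_t *zv = zvol_find_by_name("pool/vol", RW_READER);
 *	if (zv != NULL) {
 *		// ... read or update zv fields here ...
 *		mutex_exit(&zv->zv_state_lock);
 *		rw_exit(&zv->zv_suspend_lock);
 *	}
 *
 * With mode == RW_NONE only zv_state_lock is taken, and only it needs to
 * be released.
 */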

/*
 * ZFS_IOC_CREATE callback handles dmu zvol and zap object creation.
 */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

/*
 * ZFS_IOC_OBJSET_STATS entry point.
 */
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t *doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (SET_ERROR(error));

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
	error = dmu_object_info(os, ZVOL_OBJ, doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi->doi_data_block_size);
	}

	kmem_free(doi, sizeof (dmu_object_info_t));

	return (SET_ERROR(error));
}

/*
 * Sanity check volume size.
 */
int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (SET_ERROR(EINVAL));

	if (volsize % blocksize != 0)
		return (SET_ERROR(EINVAL));

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (SET_ERROR(EOVERFLOW));
#endif
	return (0);
}
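
/*
 * For example, with the default 8K volblocksize a volsize of 1G (131072
 * blocks) passes the check above, while a volsize of 1G + 512 fails with
 * EINVAL because it is not a multiple of the block size. (Illustrative
 * arithmetic only.)
 */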

/*
 * Ensure the zap is flushed then inform the VFS of the capacity change.
 */
static int
zvol_update_volsize(uint64_t volsize, objset_t *os)
{
	dmu_tx_t *tx;
	int error;
	uint64_t txg;

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (SET_ERROR(error));
	}
	txg = dmu_tx_get_txg(tx);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	txg_wait_synced(dmu_objset_pool(os), txg);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);

	return (error);
}

/*
 * ZFS_PROP_VOLSIZE set entry point.  Note that modifying the volume
 * size will result in a udev "change" event being generated.
 */
int
zvol_set_volsize(const char *name, uint64_t volsize)
{
	objset_t *os = NULL;
	uint64_t readonly;
	int error;
	boolean_t owned = B_FALSE;

	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
	if (error != 0)
		return (SET_ERROR(error));
	if (readonly)
		return (SET_ERROR(EROFS));

	zvol_state_t *zv = zvol_find_by_name(name, RW_READER);

	ASSERT(zv == NULL || (MUTEX_HELD(&zv->zv_state_lock) &&
	    RW_READ_HELD(&zv->zv_suspend_lock)));

	if (zv == NULL || zv->zv_objset == NULL) {
		if (zv != NULL)
			rw_exit(&zv->zv_suspend_lock);
		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE, B_TRUE,
		    FTAG, &os)) != 0) {
			if (zv != NULL)
				mutex_exit(&zv->zv_state_lock);
			return (SET_ERROR(error));
		}
		owned = B_TRUE;
		if (zv != NULL)
			zv->zv_objset = os;
	} else {
		os = zv->zv_objset;
	}

	dmu_object_info_t *doi = kmem_alloc(sizeof (*doi), KM_SLEEP);

	if ((error = dmu_object_info(os, ZVOL_OBJ, doi)) ||
	    (error = zvol_check_volsize(volsize, doi->doi_data_block_size)))
		goto out;

	error = zvol_update_volsize(volsize, os);
	if (error == 0 && zv != NULL) {
		zv->zv_volsize = volsize;
		zv->zv_changed = 1;
	}
out:
	kmem_free(doi, sizeof (dmu_object_info_t));

	if (owned) {
		dmu_objset_disown(os, B_TRUE, FTAG);
		if (zv != NULL)
			zv->zv_objset = NULL;
	} else {
		rw_exit(&zv->zv_suspend_lock);
	}

	if (zv != NULL)
		mutex_exit(&zv->zv_state_lock);

	if (error == 0 && zv != NULL)
		ops->zv_update_volsize(zv, volsize);

	return (SET_ERROR(error));
}

/*
 * Sanity check volume block size.
 */
int
zvol_check_volblocksize(const char *name, uint64_t volblocksize)
{
	/* Record sizes above 128k need the feature to be enabled */
	if (volblocksize > SPA_OLD_MAXBLOCKSIZE) {
		spa_t *spa;
		int error;

		if ((error = spa_open(name, &spa, FTAG)) != 0)
			return (error);

		if (!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
			spa_close(spa, FTAG);
			return (SET_ERROR(ENOTSUP));
		}

		/*
		 * We don't allow setting the property above 1MB,
		 * unless the tunable has been changed.
		 */
		if (volblocksize > zfs_max_recordsize) {
			spa_close(spa, FTAG);
			return (SET_ERROR(EDOM));
		}

		spa_close(spa, FTAG);
	}

	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}
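
/*
 * Example outcomes of the checks above (illustrative, assuming default
 * tunables): 8K and 128K are accepted unconditionally; 512K additionally
 * requires the large_blocks pool feature (ENOTSUP otherwise); a
 * non-power-of-two value such as 12K fails with EDOM, as does anything
 * outside [SPA_MINBLOCKSIZE, SPA_MAXBLOCKSIZE].
 */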

/*
 * ZFS_PROP_VOLBLOCKSIZE set entry point.
 */
int
zvol_set_volblocksize(const char *name, uint64_t volblocksize)
{
	zvol_state_t *zv;
	dmu_tx_t *tx;
	int error;

	zv = zvol_find_by_name(name, RW_READER);

	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	ASSERT(MUTEX_HELD(&zv->zv_state_lock));
	ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));

	if (zv->zv_flags & ZVOL_RDONLY) {
		mutex_exit(&zv->zv_state_lock);
		rw_exit(&zv->zv_suspend_lock);
		return (SET_ERROR(EROFS));
	}

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
		    volblocksize, 0, tx);
		if (error == ENOTSUP)
			error = SET_ERROR(EBUSY);
		dmu_tx_commit(tx);
		if (error == 0)
			zv->zv_volblocksize = volblocksize;
	}

	mutex_exit(&zv->zv_state_lock);
	rw_exit(&zv->zv_suspend_lock);

	return (SET_ERROR(error));
}

/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
{
	zvol_state_t *zv = arg1;
	lr_truncate_t *lr = arg2;
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_mark_netfree(tx);
	int error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
	} else {
		zil_replaying(zv->zv_zilog, tx);
		dmu_tx_commit(tx);
		error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset,
		    length);
	}

	return (error);
}

/*
 * Replay a TX_WRITE ZIL transaction that wasn't committed to the DMU
 * before a system failure.
 */
static int
zvol_replay_write(void *arg1, void *arg2, boolean_t byteswap)
{
	zvol_state_t *zv = arg1;
	lr_write_t *lr = arg2;
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);  /* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		zil_replaying(zv->zv_zilog, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}

static int
zvol_replay_err(void *arg1, void *arg2, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};
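
/*
 * This table is indexed by lrc_txtype and is handed to zil_replay()
 * together with the zvol_state_t when a minor is brought up.  A hedged
 * sketch of how a consumer would invoke it (the exact call site lives in
 * the platform-specific zvol code, not in this file):
 *
 *	if (zil_replay_disable)
 *		zil_destroy(dmu_objset_zil(os), B_FALSE);
 *	else
 *		zil_replay(os, zv, zvol_replay_vector);
 */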

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
    uint64_t size, int sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	itx_wr_state_t write_state;

	if (zil_replaying(zilog, tx))
		return;

	if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
		write_state = WR_INDIRECT;
	else if (!spa_has_slogs(zilog->zl_spa) &&
	    size >= blocksize && blocksize > zvol_immediate_write_sz)
		write_state = WR_INDIRECT;
	else if (sync)
		write_state = WR_COPIED;
	else
		write_state = WR_NEED_COPY;

	while (size) {
		itx_t *itx;
		lr_write_t *lr;
		itx_wr_state_t wr_state = write_state;
		ssize_t len = size;

		if (wr_state == WR_COPIED && size > zil_max_copied_data(zilog))
			wr_state = WR_NEED_COPY;
		else if (wr_state == WR_INDIRECT)
			len = MIN(blocksize - P2PHASE(offset, blocksize), size);

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (wr_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (wr_state == WR_COPIED && dmu_read_by_dnode(zv->zv_dn,
		    offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			wr_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = wr_state;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = offset;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		(void) zil_itx_assign(zilog, itx, tx);

		offset += len;
		size -= len;
	}
}
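
/*
 * A worked example of the state selection above (assuming default tunables
 * and logbias=latency): on a pool with no separate log device, a
 * synchronous 128K write to a zvol with volblocksize=128K takes the
 * WR_INDIRECT path (size >= blocksize and blocksize > the 32K
 * zvol_immediate_write_sz), so the loop splits it on block boundaries and
 * dmu_sync() later fills in lr_blkptr. A synchronous 16K write with
 * volblocksize=8K is WR_COPIED, with the data read directly into the itx,
 * unless it exceeds zil_max_copied_data(), in which case it degrades to
 * WR_NEED_COPY. An asynchronous write is WR_NEED_COPY from the start,
 * deferring the copy until the itx is committed.
 */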

/*
 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
 */
void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
    boolean_t sync)
{
	itx_t *itx;
	lr_truncate_t *lr;
	zilog_t *zilog = zv->zv_zilog;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = ZVOL_OBJ;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = sync;
	zil_itx_assign(zilog, itx, tx);
}

/* ARGSUSED */
static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_rangelock_exit(zgd->zgd_lr);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
int
zvol_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
    struct lwb *lwb, zio_t *zio)
{
	zvol_state_t *zv = arg;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT3P(lwb, !=, NULL);
	ASSERT3P(zio, !=, NULL);
	ASSERT3U(size, !=, 0);

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_lwb = lwb;

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		zgd->zgd_lr = zfs_rangelock_enter(&zv->zv_rangelock, offset,
		    size, RL_READER);
		error = dmu_read_by_dnode(zv->zv_dn, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else { /* indirect write */
		/*
		 * Have to lock the whole block to ensure that while it is
		 * being written out and its checksum is being calculated,
		 * no one can change the data. Unlike zfs_get_data, we need
		 * not re-check the blocksize after we get the lock, because
		 * it cannot be changed.
		 */
		size = zv->zv_volblocksize;
		offset = P2ALIGN_TYPED(offset, size, uint64_t);
		zgd->zgd_lr = zfs_rangelock_enter(&zv->zv_rangelock, offset,
		    size, RL_READER);
		error = dmu_buf_hold_by_dnode(zv->zv_dn, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			blkptr_t *bp = &lr->lr_blkptr;

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db != NULL);
			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (SET_ERROR(error));
}
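
/*
 * Note on zgd ownership above: when dmu_sync() is initiated successfully,
 * the zgd (and with it the range lock and dbuf hold) is handed off to the
 * dmu_sync() completion path, which calls zvol_get_done() asynchronously;
 * on every other path zvol_get_done() is called synchronously here before
 * returning.
 */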

/*
 * The zvol_state_t's are inserted into zvol_state_list and zvol_htable.
 */

void
zvol_insert(zvol_state_t *zv)
{
	ASSERT(RW_WRITE_HELD(&zvol_state_lock));
	list_insert_head(&zvol_state_list, zv);
	hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
}

/*
 * Simply remove the zvol from the list of zvols.
 */
static void
zvol_remove(zvol_state_t *zv)
{
	ASSERT(RW_WRITE_HELD(&zvol_state_lock));
	list_remove(&zvol_state_list, zv);
	hlist_del(&zv->zv_hlink);
}

/*
 * Set up zv after we have just taken ownership of zv->zv_objset.
 */
static int
zvol_setup_zv(zvol_state_t *zv)
{
	uint64_t volsize;
	int error;
	uint64_t ro;
	objset_t *os = zv->zv_objset;

	ASSERT(MUTEX_HELD(&zv->zv_state_lock));
	ASSERT(RW_LOCK_HELD(&zv->zv_suspend_lock));

	zv->zv_zilog = NULL;
	zv->zv_flags &= ~ZVOL_WRITTEN_TO;

	error = dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL);
	if (error)
		return (SET_ERROR(error));

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error)
		return (SET_ERROR(error));

	error = dnode_hold(os, ZVOL_OBJ, zv, &zv->zv_dn);
	if (error)
		return (SET_ERROR(error));

	ops->zv_set_capacity(zv, volsize >> 9);
	zv->zv_volsize = volsize;

	if (ro || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os))) {
		ops->zv_set_disk_ro(zv, 1);
		zv->zv_flags |= ZVOL_RDONLY;
	} else {
		ops->zv_set_disk_ro(zv, 0);
		zv->zv_flags &= ~ZVOL_RDONLY;
	}
	return (0);
}

/*
 * Shut down everything related to zv_objset, except zv_objset itself.
 * This is the reverse of zvol_setup_zv.
 */
static void
zvol_shutdown_zv(zvol_state_t *zv)
{
	ASSERT(MUTEX_HELD(&zv->zv_state_lock) &&
	    RW_LOCK_HELD(&zv->zv_suspend_lock));

	if (zv->zv_flags & ZVOL_WRITTEN_TO) {
		ASSERT(zv->zv_zilog != NULL);
		zil_close(zv->zv_zilog);
	}

	zv->zv_zilog = NULL;

	dnode_rele(zv->zv_dn, zv);
	zv->zv_dn = NULL;

	/*
	 * Evict cached data. We must write out any dirty data before
	 * disowning the dataset.
	 */
	if (zv->zv_flags & ZVOL_WRITTEN_TO)
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	(void) dmu_objset_evict_dbufs(zv->zv_objset);
}

/*
 * Return the proper tag for rollback and recv.
 */
void *
zvol_tag(zvol_state_t *zv)
{
	ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
	return (zv->zv_open_count > 0 ? zv : NULL);
}

/*
 * Suspend the zvol for recv and rollback.
 */
zvol_state_t *
zvol_suspend(const char *name)
{
	zvol_state_t *zv;

	zv = zvol_find_by_name(name, RW_WRITER);

	if (zv == NULL)
		return (NULL);

	/* block all I/O, release in zvol_resume. */
	ASSERT(MUTEX_HELD(&zv->zv_state_lock));
	ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));

	atomic_inc(&zv->zv_suspend_ref);

	if (zv->zv_open_count > 0)
		zvol_shutdown_zv(zv);

	/*
	 * Do not hold zv_state_lock across suspend/resume to
	 * avoid locking up zvol lookups.
	 */
	mutex_exit(&zv->zv_state_lock);

	/* zv_suspend_lock is released in zvol_resume() */
	return (zv);
}

int
zvol_resume(zvol_state_t *zv)
{
	int error = 0;

	ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));

	mutex_enter(&zv->zv_state_lock);

	if (zv->zv_open_count > 0) {
		VERIFY0(dmu_objset_hold(zv->zv_name, zv, &zv->zv_objset));
		VERIFY3P(zv->zv_objset->os_dsl_dataset->ds_owner, ==, zv);
		VERIFY(dsl_dataset_long_held(zv->zv_objset->os_dsl_dataset));
		dmu_objset_rele(zv->zv_objset, zv);

		error = zvol_setup_zv(zv);
	}

	mutex_exit(&zv->zv_state_lock);

	rw_exit(&zv->zv_suspend_lock);
	/*
	 * We need this because we don't hold zvol_state_lock while
	 * releasing zv_suspend_lock. zvol_remove_minors_impl thus cannot
	 * rely on zv_suspend_lock alone to determine whether it is safe
	 * to free the zv, because an rwlock is not inherently atomic.
	 */
	atomic_dec(&zv->zv_suspend_ref);

	return (SET_ERROR(error));
}
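
/*
 * The suspend/resume pair brackets operations that must not race with
 * zvol I/O, e.g. receive and rollback. A hedged sketch of the calling
 * pattern, with an illustrative dataset name (the real call sites live
 * in the DMU receive/rollback paths):
 *
 *	zvol_state_t *zv = zvol_suspend("pool/vol");
 *	// ... perform the receive or rollback ...
 *	if (zv != NULL)
 *		(void) zvol_resume(zv);
 */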

int
zvol_first_open(zvol_state_t *zv, boolean_t readonly)
{
	objset_t *os;
	int error, locked = 0;
	boolean_t ro;

	ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	/*
	 * In all other cases the spa_namespace_lock is taken before the
	 * bdev->bd_mutex lock.  But in this case the Linux __blkdev_get()
	 * function calls fops->open() with the bdev->bd_mutex lock held.
	 * This deadlock can be easily observed with zvols used as vdevs.
	 *
	 * To avoid a potential lock inversion deadlock we preemptively
	 * try to take the spa_namespace_lock().  Normally it will not
	 * be contended and this is safe because spa_open_common() handles
	 * the case where the caller already holds the spa_namespace_lock.
	 *
	 * When it is contended we risk a lock inversion if we were to
	 * block waiting for the lock.  Luckily, the __blkdev_get()
	 * function allows us to return -ERESTARTSYS which will result in
	 * bdev->bd_mutex being dropped, reacquired, and fops->open() being
	 * called again.  This process can be repeated safely until both
	 * locks are acquired.
	 */
	if (!mutex_owned(&spa_namespace_lock)) {
		locked = mutex_tryenter(&spa_namespace_lock);
		if (!locked)
			return (SET_ERROR(EINTR));
	}

	ro = (readonly || (strchr(zv->zv_name, '@') != NULL));
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, ro, B_TRUE, zv, &os);
	if (error)
		goto out_mutex;

	zv->zv_objset = os;

	error = zvol_setup_zv(zv);

	if (error) {
		dmu_objset_disown(os, 1, zv);
		zv->zv_objset = NULL;
	}

out_mutex:
	if (locked)
		mutex_exit(&spa_namespace_lock);
	return (SET_ERROR(error));
}
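
/*
 * In other words, the EINTR returned above is translated by the platform
 * open path into a retry rather than a hard failure. A hedged sketch of
 * what the Linux-side caller effectively does (the real logic lives in
 * the platform zvol code, not here):
 *
 *	error = zvol_first_open(zv, !(flag & FWRITE));
 *	if (error == EINTR)
 *		return (-ERESTARTSYS);	// kernel retries fops->open()
 */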

void
zvol_last_close(zvol_state_t *zv)
{
	ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	zvol_shutdown_zv(zv);

	dmu_objset_disown(zv->zv_objset, 1, zv);
	zv->zv_objset = NULL;
}

typedef struct minors_job {
	list_t *list;
	list_node_t link;
	/* input */
	char *name;
	/* output */
	int error;
} minors_job_t;

/*
 * Prefetch zvol dnodes for the minors_job
 */
static void
zvol_prefetch_minors_impl(void *arg)
{
	minors_job_t *job = arg;
	char *dsname = job->name;
	objset_t *os = NULL;

	job->error = dmu_objset_own(dsname, DMU_OST_ZVOL, B_TRUE, B_TRUE,
	    FTAG, &os);
	if (job->error == 0) {
		dmu_prefetch(os, ZVOL_OBJ, 0, 0, 0, ZIO_PRIORITY_SYNC_READ);
		dmu_objset_disown(os, B_TRUE, FTAG);
	}
}

/*
 * Mask errors to continue dmu_objset_find() traversal
 */
static int
zvol_create_snap_minor_cb(const char *dsname, void *arg)
{
	minors_job_t *j = arg;
	list_t *minors_list = j->list;
	const char *name = j->name;

	ASSERT0(MUTEX_HELD(&spa_namespace_lock));

	/* skip the designated dataset */
	if (name && strcmp(dsname, name) == 0)
		return (0);

	/* at this point, the dsname should name a snapshot */
	if (strchr(dsname, '@') == 0) {
		dprintf("zvol_create_snap_minor_cb(): "
		    "%s is not a snapshot name\n", dsname);
	} else {
		minors_job_t *job;
		char *n = kmem_strdup(dsname);
		if (n == NULL)
			return (0);

		job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
		job->name = n;
		job->list = minors_list;
		job->error = 0;
		list_insert_tail(minors_list, job);
		/* don't care if dispatch fails, because job->error is 0 */
		taskq_dispatch(system_taskq, zvol_prefetch_minors_impl, job,
		    TQ_SLEEP);
	}

	return (0);
}

/*
 * Mask errors to continue dmu_objset_find() traversal
 */
static int
zvol_create_minors_cb(const char *dsname, void *arg)
{
	uint64_t snapdev;
	int error;
	list_t *minors_list = arg;

	ASSERT0(MUTEX_HELD(&spa_namespace_lock));

	error = dsl_prop_get_integer(dsname, "snapdev", &snapdev, NULL);
	if (error)
		return (0);

	/*
	 * Given the name and the 'snapdev' property, create device minor nodes
	 * with the linkages to zvols/snapshots as needed.
	 * If the name represents a zvol, create a minor node for the zvol, then
	 * check if its snapshots are 'visible', and if so, iterate over the
	 * snapshots and create device minor nodes for those.
	 */
	if (strchr(dsname, '@') == 0) {
		minors_job_t *job;
		char *n = kmem_strdup(dsname);
		if (n == NULL)
			return (0);

		job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
		job->name = n;
		job->list = minors_list;
		job->error = 0;
		list_insert_tail(minors_list, job);
		/* don't care if dispatch fails, because job->error is 0 */
		taskq_dispatch(system_taskq, zvol_prefetch_minors_impl, job,
		    TQ_SLEEP);

		if (snapdev == ZFS_SNAPDEV_VISIBLE) {
			/*
			 * traverse snapshots only, do not traverse children,
			 * and skip the 'dsname'
			 */
			error = dmu_objset_find(dsname,
			    zvol_create_snap_minor_cb, (void *)job,
			    DS_FIND_SNAPSHOTS);
		}
	} else {
		dprintf("zvol_create_minors_cb(): %s is not a zvol name\n",
		    dsname);
	}

	return (0);
}

/*
 * Create minors for the specified dataset, including children and snapshots.
 * Pay attention to the 'snapdev' property and iterate over the snapshots
 * only if they are 'visible'. This approach allows one to assure that the
 * snapshot metadata is read from disk only if it is needed.
 *
 * The name can represent a dataset to be recursively scanned for zvols and
 * their snapshots, or a single zvol snapshot. If the name represents a
 * dataset, the scan is performed in two nested stages:
 * - scan the dataset for zvols, and
 * - for each zvol, create a minor node, then check if the zvol's snapshots
 *   are 'visible', and only then iterate over the snapshots if needed
 *
 * If the name represents a snapshot, a check is performed if the snapshot is
 * 'visible' (which also verifies that the parent is a zvol), and if so,
 * a minor node for that snapshot is created.
 */
void
zvol_create_minors_recursive(const char *name)
{
	list_t minors_list;
	minors_job_t *job;

	if (zvol_inhibit_dev)
		return;

	/*
	 * This is the list for prefetch jobs. Whenever we find a match
	 * during dmu_objset_find, we insert a minors_job into the list and
	 * use taskq_dispatch to prefetch zvol dnodes in parallel. Note that
	 * we don't need any locking because all list operations are done on
	 * the current thread.
	 *
	 * We will use this list to do zvol_create_minor_impl after prefetch
	 * so we don't have to traverse using dmu_objset_find again.
	 */
	list_create(&minors_list, sizeof (minors_job_t),
	    offsetof(minors_job_t, link));

	if (strchr(name, '@') != NULL) {
		uint64_t snapdev;

		int error = dsl_prop_get_integer(name, "snapdev",
		    &snapdev, NULL);

		if (error == 0 && snapdev == ZFS_SNAPDEV_VISIBLE)
			(void) ops->zv_create_minor(name);
	} else {
		fstrans_cookie_t cookie = spl_fstrans_mark();
		(void) dmu_objset_find(name, zvol_create_minors_cb,
		    &minors_list, DS_FIND_CHILDREN);
		spl_fstrans_unmark(cookie);
	}

	taskq_wait_outstanding(system_taskq, 0);

	/*
	 * Prefetch is completed, we can do zvol_create_minor_impl
	 * sequentially.
	 */
	while ((job = list_head(&minors_list)) != NULL) {
		list_remove(&minors_list, job);
		if (!job->error)
			(void) ops->zv_create_minor(job->name);
		kmem_strfree(job->name);
		kmem_free(job, sizeof (minors_job_t));
	}

	list_destroy(&minors_list);
}
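
/*
 * For example (illustrative names), after importing a pool containing
 * "pool/vol", "pool/vol@snap" (snapdev=visible) and the filesystem
 * "pool/fs", zvol_create_minors_recursive("pool") queues prefetch jobs
 * for the traversed datasets, waits for them, and then creates minor
 * nodes sequentially; the job for "pool/fs" fails dmu_objset_own() with
 * DMU_OST_ZVOL, so only "pool/vol" and "pool/vol@snap" get minors.
 */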

void
zvol_create_minor(const char *name)
{
	/*
	 * Note: the dsl_pool_config_lock must not be held.
	 * Minor node creation needs to obtain the zvol_state_lock.
	 * zvol_open() obtains the zvol_state_lock and then the dsl pool
	 * config lock.  Therefore, we can't have the config lock now if
	 * we are going to wait for the zvol_state_lock, because it
	 * would be a lock order inversion which could lead to deadlock.
	 */

	if (zvol_inhibit_dev)
		return;

	if (strchr(name, '@') != NULL) {
		uint64_t snapdev;

		int error = dsl_prop_get_integer(name,
		    "snapdev", &snapdev, NULL);

		if (error == 0 && snapdev == ZFS_SNAPDEV_VISIBLE)
			(void) ops->zv_create_minor(name);
	} else {
		(void) ops->zv_create_minor(name);
	}
}

/*
 * Remove minors for specified dataset including children and snapshots.
 */

void
zvol_remove_minors_impl(const char *name)
{
	zvol_state_t *zv, *zv_next;
	int namelen = ((name) ? strlen(name) : 0);
	taskqid_t t;
	list_t free_list;

	if (zvol_inhibit_dev)
		return;

	list_create(&free_list, sizeof (zvol_state_t),
	    offsetof(zvol_state_t, zv_next));

	rw_enter(&zvol_state_lock, RW_WRITER);

	for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
		zv_next = list_next(&zvol_state_list, zv);

		mutex_enter(&zv->zv_state_lock);
		if (name == NULL || strcmp(zv->zv_name, name) == 0 ||
		    (strncmp(zv->zv_name, name, namelen) == 0 &&
		    (zv->zv_name[namelen] == '/' ||
		    zv->zv_name[namelen] == '@'))) {
			/*
			 * By holding zv_state_lock here, we guarantee that no
			 * one is currently using this zv
			 */

			/* If in use, leave alone */
			if (zv->zv_open_count > 0 ||
			    atomic_read(&zv->zv_suspend_ref)) {
				mutex_exit(&zv->zv_state_lock);
				continue;
			}

			zvol_remove(zv);

			/*
			 * Cleared while holding zvol_state_lock as a writer
			 * which will prevent zvol_open() from opening it.
			 */
			ops->zv_clear_private(zv);

			/* Drop zv_state_lock before zvol_free() */
			mutex_exit(&zv->zv_state_lock);

			/* Try parallel zv_free; if that fails, free in place */
			t = taskq_dispatch(system_taskq,
			    (task_func_t *)ops->zv_free, zv, TQ_SLEEP);
			if (t == TASKQID_INVALID)
				list_insert_head(&free_list, zv);
		} else {
			mutex_exit(&zv->zv_state_lock);
		}
	}
	rw_exit(&zvol_state_lock);

	/* Drop zvol_state_lock before calling zvol_free() */
	while ((zv = list_head(&free_list)) != NULL) {
		list_remove(&free_list, zv);
		ops->zv_free(zv);
	}
}

/* Remove minor for this specific volume only */
static void
zvol_remove_minor_impl(const char *name)
{
	zvol_state_t *zv = NULL, *zv_next;

	if (zvol_inhibit_dev)
		return;

	rw_enter(&zvol_state_lock, RW_WRITER);

	for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
		zv_next = list_next(&zvol_state_list, zv);

		mutex_enter(&zv->zv_state_lock);
		if (strcmp(zv->zv_name, name) == 0) {
			/*
			 * By holding zv_state_lock here, we guarantee that no
			 * one is currently using this zv
			 */

			/* If in use, leave alone */
			if (zv->zv_open_count > 0 ||
			    atomic_read(&zv->zv_suspend_ref)) {
				mutex_exit(&zv->zv_state_lock);
				continue;
			}
			zvol_remove(zv);

			ops->zv_clear_private(zv);
			mutex_exit(&zv->zv_state_lock);
			break;
		} else {
			mutex_exit(&zv->zv_state_lock);
		}
	}

	/* Drop zvol_state_lock before calling zvol_free() */
	rw_exit(&zvol_state_lock);

	if (zv != NULL)
		ops->zv_free(zv);
}

/*
 * Rename minors for specified dataset including children and snapshots.
 */
static void
zvol_rename_minors_impl(const char *oldname, const char *newname)
{
	zvol_state_t *zv, *zv_next;
	int oldnamelen, newnamelen;

	if (zvol_inhibit_dev)
		return;

	oldnamelen = strlen(oldname);
	newnamelen = strlen(newname);

	rw_enter(&zvol_state_lock, RW_READER);

	for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
		zv_next = list_next(&zvol_state_list, zv);

		mutex_enter(&zv->zv_state_lock);

		if (strcmp(zv->zv_name, oldname) == 0) {
			ops->zv_rename_minor(zv, newname);
		} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
		    (zv->zv_name[oldnamelen] == '/' ||
		    zv->zv_name[oldnamelen] == '@')) {
			char *name = kmem_asprintf("%s%c%s", newname,
			    zv->zv_name[oldnamelen],
			    zv->zv_name + oldnamelen + 1);
			ops->zv_rename_minor(zv, name);
			kmem_strfree(name);
		}

		mutex_exit(&zv->zv_state_lock);
	}

	rw_exit(&zvol_state_lock);
}

typedef struct zvol_snapdev_cb_arg {
	uint64_t snapdev;
} zvol_snapdev_cb_arg_t;

static int
zvol_set_snapdev_cb(const char *dsname, void *param)
{
	zvol_snapdev_cb_arg_t *arg = param;

	if (strchr(dsname, '@') == NULL)
		return (0);

	switch (arg->snapdev) {
		case ZFS_SNAPDEV_VISIBLE:
			(void) ops->zv_create_minor(dsname);
			break;
		case ZFS_SNAPDEV_HIDDEN:
			(void) zvol_remove_minor_impl(dsname);
			break;
	}

	return (0);
}

static void
zvol_set_snapdev_impl(char *name, uint64_t snapdev)
{
	zvol_snapdev_cb_arg_t arg = {snapdev};
	fstrans_cookie_t cookie = spl_fstrans_mark();
	/*
	 * The zvol_set_snapdev_sync() sets snapdev appropriately
	 * in the dataset hierarchy. Here, we only scan snapshots.
	 */
	dmu_objset_find(name, zvol_set_snapdev_cb, &arg, DS_FIND_SNAPSHOTS);
	spl_fstrans_unmark(cookie);
}

static void
zvol_set_volmode_impl(char *name, uint64_t volmode)
{
	fstrans_cookie_t cookie;
	uint64_t old_volmode;
	zvol_state_t *zv;

	if (strchr(name, '@') != NULL)
		return;

	/*
	 * It's unfortunate we need to remove minors before we create new ones:
	 * this is necessary because our backing gendisk (zvol_state->zv_disk)
	 * could be different when we set, for instance, volmode from "geom"
	 * to "dev" (or vice versa).
	 */
	zv = zvol_find_by_name(name, RW_NONE);
	if (zv == NULL && volmode == ZFS_VOLMODE_NONE)
		return;
	if (zv != NULL) {
		old_volmode = zv->zv_volmode;
		mutex_exit(&zv->zv_state_lock);
		if (old_volmode == volmode)
			return;
		zvol_wait_close(zv);
	}
	cookie = spl_fstrans_mark();
	switch (volmode) {
		case ZFS_VOLMODE_NONE:
			(void) zvol_remove_minor_impl(name);
			break;
		case ZFS_VOLMODE_GEOM:
		case ZFS_VOLMODE_DEV:
			(void) zvol_remove_minor_impl(name);
			(void) ops->zv_create_minor(name);
			break;
		case ZFS_VOLMODE_DEFAULT:
			(void) zvol_remove_minor_impl(name);
			if (zvol_volmode == ZFS_VOLMODE_NONE)
				break;
			else /* if zvol_volmode is invalid defaults to "geom" */
				(void) ops->zv_create_minor(name);
			break;
	}
	spl_fstrans_unmark(cookie);
}

static zvol_task_t *
zvol_task_alloc(zvol_async_op_t op, const char *name1, const char *name2,
    uint64_t value)
{
	zvol_task_t *task;
	char *delim;

	/* Never allow tasks on hidden names. */
	if (name1[0] == '$')
		return (NULL);

	task = kmem_zalloc(sizeof (zvol_task_t), KM_SLEEP);
	task->op = op;
	task->value = value;
	delim = strchr(name1, '/');
	strlcpy(task->pool, name1, delim ? (delim - name1 + 1) : MAXNAMELEN);

	strlcpy(task->name1, name1, MAXNAMELEN);
	if (name2 != NULL)
		strlcpy(task->name2, name2, MAXNAMELEN);

	return (task);
}
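
/*
 * The strlcpy() size trick above extracts the pool component: for
 * name1 == "tank/vol/child", delim points at the first '/', so the copy
 * size is 5 and task->pool becomes "tank" (strlcpy reserves one byte for
 * the terminating NUL); with no '/' the whole name is the pool name.
 */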

static void
zvol_task_free(zvol_task_t *task)
{
	kmem_free(task, sizeof (zvol_task_t));
}

/*
 * The worker thread function, invoked asynchronously on the pool's
 * spa_zvol_taskq.
 */
static void
zvol_task_cb(void *arg)
{
	zvol_task_t *task = arg;

	switch (task->op) {
	case ZVOL_ASYNC_REMOVE_MINORS:
		zvol_remove_minors_impl(task->name1);
		break;
	case ZVOL_ASYNC_RENAME_MINORS:
		zvol_rename_minors_impl(task->name1, task->name2);
		break;
	case ZVOL_ASYNC_SET_SNAPDEV:
		zvol_set_snapdev_impl(task->name1, task->value);
		break;
	case ZVOL_ASYNC_SET_VOLMODE:
		zvol_set_volmode_impl(task->name1, task->value);
		break;
	default:
		VERIFY(0);
		break;
	}

	zvol_task_free(task);
}

typedef struct zvol_set_prop_int_arg {
	const char *zsda_name;
	uint64_t zsda_value;
	zprop_source_t zsda_source;
	dmu_tx_t *zsda_tx;
} zvol_set_prop_int_arg_t;

/*
 * Sanity check the dataset for safe use by the sync task.  No additional
 * conditions are imposed.
 */
static int
zvol_set_snapdev_check(void *arg, dmu_tx_t *tx)
{
	zvol_set_prop_int_arg_t *zsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *dd;
	int error;

	error = dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL);
	if (error != 0)
		return (error);

	dsl_dir_rele(dd, FTAG);

	return (error);
}

/* ARGSUSED */
static int
zvol_set_snapdev_sync_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	char dsname[MAXNAMELEN];
	zvol_task_t *task;
	uint64_t snapdev;

	dsl_dataset_name(ds, dsname);
	if (dsl_prop_get_int_ds(ds, "snapdev", &snapdev) != 0)
		return (0);
	task = zvol_task_alloc(ZVOL_ASYNC_SET_SNAPDEV, dsname, NULL, snapdev);
	if (task == NULL)
		return (0);

	(void) taskq_dispatch(dp->dp_spa->spa_zvol_taskq, zvol_task_cb,
	    task, TQ_SLEEP);
	return (0);
}

/*
 * Traverse all child datasets and apply snapdev appropriately.
 * We call dsl_prop_set_sync_impl() here to set the value only on the toplevel
 * dataset and read the effective "snapdev" on every child in the callback
 * function: this is because the value is not guaranteed to be the same in the
 * whole dataset hierarchy.
 */
static void
zvol_set_snapdev_sync(void *arg, dmu_tx_t *tx)
{
	zvol_set_prop_int_arg_t *zsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	int error;

	VERIFY0(dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL));
	zsda->zsda_tx = tx;

	error = dsl_dataset_hold(dp, zsda->zsda_name, FTAG, &ds);
	if (error == 0) {
		dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_SNAPDEV),
		    zsda->zsda_source, sizeof (zsda->zsda_value), 1,
		    &zsda->zsda_value, zsda->zsda_tx);
		dsl_dataset_rele(ds, FTAG);
	}
	dmu_objset_find_dp(dp, dd->dd_object, zvol_set_snapdev_sync_cb,
	    zsda, DS_FIND_CHILDREN);

	dsl_dir_rele(dd, FTAG);
}

int
zvol_set_snapdev(const char *ddname, zprop_source_t source, uint64_t snapdev)
{
	zvol_set_prop_int_arg_t zsda;

	zsda.zsda_name = ddname;
	zsda.zsda_source = source;
	zsda.zsda_value = snapdev;

	return (dsl_sync_task(ddname, zvol_set_snapdev_check,
	    zvol_set_snapdev_sync, &zsda, 0, ZFS_SPACE_CHECK_NONE));
}

/*
 * Sanity check the dataset for safe use by the sync task.  No additional
 * conditions are imposed.
 */
static int
zvol_set_volmode_check(void *arg, dmu_tx_t *tx)
{
	zvol_set_prop_int_arg_t *zsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *dd;
	int error;

	error = dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL);
	if (error != 0)
		return (error);

	dsl_dir_rele(dd, FTAG);

	return (error);
}

/* ARGSUSED */
static int
zvol_set_volmode_sync_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	char dsname[MAXNAMELEN];
	zvol_task_t *task;
	uint64_t volmode;

	dsl_dataset_name(ds, dsname);
	if (dsl_prop_get_int_ds(ds, "volmode", &volmode) != 0)
		return (0);
	task = zvol_task_alloc(ZVOL_ASYNC_SET_VOLMODE, dsname, NULL, volmode);
	if (task == NULL)
		return (0);

	(void) taskq_dispatch(dp->dp_spa->spa_zvol_taskq, zvol_task_cb,
	    task, TQ_SLEEP);
	return (0);
}

/*
 * Traverse all child datasets and apply volmode appropriately.
 * We call dsl_prop_set_sync_impl() here to set the value only on the toplevel
 * dataset and read the effective "volmode" on every child in the callback
 * function: this is because the value is not guaranteed to be the same in the
 * whole dataset hierarchy.
 */
static void
zvol_set_volmode_sync(void *arg, dmu_tx_t *tx)
{
	zvol_set_prop_int_arg_t *zsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	int error;

	VERIFY0(dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL));
	zsda->zsda_tx = tx;

	error = dsl_dataset_hold(dp, zsda->zsda_name, FTAG, &ds);
	if (error == 0) {
		dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_VOLMODE),
		    zsda->zsda_source, sizeof (zsda->zsda_value), 1,
		    &zsda->zsda_value, zsda->zsda_tx);
		dsl_dataset_rele(ds, FTAG);
	}

	dmu_objset_find_dp(dp, dd->dd_object, zvol_set_volmode_sync_cb,
	    zsda, DS_FIND_CHILDREN);

	dsl_dir_rele(dd, FTAG);
}

int
zvol_set_volmode(const char *ddname, zprop_source_t source, uint64_t volmode)
{
	zvol_set_prop_int_arg_t zsda;

	zsda.zsda_name = ddname;
	zsda.zsda_source = source;
	zsda.zsda_value = volmode;

	return (dsl_sync_task(ddname, zvol_set_volmode_check,
	    zvol_set_volmode_sync, &zsda, 0, ZFS_SPACE_CHECK_NONE));
}

void
zvol_remove_minors(spa_t *spa, const char *name, boolean_t async)
{
	zvol_task_t *task;
	taskqid_t id;

	task = zvol_task_alloc(ZVOL_ASYNC_REMOVE_MINORS, name, NULL, ~0ULL);
	if (task == NULL)
		return;

	id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
	if ((async == B_FALSE) && (id != TASKQID_INVALID))
		taskq_wait_id(spa->spa_zvol_taskq, id);
}

void
zvol_rename_minors(spa_t *spa, const char *name1, const char *name2,
    boolean_t async)
{
	zvol_task_t *task;
	taskqid_t id;

	task = zvol_task_alloc(ZVOL_ASYNC_RENAME_MINORS, name1, name2, ~0ULL);
	if (task == NULL)
		return;

	id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
	if ((async == B_FALSE) && (id != TASKQID_INVALID))
		taskq_wait_id(spa->spa_zvol_taskq, id);
}

boolean_t
zvol_is_zvol(const char *name)
{
	return (ops->zv_is_zvol(name));
}

void
zvol_register_ops(const zvol_platform_ops_t *zvol_ops)
{
	ops = zvol_ops;
}

int
zvol_init_impl(void)
{
	int i;

	list_create(&zvol_state_list, sizeof (zvol_state_t),
	    offsetof(zvol_state_t, zv_next));
	rw_init(&zvol_state_lock, NULL, RW_DEFAULT, NULL);

	zvol_htable = kmem_alloc(ZVOL_HT_SIZE * sizeof (struct hlist_head),
	    KM_SLEEP);
	for (i = 0; i < ZVOL_HT_SIZE; i++)
		INIT_HLIST_HEAD(&zvol_htable[i]);

	return (0);
}

void
zvol_fini_impl(void)
{
	zvol_remove_minors_impl(NULL);

	/*
	 * The call to "zvol_remove_minors_impl" may dispatch entries to
	 * the system_taskq, but it doesn't wait for those entries to
	 * complete before it returns. Thus, we must wait for all of the
	 * removals to finish, before we can continue.
	 */
	taskq_wait_outstanding(system_taskq, 0);

	kmem_free(zvol_htable, ZVOL_HT_SIZE * sizeof (struct hlist_head));
	list_destroy(&zvol_state_list);
	rw_destroy(&zvol_state_lock);
}