xref: /illumos-gate/usr/src/uts/common/fs/zfs/zfs_vfsops.c (revision 68ac2337c38c8af06edcf32a72e42de36ec72a9d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/sysmacros.h>
32 #include <sys/kmem.h>
33 #include <sys/pathname.h>
34 #include <sys/acl.h>
35 #include <sys/vnode.h>
36 #include <sys/vfs.h>
37 #include <sys/mntent.h>
38 #include <sys/mount.h>
39 #include <sys/cmn_err.h>
40 #include "fs/fs_subr.h"
41 #include <sys/zfs_znode.h>
42 #include <sys/zil.h>
43 #include <sys/fs/zfs.h>
44 #include <sys/dmu.h>
45 #include <sys/dsl_prop.h>
46 #include <sys/spa.h>
47 #include <sys/zap.h>
48 #include <sys/varargs.h>
49 #include <sys/policy.h>
50 #include <sys/atomic.h>
51 #include <sys/mkdev.h>
52 #include <sys/modctl.h>
53 #include <sys/zfs_ioctl.h>
54 #include <sys/zfs_ctldir.h>
55 #include <sys/bootconf.h>
56 #include <sys/sunddi.h>
57 #include <sys/dnlc.h>
58 
59 int zfsfstype;
60 vfsops_t *zfs_vfsops = NULL;
61 static major_t zfs_major;
62 static minor_t zfs_minor;
63 static kmutex_t	zfs_dev_mtx;
64 
65 extern char zfs_bootpath[BO_MAXOBJNAME];
66 
67 static int zfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr);
68 static int zfs_umount(vfs_t *vfsp, int fflag, cred_t *cr);
69 static int zfs_mountroot(vfs_t *vfsp, enum whymountroot);
70 static int zfs_root(vfs_t *vfsp, vnode_t **vpp);
71 static int zfs_statvfs(vfs_t *vfsp, struct statvfs64 *statp);
72 static int zfs_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp);
73 static void zfs_freevfs(vfs_t *vfsp);
74 static void zfs_objset_close(zfsvfs_t *zfsvfs);
75 
76 static const fs_operation_def_t zfs_vfsops_template[] = {
77 	VFSNAME_MOUNT, zfs_mount,
78 	VFSNAME_MOUNTROOT, zfs_mountroot,
79 	VFSNAME_UNMOUNT, zfs_umount,
80 	VFSNAME_ROOT, zfs_root,
81 	VFSNAME_STATVFS, zfs_statvfs,
82 	VFSNAME_SYNC, (fs_generic_func_p) zfs_sync,
83 	VFSNAME_VGET, zfs_vget,
84 	VFSNAME_FREEVFS, (fs_generic_func_p) zfs_freevfs,
85 	NULL, NULL
86 };
87 
88 static const fs_operation_def_t zfs_vfsops_eio_template[] = {
89 	VFSNAME_FREEVFS, (fs_generic_func_p) zfs_freevfs,
90 	NULL, NULL
91 };
92 
93 /*
94  * We need to keep a count of active fs's.
95  * This is necessary to prevent our module
96  * from being unloaded after a umount -f
97  */
98 static uint32_t	zfs_active_fs_count = 0;
99 
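/*
 * Mutually exclusive mount options: each cancel array below names the
 * option(s) that are cleared when the corresponding option is set, so that
 * (for example) specifying "noatime" cancels "atime" and vice versa.
 */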
100 static char *noatime_cancel[] = { MNTOPT_ATIME, NULL };
101 static char *atime_cancel[] = { MNTOPT_NOATIME, NULL };
102 static char *noxattr_cancel[] = { MNTOPT_XATTR, NULL };
103 static char *xattr_cancel[] = { MNTOPT_NOXATTR, NULL };
104 
105 /*
106  * MNTOPT_DEFAULT was removed from MNTOPT_XATTR, since the
107  * default value is now determined by the xattr property.
108  */
109 static mntopt_t mntopts[] = {
110 	{ MNTOPT_NOXATTR, noxattr_cancel, NULL, 0, NULL },
111 	{ MNTOPT_XATTR, xattr_cancel, NULL, 0, NULL },
112 	{ MNTOPT_NOATIME, noatime_cancel, NULL, MO_DEFAULT, NULL },
113 	{ MNTOPT_ATIME, atime_cancel, NULL, 0, NULL }
114 };
115 
116 static mntopts_t zfs_mntopts = {
117 	sizeof (mntopts) / sizeof (mntopt_t),
118 	mntopts
119 };
120 
121 /*ARGSUSED*/
122 int
123 zfs_sync(vfs_t *vfsp, short flag, cred_t *cr)
124 {
125 	/*
126 	 * Data integrity is job one.  We don't want a compromised kernel
127 	 * writing to the storage pool, so we never sync during panic.
128 	 */
129 	if (panicstr)
130 		return (0);
131 
132 	/*
133 	 * SYNC_ATTR is used by fsflush() to force old filesystems like UFS
134 	 * to sync metadata, which they would otherwise cache indefinitely.
135 	 * Semantically, the only requirement is that the sync be initiated.
136 	 * The DMU syncs out txgs frequently, so there's nothing to do.
137 	 */
138 	if (flag & SYNC_ATTR)
139 		return (0);
140 
141 	if (vfsp != NULL) {
142 		/*
143 		 * Sync a specific filesystem.
144 		 */
145 		zfsvfs_t *zfsvfs = vfsp->vfs_data;
146 
147 		ZFS_ENTER(zfsvfs);
148 		if (zfsvfs->z_log != NULL)
149 			zil_commit(zfsvfs->z_log, UINT64_MAX, 0);
150 		else
151 			txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
152 		ZFS_EXIT(zfsvfs);
153 	} else {
154 		/*
155 		 * Sync all ZFS filesystems.  This is what happens when you
156 		 * run sync(1M).  Unlike other filesystems, ZFS honors the
157 		 * request by waiting for all pools to commit all dirty data.
158 		 */
159 		spa_sync_allpools();
160 	}
161 
162 	return (0);
163 }
164 
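/*
 * Allocate a unique device number for a new mount.  We walk the minor
 * numbers of the current major, skipping any dev_t that is already mounted;
 * if the entire minor space is exhausted, we obtain a fresh major number
 * via getudev() and start over at minor 0.
 */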
165 static int
166 zfs_create_unique_device(dev_t *dev)
167 {
168 	major_t new_major;
169 
170 	do {
171 		ASSERT3U(zfs_minor, <=, MAXMIN32);
172 		minor_t start = zfs_minor;
173 		do {
174 			mutex_enter(&zfs_dev_mtx);
175 			if (zfs_minor >= MAXMIN32) {
176 				/*
177 				 * If we're still using the real major,
178 				 * keep out of /dev/zfs and /dev/zvol minor
179 				 * number space.  If we're using a getudev()'ed
180 				 * major number, we can use all of its minors.
181 				 */
182 				if (zfs_major == ddi_name_to_major(ZFS_DRIVER))
183 					zfs_minor = ZFS_MIN_MINOR;
184 				else
185 					zfs_minor = 0;
186 			} else {
187 				zfs_minor++;
188 			}
189 			*dev = makedevice(zfs_major, zfs_minor);
190 			mutex_exit(&zfs_dev_mtx);
191 		} while (vfs_devismounted(*dev) && zfs_minor != start);
192 		if (zfs_minor == start) {
193 			/*
194 			 * We are using all ~262,000 minor numbers for the
195 			 * current major number.  Create a new major number.
196 			 */
197 			if ((new_major = getudev()) == (major_t)-1) {
198 				cmn_err(CE_WARN,
199 				    "zfs_mount: Can't get unique major "
200 				    "device number.");
201 				return (-1);
202 			}
203 			mutex_enter(&zfs_dev_mtx);
204 			zfs_major = new_major;
205 			zfs_minor = 0;
206 
207 			mutex_exit(&zfs_dev_mtx);
208 		} else {
209 			break;
210 		}
211 		/* CONSTANTCONDITION */
212 	} while (1);
213 
214 	return (0);
215 }
216 
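/*
 * Property change callbacks.  Each routine below is registered with
 * dsl_prop_register() (see zfs_register_callbacks()) and is invoked whenever
 * the corresponding dataset property changes.  It updates the cached state
 * in the zfsvfs and, where applicable, keeps the vfs flags and mount option
 * strings in sync with the new value.
 */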
217 static void
218 atime_changed_cb(void *arg, uint64_t newval)
219 {
220 	zfsvfs_t *zfsvfs = arg;
221 
222 	if (newval == TRUE) {
223 		zfsvfs->z_atime = TRUE;
224 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME);
225 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_ATIME, NULL, 0);
226 	} else {
227 		zfsvfs->z_atime = FALSE;
228 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_ATIME);
229 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME, NULL, 0);
230 	}
231 }
232 
233 static void
234 xattr_changed_cb(void *arg, uint64_t newval)
235 {
236 	zfsvfs_t *zfsvfs = arg;
237 
238 	if (newval == TRUE) {
239 		/* XXX locking on vfs_flag? */
240 		zfsvfs->z_vfs->vfs_flag |= VFS_XATTR;
241 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOXATTR);
242 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_XATTR, NULL, 0);
243 	} else {
244 		/* XXX locking on vfs_flag? */
245 		zfsvfs->z_vfs->vfs_flag &= ~VFS_XATTR;
246 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_XATTR);
247 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOXATTR, NULL, 0);
248 	}
249 }
250 
251 static void
252 blksz_changed_cb(void *arg, uint64_t newval)
253 {
254 	zfsvfs_t *zfsvfs = arg;
255 
256 	if (newval < SPA_MINBLOCKSIZE ||
257 	    newval > SPA_MAXBLOCKSIZE || !ISP2(newval))
258 		newval = SPA_MAXBLOCKSIZE;
259 
260 	zfsvfs->z_max_blksz = newval;
261 	zfsvfs->z_vfs->vfs_bsize = newval;
262 }
263 
264 static void
265 readonly_changed_cb(void *arg, uint64_t newval)
266 {
267 	zfsvfs_t *zfsvfs = arg;
268 
269 	if (newval) {
270 		/* XXX locking on vfs_flag? */
271 		zfsvfs->z_vfs->vfs_flag |= VFS_RDONLY;
272 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RW);
273 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RO, NULL, 0);
274 		(void) zfs_delete_thread_target(zfsvfs, 0);
275 	} else {
276 		/* XXX locking on vfs_flag? */
277 		zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY;
278 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RO);
279 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RW, NULL, 0);
280 		(void) zfs_delete_thread_target(zfsvfs, 1);
281 	}
282 }
283 
284 static void
285 devices_changed_cb(void *arg, uint64_t newval)
286 {
287 	zfsvfs_t *zfsvfs = arg;
288 
289 	if (newval == FALSE) {
290 		zfsvfs->z_vfs->vfs_flag |= VFS_NODEVICES;
291 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_DEVICES);
292 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NODEVICES, NULL, 0);
293 	} else {
294 		zfsvfs->z_vfs->vfs_flag &= ~VFS_NODEVICES;
295 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NODEVICES);
296 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_DEVICES, NULL, 0);
297 	}
298 }
299 
300 static void
301 setuid_changed_cb(void *arg, uint64_t newval)
302 {
303 	zfsvfs_t *zfsvfs = arg;
304 
305 	if (newval == FALSE) {
306 		zfsvfs->z_vfs->vfs_flag |= VFS_NOSETUID;
307 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_SETUID);
308 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID, NULL, 0);
309 	} else {
310 		zfsvfs->z_vfs->vfs_flag &= ~VFS_NOSETUID;
311 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID);
312 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_SETUID, NULL, 0);
313 	}
314 }
315 
316 static void
317 exec_changed_cb(void *arg, uint64_t newval)
318 {
319 	zfsvfs_t *zfsvfs = arg;
320 
321 	if (newval == FALSE) {
322 		zfsvfs->z_vfs->vfs_flag |= VFS_NOEXEC;
323 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_EXEC);
324 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC, NULL, 0);
325 	} else {
326 		zfsvfs->z_vfs->vfs_flag &= ~VFS_NOEXEC;
327 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC);
328 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_EXEC, NULL, 0);
329 	}
330 }
331 
332 static void
333 snapdir_changed_cb(void *arg, uint64_t newval)
334 {
335 	zfsvfs_t *zfsvfs = arg;
336 
337 	zfsvfs->z_show_ctldir = newval;
338 }
339 
340 static void
341 acl_mode_changed_cb(void *arg, uint64_t newval)
342 {
343 	zfsvfs_t *zfsvfs = arg;
344 
345 	zfsvfs->z_acl_mode = newval;
346 }
347 
348 static void
349 acl_inherit_changed_cb(void *arg, uint64_t newval)
350 {
351 	zfsvfs_t *zfsvfs = arg;
352 
353 	zfsvfs->z_acl_inherit = newval;
354 }
355 
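/*
 * Re-apply temporary mount options on a remount.  The persistent dataset
 * properties are left untouched; we simply invoke the property callbacks
 * directly for whichever options were supplied this time around.
 */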
356 static int
357 zfs_refresh_properties(vfs_t *vfsp)
358 {
359 	zfsvfs_t *zfsvfs = vfsp->vfs_data;
360 
361 	/*
362 	 * Remount operations default to "rw" unless "ro" is explicitly
363 	 * specified.
364 	 */
365 	if (vfs_optionisset(vfsp, MNTOPT_RO, NULL)) {
366 		readonly_changed_cb(zfsvfs, B_TRUE);
367 	} else {
368 		if (!dmu_objset_is_snapshot(zfsvfs->z_os))
369 			readonly_changed_cb(zfsvfs, B_FALSE);
370 		else if (vfs_optionisset(vfsp, MNTOPT_RW, NULL))
371 			    return (EROFS);
372 	}
373 
374 	if (vfs_optionisset(vfsp, MNTOPT_NOSUID, NULL)) {
375 		devices_changed_cb(zfsvfs, B_FALSE);
376 		setuid_changed_cb(zfsvfs, B_FALSE);
377 	} else {
378 		if (vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL))
379 			devices_changed_cb(zfsvfs, B_FALSE);
380 		else if (vfs_optionisset(vfsp, MNTOPT_DEVICES, NULL))
381 			devices_changed_cb(zfsvfs, B_TRUE);
382 
383 		if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL))
384 			setuid_changed_cb(zfsvfs, B_FALSE);
385 		else if (vfs_optionisset(vfsp, MNTOPT_SETUID, NULL))
386 			setuid_changed_cb(zfsvfs, B_TRUE);
387 	}
388 
389 	if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL))
390 		exec_changed_cb(zfsvfs, B_FALSE);
391 	else if (vfs_optionisset(vfsp, MNTOPT_EXEC, NULL))
392 		exec_changed_cb(zfsvfs, B_TRUE);
393 
394 	if (vfs_optionisset(vfsp, MNTOPT_ATIME, NULL))
395 		atime_changed_cb(zfsvfs, B_TRUE);
396 	else if (vfs_optionisset(vfsp, MNTOPT_NOATIME, NULL))
397 		atime_changed_cb(zfsvfs, B_FALSE);
398 
399 	if (vfs_optionisset(vfsp, MNTOPT_XATTR, NULL))
400 		xattr_changed_cb(zfsvfs, B_TRUE);
401 	else if (vfs_optionisset(vfsp, MNTOPT_NOXATTR, NULL))
402 		xattr_changed_cb(zfsvfs, B_FALSE);
403 
404 	return (0);
405 }
406 
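/*
 * Register dsl_prop callbacks for every property we care about, then replay
 * any temporary overrides that were given as mount options.  Registering a
 * callback invokes it with the current property value, which is why the
 * mount options are stashed beforehand and restored afterwards.
 */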
407 static int
408 zfs_register_callbacks(vfs_t *vfsp)
409 {
410 	struct dsl_dataset *ds = NULL;
411 	objset_t *os = NULL;
412 	zfsvfs_t *zfsvfs = NULL;
413 	int readonly, do_readonly = FALSE;
414 	int setuid, do_setuid = FALSE;
415 	int exec, do_exec = FALSE;
416 	int devices, do_devices = FALSE;
417 	int xattr, do_xattr = FALSE;
418 	int error = 0;
419 
420 	ASSERT(vfsp);
421 	zfsvfs = vfsp->vfs_data;
422 	ASSERT(zfsvfs);
423 	os = zfsvfs->z_os;
424 
425 	/*
426 	 * The act of registering our callbacks will destroy any mount
427 	 * options we may have.  In order to enable temporary overrides
428 	 * of mount options, we stash away the current values and
429 	 * restore them after we register the callbacks.
430 	 */
431 	if (vfs_optionisset(vfsp, MNTOPT_RO, NULL)) {
432 		readonly = B_TRUE;
433 		do_readonly = B_TRUE;
434 	} else if (vfs_optionisset(vfsp, MNTOPT_RW, NULL)) {
435 		readonly = B_FALSE;
436 		do_readonly = B_TRUE;
437 	}
438 	if (vfs_optionisset(vfsp, MNTOPT_NOSUID, NULL)) {
439 		devices = B_FALSE;
440 		setuid = B_FALSE;
441 		do_devices = B_TRUE;
442 		do_setuid = B_TRUE;
443 	} else {
444 		if (vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL)) {
445 			devices = B_FALSE;
446 			do_devices = B_TRUE;
447 		} else if (vfs_optionisset(vfsp,
448 			    MNTOPT_DEVICES, NULL)) {
449 			devices = B_TRUE;
450 			do_devices = B_TRUE;
451 		}
452 
453 		if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL)) {
454 			setuid = B_FALSE;
455 			do_setuid = B_TRUE;
456 		} else if (vfs_optionisset(vfsp, MNTOPT_SETUID, NULL)) {
457 			setuid = B_TRUE;
458 			do_setuid = B_TRUE;
459 		}
460 	}
461 	if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL)) {
462 		exec = B_FALSE;
463 		do_exec = B_TRUE;
464 	} else if (vfs_optionisset(vfsp, MNTOPT_EXEC, NULL)) {
465 		exec = B_TRUE;
466 		do_exec = B_TRUE;
467 	}
468 	if (vfs_optionisset(vfsp, MNTOPT_NOXATTR, NULL)) {
469 		xattr = B_FALSE;
470 		do_xattr = B_TRUE;
471 	} else if (vfs_optionisset(vfsp, MNTOPT_XATTR, NULL)) {
472 		xattr = B_TRUE;
473 		do_xattr = B_TRUE;
474 	}
475 
476 	/*
477 	 * Register property callbacks.
478 	 *
479 	 * It would probably be fine to just check for i/o error from
480 	 * the first prop_register(), but I guess I like to go
481 	 * overboard...
482 	 */
483 	ds = dmu_objset_ds(os);
484 	error = dsl_prop_register(ds, "atime", atime_changed_cb, zfsvfs);
485 	error = error ? error : dsl_prop_register(ds,
486 	    "xattr", xattr_changed_cb, zfsvfs);
487 	error = error ? error : dsl_prop_register(ds,
488 	    "recordsize", blksz_changed_cb, zfsvfs);
489 	error = error ? error : dsl_prop_register(ds,
490 	    "readonly", readonly_changed_cb, zfsvfs);
491 	error = error ? error : dsl_prop_register(ds,
492 	    "devices", devices_changed_cb, zfsvfs);
493 	error = error ? error : dsl_prop_register(ds,
494 	    "setuid", setuid_changed_cb, zfsvfs);
495 	error = error ? error : dsl_prop_register(ds,
496 	    "exec", exec_changed_cb, zfsvfs);
497 	error = error ? error : dsl_prop_register(ds,
498 	    "snapdir", snapdir_changed_cb, zfsvfs);
499 	error = error ? error : dsl_prop_register(ds,
500 	    "aclmode", acl_mode_changed_cb, zfsvfs);
501 	error = error ? error : dsl_prop_register(ds,
502 	    "aclinherit", acl_inherit_changed_cb, zfsvfs);
503 	if (error)
504 		goto unregister;
505 
506 	/*
507 	 * Invoke our callbacks to restore temporary mount options.
508 	 */
509 	if (do_readonly)
510 		readonly_changed_cb(zfsvfs, readonly);
511 	if (do_setuid)
512 		setuid_changed_cb(zfsvfs, setuid);
513 	if (do_exec)
514 		exec_changed_cb(zfsvfs, exec);
515 	if (do_devices)
516 		devices_changed_cb(zfsvfs, devices);
517 	if (do_xattr)
518 		xattr_changed_cb(zfsvfs, xattr);
519 
520 	return (0);
521 
522 unregister:
523 	/*
524 	 * We may attempt to unregister some callbacks that are not
525 	 * registered, but this is OK; it will simply return ENOMSG,
526 	 * which we will ignore.
527 	 */
528 	(void) dsl_prop_unregister(ds, "atime", atime_changed_cb, zfsvfs);
529 	(void) dsl_prop_unregister(ds, "xattr", xattr_changed_cb, zfsvfs);
530 	(void) dsl_prop_unregister(ds, "recordsize", blksz_changed_cb, zfsvfs);
531 	(void) dsl_prop_unregister(ds, "readonly", readonly_changed_cb, zfsvfs);
532 	(void) dsl_prop_unregister(ds, "devices", devices_changed_cb, zfsvfs);
533 	(void) dsl_prop_unregister(ds, "setuid", setuid_changed_cb, zfsvfs);
534 	(void) dsl_prop_unregister(ds, "exec", exec_changed_cb, zfsvfs);
535 	(void) dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb, zfsvfs);
536 	(void) dsl_prop_unregister(ds, "aclmode", acl_mode_changed_cb, zfsvfs);
537 	(void) dsl_prop_unregister(ds, "aclinherit", acl_inherit_changed_cb,
538 	    zfsvfs);
539 	return (error);
540 
541 }
542 
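/*
 * Core of the mount path: allocate and initialize the zfsvfs, pick a unique
 * device number, open the objset, and set up the per-mount machinery (the
 * delete thread, intent log replay, the ZIL, and the '.zfs' control
 * directory).  Snapshot mounts are forced read-only and skip the delete
 * thread, log replay, and control directory setup.
 */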
543 static int
544 zfs_domount(vfs_t *vfsp, char *osname, cred_t *cr)
545 {
546 	dev_t mount_dev;
547 	uint64_t recordsize, readonly;
548 	int error = 0;
549 	int mode;
550 	zfsvfs_t *zfsvfs;
551 	znode_t *zp = NULL;
552 
553 	ASSERT(vfsp);
554 	ASSERT(osname);
555 
556 	/*
557 	 * Initialize the zfs-specific filesystem structure.
558 	 * Should probably make this a kmem cache, shuffle fields,
559 	 * and just bzero up to z_hold_mtx[].
560 	 */
561 	zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
562 	zfsvfs->z_vfs = vfsp;
563 	zfsvfs->z_parent = zfsvfs;
564 	zfsvfs->z_assign = TXG_NOWAIT;
565 	zfsvfs->z_max_blksz = SPA_MAXBLOCKSIZE;
566 	zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
567 
568 	mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
569 	list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
570 	    offsetof(znode_t, z_link_node));
571 	rw_init(&zfsvfs->z_um_lock, NULL, RW_DEFAULT, NULL);
572 
573 	/* Initialize the generic filesystem structure. */
574 	vfsp->vfs_bcount = 0;
575 	vfsp->vfs_data = NULL;
576 
577 	if (zfs_create_unique_device(&mount_dev) == -1) {
578 		error = ENODEV;
579 		goto out;
580 	}
581 	ASSERT(vfs_devismounted(mount_dev) == 0);
582 
583 	if (error = dsl_prop_get_integer(osname, "recordsize", &recordsize,
584 	    NULL))
585 		goto out;
586 
587 	vfsp->vfs_dev = mount_dev;
588 	vfsp->vfs_fstype = zfsfstype;
589 	vfsp->vfs_bsize = recordsize;
590 	vfsp->vfs_flag |= VFS_NOTRUNC;
591 	vfsp->vfs_data = zfsvfs;
592 
593 	if (error = dsl_prop_get_integer(osname, "readonly", &readonly, NULL))
594 		goto out;
595 
596 	if (readonly)
597 		mode = DS_MODE_PRIMARY | DS_MODE_READONLY;
598 	else
599 		mode = DS_MODE_PRIMARY;
600 
601 	error = dmu_objset_open(osname, DMU_OST_ZFS, mode, &zfsvfs->z_os);
602 	if (error == EROFS) {
603 		mode = DS_MODE_PRIMARY | DS_MODE_READONLY;
604 		error = dmu_objset_open(osname, DMU_OST_ZFS, mode,
605 		    &zfsvfs->z_os);
606 	}
607 
608 	if (error)
609 		goto out;
610 
611 	if (error = zfs_init_fs(zfsvfs, &zp, cr))
612 		goto out;
613 
614 	/* The call to zfs_init_fs leaves the vnode held; release it here. */
615 	VN_RELE(ZTOV(zp));
616 
617 	if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
618 		uint64_t xattr;
619 
620 		ASSERT(mode & DS_MODE_READONLY);
621 		atime_changed_cb(zfsvfs, B_FALSE);
622 		readonly_changed_cb(zfsvfs, B_TRUE);
623 		if (error = dsl_prop_get_integer(osname, "xattr", &xattr, NULL))
624 			goto out;
625 		xattr_changed_cb(zfsvfs, xattr);
626 		zfsvfs->z_issnap = B_TRUE;
627 	} else {
628 		error = zfs_register_callbacks(vfsp);
629 		if (error)
630 			goto out;
631 
632 		/*
633 		 * Start a delete thread running.
634 		 */
635 		(void) zfs_delete_thread_target(zfsvfs, 1);
636 
637 		/*
638 		 * Parse and replay the intent log.
639 		 */
640 		zil_replay(zfsvfs->z_os, zfsvfs, &zfsvfs->z_assign,
641 		    zfs_replay_vector, (void (*)(void *))zfs_delete_wait_empty);
642 
643 		if (!zil_disable)
644 			zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data);
645 	}
646 
647 	if (!zfsvfs->z_issnap)
648 		zfsctl_create(zfsvfs);
649 out:
650 	if (error) {
651 		if (zfsvfs->z_os)
652 			dmu_objset_close(zfsvfs->z_os);
653 		kmem_free(zfsvfs, sizeof (zfsvfs_t));
654 	} else {
655 		atomic_add_32(&zfs_active_fs_count, 1);
656 	}
657 
658 	return (error);
659 
660 }
661 
662 void
663 zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
664 {
665 	objset_t *os = zfsvfs->z_os;
666 	struct dsl_dataset *ds;
667 
668 	/*
669 	 * Unregister properties.
670 	 */
671 	if (!dmu_objset_is_snapshot(os)) {
672 		ds = dmu_objset_ds(os);
673 		VERIFY(dsl_prop_unregister(ds, "atime", atime_changed_cb,
674 		    zfsvfs) == 0);
675 
676 		VERIFY(dsl_prop_unregister(ds, "xattr", xattr_changed_cb,
677 		    zfsvfs) == 0);
678 
679 		VERIFY(dsl_prop_unregister(ds, "recordsize", blksz_changed_cb,
680 		    zfsvfs) == 0);
681 
682 		VERIFY(dsl_prop_unregister(ds, "readonly", readonly_changed_cb,
683 		    zfsvfs) == 0);
684 
685 		VERIFY(dsl_prop_unregister(ds, "devices", devices_changed_cb,
686 		    zfsvfs) == 0);
687 
688 		VERIFY(dsl_prop_unregister(ds, "setuid", setuid_changed_cb,
689 		    zfsvfs) == 0);
690 
691 		VERIFY(dsl_prop_unregister(ds, "exec", exec_changed_cb,
692 		    zfsvfs) == 0);
693 
694 		VERIFY(dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb,
695 		    zfsvfs) == 0);
696 
697 		VERIFY(dsl_prop_unregister(ds, "aclmode", acl_mode_changed_cb,
698 		    zfsvfs) == 0);
699 
700 		VERIFY(dsl_prop_unregister(ds, "aclinherit",
701 		    acl_inherit_changed_cb, zfsvfs) == 0);
702 	}
703 }
704 
705 static int
706 zfs_mountroot(vfs_t *vfsp, enum whymountroot why)
707 {
708 	int error = 0;
709 	int ret = 0;
710 	static int zfsrootdone = 0;
711 	zfsvfs_t *zfsvfs = NULL;
712 	znode_t *zp = NULL;
713 	vnode_t *vp = NULL;
714 
715 	ASSERT(vfsp);
716 
717 	/*
718 	 * The filesystem that we mount as root is defined in
719 	 * /etc/system using the zfsroot variable.  The value defined
720 	 * there is copied early in startup code to zfs_bootpath
721 	 * (defined in modsysfile.c).
722 	 */
723 	if (why == ROOT_INIT) {
724 		if (zfsrootdone++)
725 			return (EBUSY);
726 
727 		/*
728 		 * This needs to be done here, so that when we return from
729 		 * mountroot, the vfs resource name will be set correctly.
730 		 */
731 		if (snprintf(rootfs.bo_name, BO_MAXOBJNAME, "%s", zfs_bootpath)
732 		    >= BO_MAXOBJNAME)
733 			return (ENAMETOOLONG);
734 
735 		if (error = vfs_lock(vfsp))
736 			return (error);
737 
738 		if (error = zfs_domount(vfsp, zfs_bootpath, CRED()))
739 			goto out;
740 
741 		zfsvfs = (zfsvfs_t *)vfsp->vfs_data;
742 		ASSERT(zfsvfs);
743 		if (error = zfs_zget(zfsvfs, zfsvfs->z_root, &zp))
744 			goto out;
745 
746 		vp = ZTOV(zp);
747 		mutex_enter(&vp->v_lock);
748 		vp->v_flag |= VROOT;
749 		mutex_exit(&vp->v_lock);
750 		rootvp = vp;
751 
752 		/*
753 		 * The zfs_zget call above returns with a hold on vp; we release
754 		 * it here.
755 		 */
756 		VN_RELE(vp);
757 
758 		/*
759 		 * Mount root as readonly initially; it will be remounted
760 		 * read/write by /lib/svc/method/fs-usr.
761 		 */
762 		readonly_changed_cb(vfsp->vfs_data, B_TRUE);
763 		vfs_add((struct vnode *)0, vfsp,
764 		    (vfsp->vfs_flag & VFS_RDONLY) ? MS_RDONLY : 0);
765 out:
766 		vfs_unlock(vfsp);
767 		ret = (error) ? error : 0;
768 		return (ret);
769 
770 	} else if (why == ROOT_REMOUNT) {
771 
772 		readonly_changed_cb(vfsp->vfs_data, B_FALSE);
773 		vfsp->vfs_flag |= VFS_REMOUNT;
774 		return (zfs_refresh_properties(vfsp));
775 
776 	} else if (why == ROOT_UNMOUNT) {
777 		zfs_unregister_callbacks((zfsvfs_t *)vfsp->vfs_data);
778 		(void) zfs_sync(vfsp, 0, 0);
779 		return (0);
780 	}
781 
782 	/*
783 	 * If "why" is anything other than ROOT_INIT,
784 	 * ROOT_REMOUNT, or ROOT_UNMOUNT, we do not support it.
785 	 */
786 	return (ENOTSUP);
787 }
788 
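/*
 * VFS mount entry point.  Validate the mount point, handle the remount case
 * by refreshing temporary properties, verify that the dataset is visible
 * (and writable) from the caller's zone, and then hand off to zfs_domount()
 * with the objset name taken from the "special" mount argument.
 */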
789 /*ARGSUSED*/
790 static int
791 zfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
792 {
793 	char		*osname;
794 	pathname_t	spn;
795 	int		error = 0;
796 	uio_seg_t	fromspace = (uap->flags & MS_SYSSPACE) ?
797 				UIO_SYSSPACE : UIO_USERSPACE;
798 	int		canwrite;
799 
800 	if (mvp->v_type != VDIR)
801 		return (ENOTDIR);
802 
803 	mutex_enter(&mvp->v_lock);
804 	if ((uap->flags & MS_REMOUNT) == 0 &&
805 	    (uap->flags & MS_OVERLAY) == 0 &&
806 	    (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
807 		mutex_exit(&mvp->v_lock);
808 		return (EBUSY);
809 	}
810 	mutex_exit(&mvp->v_lock);
811 
812 	/*
813 	 * ZFS does not support passing unparsed data in via MS_DATA.
814 	 * Users should use the MS_OPTIONSTR interface; this means
815 	 * that all option parsing is already done and the options struct
816 	 * can be interrogated.
817 	 */
818 	if ((uap->flags & MS_DATA) && uap->datalen > 0)
819 		return (EINVAL);
820 
821 	/*
822 	 * When doing a remount, we simply refresh our temporary properties
823 	 * based on the options currently set on the VFS.
824 	 */
825 	if (uap->flags & MS_REMOUNT) {
826 		return (zfs_refresh_properties(vfsp));
827 	}
828 
829 	/*
830 	 * Get the objset name (the "special" mount argument).
831 	 */
832 	if (error = pn_get(uap->spec, fromspace, &spn))
833 		return (error);
834 
835 	osname = spn.pn_path;
836 
837 	if ((error = secpolicy_fs_mount(cr, mvp, vfsp)) != 0)
838 		goto out;
839 
840 	/*
841 	 * Refuse to mount a filesystem if we are in a local zone and the
842 	 * dataset is not visible.
843 	 */
844 	if (!INGLOBALZONE(curproc) &&
845 	    (!zone_dataset_visible(osname, &canwrite) || !canwrite)) {
846 		error = EPERM;
847 		goto out;
848 	}
849 
850 	error = zfs_domount(vfsp, osname, cr);
851 
852 out:
853 	pn_free(&spn);
854 	return (error);
855 }
856 
857 static int
858 zfs_statvfs(vfs_t *vfsp, struct statvfs64 *statp)
859 {
860 	zfsvfs_t *zfsvfs = vfsp->vfs_data;
861 	dev32_t d32;
862 	uint64_t refdbytes, availbytes, usedobjs, availobjs;
863 
864 	ZFS_ENTER(zfsvfs);
865 
866 	dmu_objset_space(zfsvfs->z_os,
867 	    &refdbytes, &availbytes, &usedobjs, &availobjs);
868 
869 	/*
870 	 * The underlying storage pool actually uses multiple block sizes.
871 	 * We report the fragsize as the smallest block size we support,
872 	 * and we report our blocksize as the filesystem's maximum blocksize.
873 	 */
874 	statp->f_frsize = 1UL << SPA_MINBLOCKSHIFT;
875 	statp->f_bsize = zfsvfs->z_max_blksz;
876 
877 	/*
878 	 * The following fields report "total" blocks of various kinds in
879 	 * the file system, expressed in terms of f_frsize - the
880 	 * "fragment" size.
881 	 */
882 
883 	statp->f_blocks = (refdbytes + availbytes) >> SPA_MINBLOCKSHIFT;
884 	statp->f_bfree = availbytes >> SPA_MINBLOCKSHIFT;
885 	statp->f_bavail = statp->f_bfree; /* no root reservation */
886 
887 	/*
888 	 * statvfs() should really be called statufs(), because it assumes
889 	 * static metadata.  ZFS doesn't preallocate files, so the best
890 	 * we can do is report the max that could possibly fit in f_files,
891 	 * and that minus the number actually used in f_ffree.
892 	 * For f_ffree, report the smaller of the number of objects available
893 	 * and the number of blocks (each object will take at least a block).
894 	 */
895 	statp->f_ffree = MIN(availobjs, statp->f_bfree);
896 	statp->f_favail = statp->f_ffree;	/* no "root reservation" */
897 	statp->f_files = statp->f_ffree + usedobjs;
898 
899 	(void) cmpldev(&d32, vfsp->vfs_dev);
900 	statp->f_fsid = d32;
901 
902 	/*
903 	 * We're a zfs filesystem.
904 	 */
905 	(void) strcpy(statp->f_basetype, vfssw[vfsp->vfs_fstype].vsw_name);
906 
907 	statp->f_flag = vf_to_stf(vfsp->vfs_flag);
908 
909 	statp->f_namemax = ZFS_MAXNAMELEN;
910 
911 	/*
912 	 * We have all of 32 characters to stuff a string here.
913 	 * Is there anything useful we could/should provide?
914 	 */
915 	bzero(statp->f_fstr, sizeof (statp->f_fstr));
916 
917 	ZFS_EXIT(zfsvfs);
918 	return (0);
919 }
920 
921 static int
922 zfs_root(vfs_t *vfsp, vnode_t **vpp)
923 {
924 	zfsvfs_t *zfsvfs = vfsp->vfs_data;
925 	znode_t *rootzp;
926 	int error;
927 
928 	ZFS_ENTER(zfsvfs);
929 
930 	error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
931 	if (error == 0)
932 		*vpp = ZTOV(rootzp);
933 
934 	ZFS_EXIT(zfsvfs);
935 	return (error);
936 }
937 
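/*
 * VFS unmount entry point.  A forced unmount (MS_FORCE) marks the vfs
 * VFS_UNMOUNTED, waits for in-flight operations (z_op_cnt) to drain, and
 * tears the objset down immediately.  A normal unmount stops the delete
 * threads first and returns EBUSY if any vnodes beyond the vfs and '.zfs'
 * holds are still active.
 */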
938 /*ARGSUSED*/
939 static int
940 zfs_umount(vfs_t *vfsp, int fflag, cred_t *cr)
941 {
942 	zfsvfs_t *zfsvfs = vfsp->vfs_data;
943 	int ret;
944 
945 	if ((ret = secpolicy_fs_unmount(cr, vfsp)) != 0)
946 		return (ret);
947 
948 
949 	(void) dnlc_purge_vfsp(vfsp, 0);
950 
951 	/*
952 	 * Unmount any snapshots mounted under .zfs before unmounting the
953 	 * dataset itself.
954 	 */
955 	if (zfsvfs->z_ctldir != NULL &&
956 	    (ret = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0)
957 		return (ret);
958 
959 	if (fflag & MS_FORCE) {
960 		vfsp->vfs_flag |= VFS_UNMOUNTED;
961 		zfsvfs->z_unmounted1 = B_TRUE;
962 
963 		/*
964 		 * Wait for all zfs threads to leave zfs.
965 		 * Grabbing a rwlock as reader in all vops and
966 		 * as writer here doesn't work, because it is too easy to get
967 		 * multiple reader entries since zfs can re-enter itself.
968 		 * This can lead to deadlock if there is an intervening
969 		 * rw_enter as writer.
970 		 * So a file system thread reference count (z_op_cnt) is used.
971 		 * A polling loop on z_op_cnt may seem inefficient, but
972 		 * - this saves all threads on exit from having to grab a
973 		 *   mutex in order to cv_signal
974 		 * - only occurs on forced unmount in the rare case when
975 		 *   there are outstanding threads within the file system.
976 		 */
977 		while (zfsvfs->z_op_cnt) {
978 			delay(1);
979 		}
980 
981 		zfs_objset_close(zfsvfs);
982 
983 		return (0);
984 	}
985 	/*
986 	 * Stop all delete threads.
987 	 */
988 	(void) zfs_delete_thread_target(zfsvfs, 0);
989 
990 	/*
991 	 * Check the number of active vnodes in the file system.
992 	 * Our count is maintained in the vfs structure, but the number
993 	 * is off by 1 to indicate a hold on the vfs structure itself.
994 	 *
995 	 * The '.zfs' directory maintains a reference of its own, and any active
996 	 * references underneath are reflected in the vnode count.
997 	 */
998 	if (zfsvfs->z_ctldir == NULL) {
999 		if (vfsp->vfs_count > 1) {
1000 			if ((zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) == 0)
1001 				(void) zfs_delete_thread_target(zfsvfs, 1);
1002 			return (EBUSY);
1003 		}
1004 	} else {
1005 		if (vfsp->vfs_count > 2 ||
1006 		    (zfsvfs->z_ctldir->v_count > 1 && !(fflag & MS_FORCE))) {
1007 			if ((zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) == 0)
1008 				(void) zfs_delete_thread_target(zfsvfs, 1);
1009 			return (EBUSY);
1010 		}
1011 	}
1012 
1013 	vfsp->vfs_flag |= VFS_UNMOUNTED;
1014 	zfs_objset_close(zfsvfs);
1015 
1016 	return (0);
1017 }
1018 
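/*
 * Reconstruct a vnode from an NFS-style file handle.  Short fids encode the
 * object number and generation as little-endian byte arrays; long fids also
 * carry the objset id and its generation, which are used to locate the
 * objset (e.g. a snapshot reachable under '.zfs').  A generation of zero is
 * reserved for the '.zfs' control directories themselves.
 */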
1019 static int
1020 zfs_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp)
1021 {
1022 	zfsvfs_t	*zfsvfs = vfsp->vfs_data;
1023 	znode_t		*zp;
1024 	uint64_t	object = 0;
1025 	uint64_t	fid_gen = 0;
1026 	uint64_t	gen_mask;
1027 	uint64_t	zp_gen;
1028 	int 		i, err;
1029 
1030 	*vpp = NULL;
1031 
1032 	ZFS_ENTER(zfsvfs);
1033 
1034 	if (fidp->fid_len == LONG_FID_LEN) {
1035 		zfid_long_t	*zlfid = (zfid_long_t *)fidp;
1036 		uint64_t	objsetid = 0;
1037 		uint64_t	setgen = 0;
1038 
1039 		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
1040 			objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);
1041 
1042 		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
1043 			setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);
1044 
1045 		ZFS_EXIT(zfsvfs);
1046 
1047 		err = zfsctl_lookup_objset(vfsp, objsetid, &zfsvfs);
1048 		if (err)
1049 			return (EINVAL);
1050 		ZFS_ENTER(zfsvfs);
1051 	}
1052 
1053 	if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
1054 		zfid_short_t	*zfid = (zfid_short_t *)fidp;
1055 
1056 		for (i = 0; i < sizeof (zfid->zf_object); i++)
1057 			object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);
1058 
1059 		for (i = 0; i < sizeof (zfid->zf_gen); i++)
1060 			fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
1061 	} else {
1062 		ZFS_EXIT(zfsvfs);
1063 		return (EINVAL);
1064 	}
1065 
1066 	/* A zero fid_gen means we are in the .zfs control directories */
1067 	if (fid_gen == 0 &&
1068 	    (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
1069 		*vpp = zfsvfs->z_ctldir;
1070 		ASSERT(*vpp != NULL);
1071 		if (object == ZFSCTL_INO_SNAPDIR) {
1072 			VERIFY(zfsctl_root_lookup(*vpp, "snapshot", vpp, NULL,
1073 			    0, NULL, NULL) == 0);
1074 		} else {
1075 			VN_HOLD(*vpp);
1076 		}
1077 		ZFS_EXIT(zfsvfs);
1078 		return (0);
1079 	}
1080 
1081 	gen_mask = -1ULL >> (64 - 8 * i);
1082 
1083 	dprintf("getting %llu [%u mask %llx]\n", object, fid_gen, gen_mask);
1084 	if (err = zfs_zget(zfsvfs, object, &zp)) {
1085 		ZFS_EXIT(zfsvfs);
1086 		return (err);
1087 	}
1088 	zp_gen = zp->z_phys->zp_gen & gen_mask;
1089 	if (zp_gen == 0)
1090 		zp_gen = 1;
1091 	if (zp->z_reap || zp_gen != fid_gen) {
1092 		dprintf("znode gen (%u) != fid gen (%u)\n", zp_gen, fid_gen);
1093 		VN_RELE(ZTOV(zp));
1094 		ZFS_EXIT(zfsvfs);
1095 		return (EINVAL);
1096 	}
1097 
1098 	*vpp = ZTOV(zp);
1099 	ZFS_EXIT(zfsvfs);
1100 	return (0);
1101 }
1102 
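/*
 * Tear down the objset at unmount time: stop the delete threads, drop all
 * znode and dbuf holds, unregister the property callbacks, close the ZIL,
 * evict any remaining dbufs, close the objset, and finally destroy the
 * '.zfs' control directory node.
 */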
1103 static void
1104 zfs_objset_close(zfsvfs_t *zfsvfs)
1105 {
1106 	zfs_delete_t	*zd = &zfsvfs->z_delete_head;
1107 	znode_t		*zp, *nextzp;
1108 	objset_t	*os = zfsvfs->z_os;
1109 
1110 	/*
1111 	 * Stop all delete threads.
1112 	 */
1113 	(void) zfs_delete_thread_target(zfsvfs, 0);
1114 
1115 	/*
1116 	 * For forced unmount, at this point all vops except zfs_inactive
1117 	 * are returning EIO.  We now need to suspend zfs_inactive threads
1118 	 * while we are freeing dbufs, before switching zfs_inactive
1119 	 * to its behaviour without an objset.
1120 	 */
1121 	rw_enter(&zfsvfs->z_um_lock, RW_WRITER);
1122 
1123 	/*
1124 	 * Release all delete-in-progress znodes.
1125 	 * They will be processed when the file system remounts.
1126 	 */
1127 	mutex_enter(&zd->z_mutex);
1128 	while (zp = list_head(&zd->z_znodes)) {
1129 		list_remove(&zd->z_znodes, zp);
1130 		zp->z_dbuf_held = 0;
1131 		dmu_buf_rele(zp->z_dbuf, NULL);
1132 	}
1133 	mutex_exit(&zd->z_mutex);
1134 
1135 	/*
1136 	 * Release all holds on dbufs
1137 	 * Note, although we have stopped all other vop threads and
1138 	 * zfs_inactive(), the dmu can callback via znode_pageout_func()
1139 	 * which can zfs_znode_free() the znode.
1140 	 * So we lock z_all_znodes; search the list for a held
1141 	 * dbuf; drop the lock (we know zp can't disappear if we hold
1142 	 * a dbuf); then regrab the lock and restart.
1143 	 */
1144 	mutex_enter(&zfsvfs->z_znodes_lock);
1145 	for (zp = list_head(&zfsvfs->z_all_znodes); zp; zp = nextzp) {
1146 		nextzp = list_next(&zfsvfs->z_all_znodes, zp);
1147 		if (zp->z_dbuf_held) {
1148 			/* dbufs should only be held when force unmounting */
1149 			zp->z_dbuf_held = 0;
1150 			mutex_exit(&zfsvfs->z_znodes_lock);
1151 			dmu_buf_rele(zp->z_dbuf, NULL);
1152 			/* Start again */
1153 			mutex_enter(&zfsvfs->z_znodes_lock);
1154 			nextzp = list_head(&zfsvfs->z_all_znodes);
1155 		}
1156 	}
1157 	mutex_exit(&zfsvfs->z_znodes_lock);
1158 
1159 	/*
1160 	 * Unregister properties.
1161 	 */
1162 	if (!dmu_objset_is_snapshot(os))
1163 		zfs_unregister_callbacks(zfsvfs);
1164 
1165 	/*
1166 	 * Switch zfs_inactive to behaviour without an objset.
1167 	 * It just tosses cached pages and frees the znode & vnode.
1168 	 * Then re-enable zfs_inactive threads in that new behaviour.
1169 	 */
1170 	zfsvfs->z_unmounted2 = B_TRUE;
1171 	rw_exit(&zfsvfs->z_um_lock); /* re-enable any zfs_inactive threads */
1172 
1173 	/*
1174 	 * Close the zil. Can't close the zil while zfs_inactive
1175 	 * threads are blocked as zil_close can call zfs_inactive.
1176 	 */
1177 	if (zfsvfs->z_log) {
1178 		zil_close(zfsvfs->z_log);
1179 		zfsvfs->z_log = NULL;
1180 	}
1181 
1182 	/*
1183 	 * Evict all dbufs so that cached znodes will be freed.
1184 	 */
1185 	if (dmu_objset_evict_dbufs(os, 1)) {
1186 		txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
1187 		(void) dmu_objset_evict_dbufs(os, 0);
1188 	}
1189 
1190 	/*
1191 	 * Finally, close the objset.
1192 	 */
1193 	dmu_objset_close(os);
1194 
1195 	/*
1196 	 * We can now safely destroy the '.zfs' directory node.
1197 	 */
1198 	if (zfsvfs->z_ctldir != NULL)
1199 		zfsctl_destroy(zfsvfs);
1200 
1201 }
1202 
1203 static void
1204 zfs_freevfs(vfs_t *vfsp)
1205 {
1206 	zfsvfs_t *zfsvfs = vfsp->vfs_data;
1207 
1208 	kmem_free(zfsvfs, sizeof (zfsvfs_t));
1209 
1210 	atomic_add_32(&zfs_active_fs_count, -1);
1211 }
1212 
1213 /*
1214  * VFS_INIT() initialization.  Note that there is no VFS_FINI(),
1215  * so we can't safely do any non-idempotent initialization here.
1216  * Leave that to zfs_init() and zfs_fini(), which are called
1217  * from the module's _init() and _fini() entry points.
1218  */
1219 /*ARGSUSED*/
1220 static int
1221 zfs_vfsinit(int fstype, char *name)
1222 {
1223 	int error;
1224 
1225 	zfsfstype = fstype;
1226 
1227 	/*
1228 	 * Set up the vfsops and vnodeops tables.
1229 	 */
1230 	error = vfs_setfsops(fstype, zfs_vfsops_template, &zfs_vfsops);
1231 	if (error != 0) {
1232 		cmn_err(CE_WARN, "zfs: bad vfs ops template");
1233 	}
1234 
1235 	error = zfs_create_op_tables();
1236 	if (error) {
1237 		zfs_remove_op_tables();
1238 		cmn_err(CE_WARN, "zfs: bad vnode ops template");
1239 		(void) vfs_freevfsops_by_type(zfsfstype);
1240 		return (error);
1241 	}
1242 
1243 	mutex_init(&zfs_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
1244 
1245 	/*
1246 	 * Unique major number for all zfs mounts.
1247 	 * If we run out of 32-bit minors, we'll getudev() another major.
1248 	 */
1249 	zfs_major = ddi_name_to_major(ZFS_DRIVER);
1250 	zfs_minor = ZFS_MIN_MINOR;
1251 
1252 	return (0);
1253 }
1254 
1255 void
1256 zfs_init(void)
1257 {
1258 	/*
1259 	 * Initialize .zfs directory structures
1260 	 */
1261 	zfsctl_init();
1262 
1263 	/*
1264 	 * Initialize znode cache, vnode ops, etc...
1265 	 */
1266 	zfs_znode_init();
1267 }
1268 
1269 void
1270 zfs_fini(void)
1271 {
1272 	zfsctl_fini();
1273 	zfs_znode_fini();
1274 }
1275 
1276 int
1277 zfs_busy(void)
1278 {
1279 	return (zfs_active_fs_count != 0);
1280 }
1281 
1282 static vfsdef_t vfw = {
1283 	VFSDEF_VERSION,
1284 	MNTTYPE_ZFS,
1285 	zfs_vfsinit,
1286 	VSW_HASPROTO|VSW_CANRWRO|VSW_CANREMOUNT|VSW_VOLATILEDEV|VSW_STATS,
1287 	&zfs_mntopts
1288 };
1289 
1290 struct modlfs zfs_modlfs = {
1291 	&mod_fsops, "ZFS filesystem version " ZFS_VERSION_STRING, &vfw
1292 };
1293