xref: /illumos-gate/usr/src/uts/common/fs/zfs/zfs_vfsops.c (revision ce8eb11a8717b4a57c68fd77ab9f8aac15b16bf2)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/sysmacros.h>
32 #include <sys/kmem.h>
33 #include <sys/pathname.h>
34 #include <sys/vnode.h>
35 #include <sys/vfs.h>
36 #include <sys/vfs_opreg.h>
37 #include <sys/mntent.h>
38 #include <sys/mount.h>
39 #include <sys/cmn_err.h>
40 #include "fs/fs_subr.h"
41 #include <sys/zfs_znode.h>
42 #include <sys/zfs_dir.h>
43 #include <sys/zil.h>
44 #include <sys/fs/zfs.h>
45 #include <sys/dmu.h>
46 #include <sys/dsl_prop.h>
47 #include <sys/dsl_dataset.h>
48 #include <sys/dsl_deleg.h>
49 #include <sys/spa.h>
50 #include <sys/zap.h>
51 #include <sys/varargs.h>
52 #include <sys/policy.h>
53 #include <sys/atomic.h>
54 #include <sys/mkdev.h>
55 #include <sys/modctl.h>
56 #include <sys/refstr.h>
57 #include <sys/zfs_ioctl.h>
58 #include <sys/zfs_ctldir.h>
59 #include <sys/bootconf.h>
60 #include <sys/sunddi.h>
61 #include <sys/dnlc.h>
62 
63 int zfsfstype;
64 vfsops_t *zfs_vfsops = NULL;
65 static major_t zfs_major;
66 static minor_t zfs_minor;
67 static kmutex_t	zfs_dev_mtx;
68 
69 static int zfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr);
70 static int zfs_umount(vfs_t *vfsp, int fflag, cred_t *cr);
71 static int zfs_mountroot(vfs_t *vfsp, enum whymountroot);
72 static int zfs_root(vfs_t *vfsp, vnode_t **vpp);
73 static int zfs_statvfs(vfs_t *vfsp, struct statvfs64 *statp);
74 static int zfs_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp);
75 static void zfs_freevfs(vfs_t *vfsp);
76 static void zfs_objset_close(zfsvfs_t *zfsvfs);
77 
78 static const fs_operation_def_t zfs_vfsops_template[] = {
79 	VFSNAME_MOUNT,		{ .vfs_mount = zfs_mount },
80 	VFSNAME_MOUNTROOT,	{ .vfs_mountroot = zfs_mountroot },
81 	VFSNAME_UNMOUNT,	{ .vfs_unmount = zfs_umount },
82 	VFSNAME_ROOT,		{ .vfs_root = zfs_root },
83 	VFSNAME_STATVFS,	{ .vfs_statvfs = zfs_statvfs },
84 	VFSNAME_SYNC,		{ .vfs_sync = zfs_sync },
85 	VFSNAME_VGET,		{ .vfs_vget = zfs_vget },
86 	VFSNAME_FREEVFS,	{ .vfs_freevfs = zfs_freevfs },
87 	NULL,			NULL
88 };
89 
90 static const fs_operation_def_t zfs_vfsops_eio_template[] = {
91 	VFSNAME_FREEVFS,	{ .vfs_freevfs =  zfs_freevfs },
92 	NULL,			NULL
93 };
94 
95 /*
96  * We need to keep a count of active filesystems.
97  * This is necessary to prevent our module
98  * from being unloaded after a umount -f.
99  */
100 static uint32_t	zfs_active_fs_count = 0;
101 
102 static char *noatime_cancel[] = { MNTOPT_ATIME, NULL };
103 static char *atime_cancel[] = { MNTOPT_NOATIME, NULL };
104 static char *noxattr_cancel[] = { MNTOPT_XATTR, NULL };
105 static char *xattr_cancel[] = { MNTOPT_NOXATTR, NULL };
106 
107 /*
108  * MO_DEFAULT is not used since the default value is determined
109  * by the equivalent property.
110  */
111 static mntopt_t mntopts[] = {
112 	{ MNTOPT_NOXATTR, noxattr_cancel, NULL, 0, NULL },
113 	{ MNTOPT_XATTR, xattr_cancel, NULL, 0, NULL },
114 	{ MNTOPT_NOATIME, noatime_cancel, NULL, 0, NULL },
115 	{ MNTOPT_ATIME, atime_cancel, NULL, 0, NULL }
116 };
117 
118 static mntopts_t zfs_mntopts = {
119 	sizeof (mntopts) / sizeof (mntopt_t),
120 	mntopts
121 };
122 
123 /*ARGSUSED*/
124 int
125 zfs_sync(vfs_t *vfsp, short flag, cred_t *cr)
126 {
127 	/*
128 	 * Data integrity is job one.  We don't want a compromised kernel
129 	 * writing to the storage pool, so we never sync during panic.
130 	 */
131 	if (panicstr)
132 		return (0);
133 
134 	/*
135 	 * SYNC_ATTR is used by fsflush() to force old filesystems like UFS
136 	 * to sync metadata, which they would otherwise cache indefinitely.
137 	 * Semantically, the only requirement is that the sync be initiated.
138 	 * The DMU syncs out txgs frequently, so there's nothing to do.
139 	 */
140 	if (flag & SYNC_ATTR)
141 		return (0);
142 
143 	if (vfsp != NULL) {
144 		/*
145 		 * Sync a specific filesystem.
146 		 */
147 		zfsvfs_t *zfsvfs = vfsp->vfs_data;
148 
149 		ZFS_ENTER(zfsvfs);
150 		if (zfsvfs->z_log != NULL)
151 			zil_commit(zfsvfs->z_log, UINT64_MAX, 0);
152 		else
153 			txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
154 		ZFS_EXIT(zfsvfs);
155 	} else {
156 		/*
157 		 * Sync all ZFS filesystems.  This is what happens when you
158 		 * run sync(1M).  Unlike other filesystems, ZFS honors the
159 		 * request by waiting for all pools to commit all dirty data.
160 		 */
161 		spa_sync_allpools();
162 	}
163 
164 	return (0);
165 }
166 
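/*
 * Allocate a unique device number for this mount.  We scan the minor
 * number space under zfs_major for one that is not already mounted;
 * if every minor is in use, we obtain a brand new major from getudev()
 * and start over at minor 0.
 */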
167 static int
168 zfs_create_unique_device(dev_t *dev)
169 {
170 	major_t new_major;
171 
172 	do {
173 		ASSERT3U(zfs_minor, <=, MAXMIN32);
174 		minor_t start = zfs_minor;
175 		do {
176 			mutex_enter(&zfs_dev_mtx);
177 			if (zfs_minor >= MAXMIN32) {
178 				/*
179 				 * If we're still using the real major,
180 				 * keep out of /dev/zfs and /dev/zvol minor
181 				 * number space.  If we're using a getudev()'ed
182 				 * major number, we can use all of its minors.
183 				 */
184 				if (zfs_major == ddi_name_to_major(ZFS_DRIVER))
185 					zfs_minor = ZFS_MIN_MINOR;
186 				else
187 					zfs_minor = 0;
188 			} else {
189 				zfs_minor++;
190 			}
191 			*dev = makedevice(zfs_major, zfs_minor);
192 			mutex_exit(&zfs_dev_mtx);
193 		} while (vfs_devismounted(*dev) && zfs_minor != start);
194 		if (zfs_minor == start) {
195 			/*
196 			 * We are using all ~262,000 minor numbers for the
197 			 * current major number.  Create a new major number.
198 			 */
199 			if ((new_major = getudev()) == (major_t)-1) {
200 				cmn_err(CE_WARN,
201 				    "zfs_mount: Can't get unique major "
202 				    "device number.");
203 				return (-1);
204 			}
205 			mutex_enter(&zfs_dev_mtx);
206 			zfs_major = new_major;
207 			zfs_minor = 0;
208 
209 			mutex_exit(&zfs_dev_mtx);
210 		} else {
211 			break;
212 		}
213 		/* CONSTANTCONDITION */
214 	} while (1);
215 
216 	return (0);
217 }
218 
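/*
 * The callbacks below are registered with the DSL so that changes to
 * dataset properties (atime, xattr, recordsize, readonly, devices,
 * setuid, exec, snapdir, aclmode, aclinherit) are reflected both in the
 * in-memory zfsvfs state and in the mount options reported by the VFS.
 */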
219 static void
220 atime_changed_cb(void *arg, uint64_t newval)
221 {
222 	zfsvfs_t *zfsvfs = arg;
223 
224 	if (newval == TRUE) {
225 		zfsvfs->z_atime = TRUE;
226 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME);
227 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_ATIME, NULL, 0);
228 	} else {
229 		zfsvfs->z_atime = FALSE;
230 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_ATIME);
231 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME, NULL, 0);
232 	}
233 }
234 
235 static void
236 xattr_changed_cb(void *arg, uint64_t newval)
237 {
238 	zfsvfs_t *zfsvfs = arg;
239 
240 	if (newval == TRUE) {
241 		/* XXX locking on vfs_flag? */
242 		zfsvfs->z_vfs->vfs_flag |= VFS_XATTR;
243 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOXATTR);
244 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_XATTR, NULL, 0);
245 	} else {
246 		/* XXX locking on vfs_flag? */
247 		zfsvfs->z_vfs->vfs_flag &= ~VFS_XATTR;
248 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_XATTR);
249 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOXATTR, NULL, 0);
250 	}
251 }
252 
253 static void
254 blksz_changed_cb(void *arg, uint64_t newval)
255 {
256 	zfsvfs_t *zfsvfs = arg;
257 
258 	if (newval < SPA_MINBLOCKSIZE ||
259 	    newval > SPA_MAXBLOCKSIZE || !ISP2(newval))
260 		newval = SPA_MAXBLOCKSIZE;
261 
262 	zfsvfs->z_max_blksz = newval;
263 	zfsvfs->z_vfs->vfs_bsize = newval;
264 }
265 
266 static void
267 readonly_changed_cb(void *arg, uint64_t newval)
268 {
269 	zfsvfs_t *zfsvfs = arg;
270 
271 	if (newval) {
272 		/* XXX locking on vfs_flag? */
273 		zfsvfs->z_vfs->vfs_flag |= VFS_RDONLY;
274 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RW);
275 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RO, NULL, 0);
276 	} else {
277 		/* XXX locking on vfs_flag? */
278 		zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY;
279 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RO);
280 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RW, NULL, 0);
281 	}
282 }
283 
284 static void
285 devices_changed_cb(void *arg, uint64_t newval)
286 {
287 	zfsvfs_t *zfsvfs = arg;
288 
289 	if (newval == FALSE) {
290 		zfsvfs->z_vfs->vfs_flag |= VFS_NODEVICES;
291 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_DEVICES);
292 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NODEVICES, NULL, 0);
293 	} else {
294 		zfsvfs->z_vfs->vfs_flag &= ~VFS_NODEVICES;
295 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NODEVICES);
296 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_DEVICES, NULL, 0);
297 	}
298 }
299 
300 static void
301 setuid_changed_cb(void *arg, uint64_t newval)
302 {
303 	zfsvfs_t *zfsvfs = arg;
304 
305 	if (newval == FALSE) {
306 		zfsvfs->z_vfs->vfs_flag |= VFS_NOSETUID;
307 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_SETUID);
308 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID, NULL, 0);
309 	} else {
310 		zfsvfs->z_vfs->vfs_flag &= ~VFS_NOSETUID;
311 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID);
312 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_SETUID, NULL, 0);
313 	}
314 }
315 
316 static void
317 exec_changed_cb(void *arg, uint64_t newval)
318 {
319 	zfsvfs_t *zfsvfs = arg;
320 
321 	if (newval == FALSE) {
322 		zfsvfs->z_vfs->vfs_flag |= VFS_NOEXEC;
323 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_EXEC);
324 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC, NULL, 0);
325 	} else {
326 		zfsvfs->z_vfs->vfs_flag &= ~VFS_NOEXEC;
327 		vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC);
328 		vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_EXEC, NULL, 0);
329 	}
330 }
331 
332 static void
333 snapdir_changed_cb(void *arg, uint64_t newval)
334 {
335 	zfsvfs_t *zfsvfs = arg;
336 
337 	zfsvfs->z_show_ctldir = newval;
338 }
339 
340 static void
341 acl_mode_changed_cb(void *arg, uint64_t newval)
342 {
343 	zfsvfs_t *zfsvfs = arg;
344 
345 	zfsvfs->z_acl_mode = newval;
346 }
347 
348 static void
349 acl_inherit_changed_cb(void *arg, uint64_t newval)
350 {
351 	zfsvfs_t *zfsvfs = arg;
352 
353 	zfsvfs->z_acl_inherit = newval;
354 }
355 
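/*
 * Register the property callbacks above with the dataset backing this
 * mount, then re-apply any temporary mount options (e.g. -o ro,noatime)
 * that were passed in, since registration overwrites them with the
 * dataset's current property values.
 */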
356 static int
357 zfs_register_callbacks(vfs_t *vfsp)
358 {
359 	struct dsl_dataset *ds = NULL;
360 	objset_t *os = NULL;
361 	zfsvfs_t *zfsvfs = NULL;
362 	int readonly, do_readonly = FALSE;
363 	int setuid, do_setuid = FALSE;
364 	int exec, do_exec = FALSE;
365 	int devices, do_devices = FALSE;
366 	int xattr, do_xattr = FALSE;
367 	int atime, do_atime = FALSE;
368 	int error = 0;
369 
370 	ASSERT(vfsp);
371 	zfsvfs = vfsp->vfs_data;
372 	ASSERT(zfsvfs);
373 	os = zfsvfs->z_os;
374 
375 	/*
376 	 * The act of registering our callbacks will destroy any mount
377 	 * options we may have.  In order to enable temporary overrides
378 	 * of mount options, we stash away the current values and
379 	 * restore them after we register the callbacks.
380 	 */
381 	if (vfs_optionisset(vfsp, MNTOPT_RO, NULL)) {
382 		readonly = B_TRUE;
383 		do_readonly = B_TRUE;
384 	} else if (vfs_optionisset(vfsp, MNTOPT_RW, NULL)) {
385 		readonly = B_FALSE;
386 		do_readonly = B_TRUE;
387 	}
388 	if (vfs_optionisset(vfsp, MNTOPT_NOSUID, NULL)) {
389 		devices = B_FALSE;
390 		setuid = B_FALSE;
391 		do_devices = B_TRUE;
392 		do_setuid = B_TRUE;
393 	} else {
394 		if (vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL)) {
395 			devices = B_FALSE;
396 			do_devices = B_TRUE;
397 		} else if (vfs_optionisset(vfsp, MNTOPT_DEVICES, NULL)) {
398 			devices = B_TRUE;
399 			do_devices = B_TRUE;
400 		}
401 
402 		if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL)) {
403 			setuid = B_FALSE;
404 			do_setuid = B_TRUE;
405 		} else if (vfs_optionisset(vfsp, MNTOPT_SETUID, NULL)) {
406 			setuid = B_TRUE;
407 			do_setuid = B_TRUE;
408 		}
409 	}
410 	if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL)) {
411 		exec = B_FALSE;
412 		do_exec = B_TRUE;
413 	} else if (vfs_optionisset(vfsp, MNTOPT_EXEC, NULL)) {
414 		exec = B_TRUE;
415 		do_exec = B_TRUE;
416 	}
417 	if (vfs_optionisset(vfsp, MNTOPT_NOXATTR, NULL)) {
418 		xattr = B_FALSE;
419 		do_xattr = B_TRUE;
420 	} else if (vfs_optionisset(vfsp, MNTOPT_XATTR, NULL)) {
421 		xattr = B_TRUE;
422 		do_xattr = B_TRUE;
423 	}
424 	if (vfs_optionisset(vfsp, MNTOPT_NOATIME, NULL)) {
425 		atime = B_FALSE;
426 		do_atime = B_TRUE;
427 	} else if (vfs_optionisset(vfsp, MNTOPT_ATIME, NULL)) {
428 		atime = B_TRUE;
429 		do_atime = B_TRUE;
430 	}
431 
432 	/*
433 	 * Register property callbacks.
434 	 *
435 	 * It would probably be fine to just check for i/o error from
436 	 * the first prop_register(), but I guess I like to go
437 	 * overboard...
438 	 */
439 	ds = dmu_objset_ds(os);
440 	error = dsl_prop_register(ds, "atime", atime_changed_cb, zfsvfs);
441 	error = error ? error : dsl_prop_register(ds,
442 	    "xattr", xattr_changed_cb, zfsvfs);
443 	error = error ? error : dsl_prop_register(ds,
444 	    "recordsize", blksz_changed_cb, zfsvfs);
445 	error = error ? error : dsl_prop_register(ds,
446 	    "readonly", readonly_changed_cb, zfsvfs);
447 	error = error ? error : dsl_prop_register(ds,
448 	    "devices", devices_changed_cb, zfsvfs);
449 	error = error ? error : dsl_prop_register(ds,
450 	    "setuid", setuid_changed_cb, zfsvfs);
451 	error = error ? error : dsl_prop_register(ds,
452 	    "exec", exec_changed_cb, zfsvfs);
453 	error = error ? error : dsl_prop_register(ds,
454 	    "snapdir", snapdir_changed_cb, zfsvfs);
455 	error = error ? error : dsl_prop_register(ds,
456 	    "aclmode", acl_mode_changed_cb, zfsvfs);
457 	error = error ? error : dsl_prop_register(ds,
458 	    "aclinherit", acl_inherit_changed_cb, zfsvfs);
459 	if (error)
460 		goto unregister;
461 
462 	/*
463 	 * Invoke our callbacks to restore temporary mount options.
464 	 */
465 	if (do_readonly)
466 		readonly_changed_cb(zfsvfs, readonly);
467 	if (do_setuid)
468 		setuid_changed_cb(zfsvfs, setuid);
469 	if (do_exec)
470 		exec_changed_cb(zfsvfs, exec);
471 	if (do_devices)
472 		devices_changed_cb(zfsvfs, devices);
473 	if (do_xattr)
474 		xattr_changed_cb(zfsvfs, xattr);
475 	if (do_atime)
476 		atime_changed_cb(zfsvfs, atime);
477 
478 	return (0);
479 
480 unregister:
481 	/*
482 	 * We may attempt to unregister some callbacks that are not
483 	 * registered, but this is OK; it will simply return ENOMSG,
484 	 * which we will ignore.
485 	 */
486 	(void) dsl_prop_unregister(ds, "atime", atime_changed_cb, zfsvfs);
487 	(void) dsl_prop_unregister(ds, "xattr", xattr_changed_cb, zfsvfs);
488 	(void) dsl_prop_unregister(ds, "recordsize", blksz_changed_cb, zfsvfs);
489 	(void) dsl_prop_unregister(ds, "readonly", readonly_changed_cb, zfsvfs);
490 	(void) dsl_prop_unregister(ds, "devices", devices_changed_cb, zfsvfs);
491 	(void) dsl_prop_unregister(ds, "setuid", setuid_changed_cb, zfsvfs);
492 	(void) dsl_prop_unregister(ds, "exec", exec_changed_cb, zfsvfs);
493 	(void) dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb, zfsvfs);
494 	(void) dsl_prop_unregister(ds, "aclmode", acl_mode_changed_cb, zfsvfs);
495 	(void) dsl_prop_unregister(ds, "aclinherit", acl_inherit_changed_cb,
496 	    zfsvfs);
497 	return (error);
498 
499 }
500 
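/*
 * Common mount path shared by zfs_mount() and zfs_mountroot(): allocate
 * and initialize the zfsvfs_t, open the objset, set up the znode layer,
 * replay the intent log (for non-snapshot mounts), and create the
 * '.zfs' control directory.
 */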
501 static int
502 zfs_domount(vfs_t *vfsp, char *osname, cred_t *cr)
503 {
504 	dev_t mount_dev;
505 	uint64_t recordsize, readonly;
506 	int error = 0;
507 	int mode;
508 	zfsvfs_t *zfsvfs;
509 	znode_t *zp = NULL;
510 
511 	ASSERT(vfsp);
512 	ASSERT(osname);
513 
514 	/*
515 	 * Initialize the zfs-specific filesystem structure.
516 	 * Should probably make this a kmem cache, shuffle fields,
517 	 * and just bzero up to z_hold_mtx[].
518 	 */
519 	zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
520 	zfsvfs->z_vfs = vfsp;
521 	zfsvfs->z_parent = zfsvfs;
522 	zfsvfs->z_assign = TXG_NOWAIT;
523 	zfsvfs->z_max_blksz = SPA_MAXBLOCKSIZE;
524 	zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
525 
526 	mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
527 	list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
528 	    offsetof(znode_t, z_link_node));
529 	rw_init(&zfsvfs->z_um_lock, NULL, RW_DEFAULT, NULL);
530 
531 	/* Initialize the generic filesystem structure. */
532 	vfsp->vfs_bcount = 0;
533 	vfsp->vfs_data = NULL;
534 
535 	if (zfs_create_unique_device(&mount_dev) == -1) {
536 		error = ENODEV;
537 		goto out;
538 	}
539 	ASSERT(vfs_devismounted(mount_dev) == 0);
540 
541 	if (error = dsl_prop_get_integer(osname, "recordsize", &recordsize,
542 	    NULL))
543 		goto out;
544 
545 	vfsp->vfs_dev = mount_dev;
546 	vfsp->vfs_fstype = zfsfstype;
547 	vfsp->vfs_bsize = recordsize;
548 	vfsp->vfs_flag |= VFS_NOTRUNC;
549 	vfsp->vfs_data = zfsvfs;
550 
551 	if (error = dsl_prop_get_integer(osname, "readonly", &readonly, NULL))
552 		goto out;
553 
554 	if (readonly)
555 		mode = DS_MODE_PRIMARY | DS_MODE_READONLY;
556 	else
557 		mode = DS_MODE_PRIMARY;
558 
559 	error = dmu_objset_open(osname, DMU_OST_ZFS, mode, &zfsvfs->z_os);
560 	if (error == EROFS) {
561 		mode = DS_MODE_PRIMARY | DS_MODE_READONLY;
562 		error = dmu_objset_open(osname, DMU_OST_ZFS, mode,
563 		    &zfsvfs->z_os);
564 	}
565 
566 	if (error)
567 		goto out;
568 
569 	if (error = zfs_init_fs(zfsvfs, &zp, cr))
570 		goto out;
571 
572 	/* The call to zfs_init_fs leaves the vnode held; release it here. */
573 	VN_RELE(ZTOV(zp));
574 
575 	if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
576 		uint64_t xattr;
577 
578 		ASSERT(mode & DS_MODE_READONLY);
579 		atime_changed_cb(zfsvfs, B_FALSE);
580 		readonly_changed_cb(zfsvfs, B_TRUE);
581 		if (error = dsl_prop_get_integer(osname, "xattr", &xattr, NULL))
582 			goto out;
583 		xattr_changed_cb(zfsvfs, xattr);
584 		zfsvfs->z_issnap = B_TRUE;
585 	} else {
586 		error = zfs_register_callbacks(vfsp);
587 		if (error)
588 			goto out;
589 
590 		if (!(zfsvfs->z_vfs->vfs_flag & VFS_RDONLY))
591 			zfs_unlinked_drain(zfsvfs);
592 
593 		/*
594 		 * Parse and replay the intent log.
595 		 *
596 		 * Because of ziltest, this must be done after
597 		 * zfs_unlinked_drain().  (Further note: ziltest doesn't
598 		 * use readonly mounts, where zfs_unlinked_drain() isn't
599 		 * called.)  This is because ziltest causes spa_sync()
600 		 * to think it's committed, but actually it is not, so
601 		 * the intent log contains many txg's worth of changes.
602 		 *
603 		 * In particular, if object N is in the unlinked set in
604 		 * the last txg to actually sync, then it could be
605 		 * actually freed in a later txg and then reallocated in
606 		 * a yet later txg.  This would write a "create object
607 		 * N" record to the intent log.  Normally, this would be
608 		 * fine because the spa_sync() would have written out
609 		 * the fact that object N is free, before we could write
610 		 * the "create object N" intent log record.
611 		 *
612 		 * But when we are in ziltest mode, we advance the "open
613 		 * txg" without actually spa_sync()-ing the changes to
614 		 * disk.  So we would see that object N is still
615 		 * allocated and in the unlinked set, and there is an
616 		 * intent log record saying to allocate it.
617 		 */
618 		zil_replay(zfsvfs->z_os, zfsvfs, &zfsvfs->z_assign,
619 		    zfs_replay_vector);
620 
621 		if (!zil_disable)
622 			zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data);
623 	}
624 
625 	if (!zfsvfs->z_issnap)
626 		zfsctl_create(zfsvfs);
627 out:
628 	if (error) {
629 		if (zfsvfs->z_os)
630 			dmu_objset_close(zfsvfs->z_os);
631 		kmem_free(zfsvfs, sizeof (zfsvfs_t));
632 	} else {
633 		atomic_add_32(&zfs_active_fs_count, 1);
634 	}
635 
636 	return (error);
637 }
638 
639 void
640 zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
641 {
642 	objset_t *os = zfsvfs->z_os;
643 	struct dsl_dataset *ds;
644 
645 	/*
646 	 * Unregister properties.
647 	 */
648 	if (!dmu_objset_is_snapshot(os)) {
649 		ds = dmu_objset_ds(os);
650 		VERIFY(dsl_prop_unregister(ds, "atime", atime_changed_cb,
651 		    zfsvfs) == 0);
652 
653 		VERIFY(dsl_prop_unregister(ds, "xattr", xattr_changed_cb,
654 		    zfsvfs) == 0);
655 
656 		VERIFY(dsl_prop_unregister(ds, "recordsize", blksz_changed_cb,
657 		    zfsvfs) == 0);
658 
659 		VERIFY(dsl_prop_unregister(ds, "readonly", readonly_changed_cb,
660 		    zfsvfs) == 0);
661 
662 		VERIFY(dsl_prop_unregister(ds, "devices", devices_changed_cb,
663 		    zfsvfs) == 0);
664 
665 		VERIFY(dsl_prop_unregister(ds, "setuid", setuid_changed_cb,
666 		    zfsvfs) == 0);
667 
668 		VERIFY(dsl_prop_unregister(ds, "exec", exec_changed_cb,
669 		    zfsvfs) == 0);
670 
671 		VERIFY(dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb,
672 		    zfsvfs) == 0);
673 
674 		VERIFY(dsl_prop_unregister(ds, "aclmode", acl_mode_changed_cb,
675 		    zfsvfs) == 0);
676 
677 		VERIFY(dsl_prop_unregister(ds, "aclinherit",
678 		    acl_inherit_changed_cb, zfsvfs) == 0);
679 	}
680 }
681 
682 /*
683  * Convert a decimal digit string to a uint64_t integer.
684  */
685 static int
686 str_to_uint64(char *str, uint64_t *objnum)
687 {
688 	uint64_t num = 0;
689 
690 	while (*str) {
691 		if (*str < '0' || *str > '9')
692 			return (EINVAL);
693 
694 		num = num*10 + *str++ - '0';
695 	}
696 
697 	*objnum = num;
698 	return (0);
699 }
700 
701 /*
702  * The boot path passed from the boot loader is in the form of
703  * "rootpool-name/root-filesystem-object-number". Convert this
704  * string to a dataset name: "rootpool-name/root-filesystem-name".
705  */
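/*
 * For example, a hypothetical boot path of "rpool/85" would be resolved
 * to the name of the dataset whose object number is 85 in "rpool", such
 * as "rpool/ROOT/zfsroot" (the names here are purely illustrative).
 */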
706 static int
707 parse_bootpath(char *bpath, char *outpath)
708 {
709 	char *slashp;
710 	uint64_t objnum;
711 	int error;
712 
713 	if (*bpath == 0 || *bpath == '/')
714 		return (EINVAL);
715 
716 	slashp = strchr(bpath, '/');
717 
718 	/* if no '/', just return the pool name */
719 	if (slashp == NULL) {
720 		(void) strcpy(outpath, bpath);
721 		return (0);
722 	}
723 
724 	if (error = str_to_uint64(slashp+1, &objnum))
725 		return (error);
726 
727 	*slashp = '\0';
728 	error = dsl_dsobj_to_dsname(bpath, objnum, outpath);
729 	*slashp = '/';
730 
731 	return (error);
732 }
733 
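/*
 * Mount (ROOT_INIT), remount (ROOT_REMOUNT), or tear down (ROOT_UNMOUNT)
 * the root filesystem.  The dataset to mount is taken from the
 * "zfs-bootfs" device tree property supplied by the boot loader.
 */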
734 static int
735 zfs_mountroot(vfs_t *vfsp, enum whymountroot why)
736 {
737 	int error = 0;
738 	int ret = 0;
739 	static int zfsrootdone = 0;
740 	zfsvfs_t *zfsvfs = NULL;
741 	znode_t *zp = NULL;
742 	vnode_t *vp = NULL;
743 	char *zfs_bootpath;
744 
745 	ASSERT(vfsp);
746 
747 	/*
748 	 * The filesystem that we mount as root is defined in the
749 	 * "zfs-bootfs" property.
750 	 */
751 	if (why == ROOT_INIT) {
752 		if (zfsrootdone++)
753 			return (EBUSY);
754 
755 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
756 		    DDI_PROP_DONTPASS, "zfs-bootfs", &zfs_bootpath) !=
757 		    DDI_SUCCESS)
758 			return (EIO);
759 
760 		error = parse_bootpath(zfs_bootpath, rootfs.bo_name);
761 		ddi_prop_free(zfs_bootpath);
762 
763 		if (error)
764 			return (error);
765 
766 		if (error = vfs_lock(vfsp))
767 			return (error);
768 
769 		if (error = zfs_domount(vfsp, rootfs.bo_name, CRED()))
770 			goto out;
771 
772 		zfsvfs = (zfsvfs_t *)vfsp->vfs_data;
773 		ASSERT(zfsvfs);
774 		if (error = zfs_zget(zfsvfs, zfsvfs->z_root, &zp))
775 			goto out;
776 
777 		vp = ZTOV(zp);
778 		mutex_enter(&vp->v_lock);
779 		vp->v_flag |= VROOT;
780 		mutex_exit(&vp->v_lock);
781 		rootvp = vp;
782 
783 		/*
784 		 * The zfs_zget call above returns with a hold on vp; we release
785 		 * it here.
786 		 */
787 		VN_RELE(vp);
788 
789 		/*
790 		 * Mount root as read-only initially; it will be remounted
791 		 * read/write by /lib/svc/method/fs-usr.
792 		 */
793 		readonly_changed_cb(vfsp->vfs_data, B_TRUE);
794 		vfs_add((struct vnode *)0, vfsp,
795 		    (vfsp->vfs_flag & VFS_RDONLY) ? MS_RDONLY : 0);
796 out:
797 		vfs_unlock(vfsp);
798 		ret = (error) ? error : 0;
799 		return (ret);
800 	} else if (why == ROOT_REMOUNT) {
801 		readonly_changed_cb(vfsp->vfs_data, B_FALSE);
802 		vfsp->vfs_flag |= VFS_REMOUNT;
803 
804 		/* refresh mount options */
805 		zfs_unregister_callbacks(vfsp->vfs_data);
806 		return (zfs_register_callbacks(vfsp));
807 
808 	} else if (why == ROOT_UNMOUNT) {
809 		zfs_unregister_callbacks((zfsvfs_t *)vfsp->vfs_data);
810 		(void) zfs_sync(vfsp, 0, 0);
811 		return (0);
812 	}
813 
814 	/*
815  * If "why" is anything other than ROOT_INIT,
816 	 * ROOT_REMOUNT, or ROOT_UNMOUNT, we do not support it.
817 	 */
818 	return (ENOTSUP);
819 }
820 
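/*
 * VFS mount entry point: validate the mount point and caller privileges
 * (including delegated 'mount' permission), honor MS_REMOUNT by simply
 * refreshing the property callbacks, and otherwise hand off to
 * zfs_domount().
 */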
821 /*ARGSUSED*/
822 static int
823 zfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
824 {
825 	char		*osname;
826 	pathname_t	spn;
827 	int		error = 0;
828 	uio_seg_t	fromspace = (uap->flags & MS_SYSSPACE) ?
829 	    UIO_SYSSPACE : UIO_USERSPACE;
830 	int		canwrite;
831 
832 	if (mvp->v_type != VDIR)
833 		return (ENOTDIR);
834 
835 	mutex_enter(&mvp->v_lock);
836 	if ((uap->flags & MS_REMOUNT) == 0 &&
837 	    (uap->flags & MS_OVERLAY) == 0 &&
838 	    (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
839 		mutex_exit(&mvp->v_lock);
840 		return (EBUSY);
841 	}
842 	mutex_exit(&mvp->v_lock);
843 
844 	/*
845 	 * ZFS does not support passing unparsed data in via MS_DATA.
846 	 * Users should use the MS_OPTIONSTR interface; this means
847 	 * that all option parsing is already done and the options struct
848 	 * can be interrogated.
849 	 */
850 	if ((uap->flags & MS_DATA) && uap->datalen > 0)
851 		return (EINVAL);
852 
853 	/*
854 	 * Get the objset name (the "special" mount argument).
855 	 */
856 	if (error = pn_get(uap->spec, fromspace, &spn))
857 		return (error);
858 
859 	osname = spn.pn_path;
860 
861 	/*
862 	 * Check for mount privilege.
863 	 *
864 	 * If we don't have the privilege, see whether we have local
865 	 * (delegated) permission to allow the mount.
866 	 */
867 	error = secpolicy_fs_mount(cr, mvp, vfsp);
868 	if (error) {
869 		error = dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr);
870 		if (error == 0) {
871 			vattr_t		vattr;
872 
873 			/*
874 			 * Make sure user is the owner of the mount point
875 			 * or has sufficient privileges.
876 			 */
877 
878 			vattr.va_mask = AT_UID;
879 
880 			if (error = VOP_GETATTR(mvp, &vattr, 0, cr)) {
881 				goto out;
882 			}
883 
884 			if (error = secpolicy_vnode_owner(cr, vattr.va_uid)) {
885 				goto out;
886 			}
887 
888 			if (error = VOP_ACCESS(mvp, VWRITE, 0, cr)) {
889 				goto out;
890 			}
891 
892 			secpolicy_fs_mount_clearopts(cr, vfsp);
893 		} else {
894 			goto out;
895 		}
896 	}
897 
898 	/*
899 	 * Refuse to mount a filesystem if we are in a local zone and the
900 	 * dataset is not visible.
901 	 */
902 	if (!INGLOBALZONE(curproc) &&
903 	    (!zone_dataset_visible(osname, &canwrite) || !canwrite)) {
904 		error = EPERM;
905 		goto out;
906 	}
907 
908 	/*
909 	 * When doing a remount, we simply refresh our temporary properties
910 	 * according to the options currently set on the VFS.
911 	 */
912 	if (uap->flags & MS_REMOUNT) {
913 		/* refresh mount options */
914 		zfs_unregister_callbacks(vfsp->vfs_data);
915 		error = zfs_register_callbacks(vfsp);
916 		goto out;
917 	}
918 
919 	error = zfs_domount(vfsp, osname, cr);
920 
921 out:
922 	pn_free(&spn);
923 	return (error);
924 }
925 
926 static int
927 zfs_statvfs(vfs_t *vfsp, struct statvfs64 *statp)
928 {
929 	zfsvfs_t *zfsvfs = vfsp->vfs_data;
930 	dev32_t d32;
931 	uint64_t refdbytes, availbytes, usedobjs, availobjs;
932 
933 	ZFS_ENTER(zfsvfs);
934 
935 	dmu_objset_space(zfsvfs->z_os,
936 	    &refdbytes, &availbytes, &usedobjs, &availobjs);
937 
938 	/*
939 	 * The underlying storage pool actually uses multiple block sizes.
940 	 * We report the fragsize as the smallest block size we support,
941 	 * and we report our blocksize as the filesystem's maximum blocksize.
942 	 */
943 	statp->f_frsize = 1UL << SPA_MINBLOCKSHIFT;
944 	statp->f_bsize = zfsvfs->z_max_blksz;
945 
946 	/*
947 	 * The following fields report "total" blocks of various kinds in
948 	 * the file system, expressed in units of f_frsize - the
949 	 * "fragment" size.
950 	 */
951 
952 	statp->f_blocks = (refdbytes + availbytes) >> SPA_MINBLOCKSHIFT;
953 	statp->f_bfree = availbytes >> SPA_MINBLOCKSHIFT;
954 	statp->f_bavail = statp->f_bfree; /* no root reservation */
955 
956 	/*
957 	 * statvfs() should really be called statufs(), because it assumes
958 	 * static metadata.  ZFS doesn't preallocate files, so the best
959 	 * we can do is report the max that could possibly fit in f_files,
960 	 * and that minus the number actually used in f_ffree.
961 	 * For f_ffree, report the smaller of the number of objects available
962 	 * and the number of blocks (each object will take at least a block).
963 	 */
964 	statp->f_ffree = MIN(availobjs, statp->f_bfree);
965 	statp->f_favail = statp->f_ffree;	/* no "root reservation" */
966 	statp->f_files = statp->f_ffree + usedobjs;
967 
968 	(void) cmpldev(&d32, vfsp->vfs_dev);
969 	statp->f_fsid = d32;
970 
971 	/*
972 	 * We're a zfs filesystem.
973 	 */
974 	(void) strcpy(statp->f_basetype, vfssw[vfsp->vfs_fstype].vsw_name);
975 
976 	statp->f_flag = vf_to_stf(vfsp->vfs_flag);
977 
978 	statp->f_namemax = ZFS_MAXNAMELEN;
979 
980 	/*
981 	 * We have all of 32 characters to stuff a string here.
982 	 * Is there anything useful we could/should provide?
983 	 */
984 	bzero(statp->f_fstr, sizeof (statp->f_fstr));
985 
986 	ZFS_EXIT(zfsvfs);
987 	return (0);
988 }
989 
990 static int
991 zfs_root(vfs_t *vfsp, vnode_t **vpp)
992 {
993 	zfsvfs_t *zfsvfs = vfsp->vfs_data;
994 	znode_t *rootzp;
995 	int error;
996 
997 	ZFS_ENTER(zfsvfs);
998 
999 	error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
1000 	if (error == 0)
1001 		*vpp = ZTOV(rootzp);
1002 
1003 	ZFS_EXIT(zfsvfs);
1004 	return (error);
1005 }
1006 
1007 /*ARGSUSED*/
1008 static int
1009 zfs_umount(vfs_t *vfsp, int fflag, cred_t *cr)
1010 {
1011 	zfsvfs_t *zfsvfs = vfsp->vfs_data;
1012 	int ret;
1013 
1014 	ret = secpolicy_fs_unmount(cr, vfsp);
1015 	if (ret) {
1016 		ret = dsl_deleg_access((char *)refstr_value(vfsp->vfs_resource),
1017 		    ZFS_DELEG_PERM_MOUNT, cr);
1018 		if (ret)
1019 			return (ret);
1020 	}
1021 
1022 	/*
1023 	 * We purge the parent filesystem's vfsp because the parent filesystem
1024 	 * and all of its snapshots have their vnodes' v_vfsp set to the
1025 	 * parent filesystem's vfsp.  Note, 'z_parent' is self-referential
1026 	 * for non-snapshots.
1027 	 */
1028 	(void) dnlc_purge_vfsp(zfsvfs->z_parent->z_vfs, 0);
1029 
1030 	/*
1031 	 * Unmount any snapshots mounted under .zfs before unmounting the
1032 	 * dataset itself.
1033 	 */
1034 	if (zfsvfs->z_ctldir != NULL &&
1035 	    (ret = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0) {
1036 		return (ret);
1037 	}
1038 
1039 	if (fflag & MS_FORCE) {
1040 		vfsp->vfs_flag |= VFS_UNMOUNTED;
1041 		zfsvfs->z_unmounted1 = B_TRUE;
1042 
1043 		/*
1044 		 * Ensure that z_unmounted1 reaches global visibility
1045 		 * before z_op_cnt.
1046 		 */
1047 		membar_producer();
1048 
1049 		/*
1050 		 * Wait for all zfs threads to leave zfs.
1051 		 * Grabbing a rwlock as reader in all vops and
1052 		 * as writer here doesn't work because it is too easy to get
1053 		 * multiple reader enters as zfs can re-enter itself.
1054 		 * This can lead to deadlock if there is an intervening
1055 		 * rw_enter as writer.
1056 		 * So a per-filesystem thread reference count (z_op_cnt) is used.
1057 		 * A polling loop on z_op_cnt may seem inefficient, but
1058 		 * - this saves all threads on exit from having to grab a
1059 		 *   mutex in order to cv_signal
1060 		 * - only occurs on forced unmount in the rare case when
1061 		 *   there are outstanding threads within the file system.
1062 		 */
1063 		while (zfsvfs->z_op_cnt) {
1064 			delay(1);
1065 		}
1066 
1067 		zfs_objset_close(zfsvfs);
1068 
1069 		return (0);
1070 	}
1071 	/*
1072 	 * Check the number of active vnodes in the file system.
1073 	 * Our count is maintained in the vfs structure, but the number
1074 	 * is off by 1 to indicate a hold on the vfs structure itself.
1075 	 *
1076 	 * The '.zfs' directory maintains a reference of its own, and any active
1077 	 * references underneath are reflected in the vnode count.
1078 	 */
1079 	if (zfsvfs->z_ctldir == NULL) {
1080 		if (vfsp->vfs_count > 1)
1081 			return (EBUSY);
1082 	} else {
1083 		if (vfsp->vfs_count > 2 ||
1084 		    (zfsvfs->z_ctldir->v_count > 1 && !(fflag & MS_FORCE))) {
1085 			return (EBUSY);
1086 		}
1087 	}
1088 
1089 	vfsp->vfs_flag |= VFS_UNMOUNTED;
1090 	zfs_objset_close(zfsvfs);
1091 
1092 	return (0);
1093 }
1094 
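/*
 * Translate an NFS file handle (fid) back into a vnode.  Long fids
 * additionally encode the objset id, which lets us reach snapshots
 * mounted under '.zfs'; a generation of zero identifies the '.zfs'
 * control directory entries themselves.
 */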
1095 static int
1096 zfs_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp)
1097 {
1098 	zfsvfs_t	*zfsvfs = vfsp->vfs_data;
1099 	znode_t		*zp;
1100 	uint64_t	object = 0;
1101 	uint64_t	fid_gen = 0;
1102 	uint64_t	gen_mask;
1103 	uint64_t	zp_gen;
1104 	int 		i, err;
1105 
1106 	*vpp = NULL;
1107 
1108 	ZFS_ENTER(zfsvfs);
1109 
1110 	if (fidp->fid_len == LONG_FID_LEN) {
1111 		zfid_long_t	*zlfid = (zfid_long_t *)fidp;
1112 		uint64_t	objsetid = 0;
1113 		uint64_t	setgen = 0;
1114 
1115 		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
1116 			objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);
1117 
1118 		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
1119 			setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);
1120 
1121 		ZFS_EXIT(zfsvfs);
1122 
1123 		err = zfsctl_lookup_objset(vfsp, objsetid, &zfsvfs);
1124 		if (err)
1125 			return (EINVAL);
1126 		ZFS_ENTER(zfsvfs);
1127 	}
1128 
1129 	if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
1130 		zfid_short_t	*zfid = (zfid_short_t *)fidp;
1131 
1132 		for (i = 0; i < sizeof (zfid->zf_object); i++)
1133 			object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);
1134 
1135 		for (i = 0; i < sizeof (zfid->zf_gen); i++)
1136 			fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
1137 	} else {
1138 		ZFS_EXIT(zfsvfs);
1139 		return (EINVAL);
1140 	}
1141 
1142 	/* A zero fid_gen means we are in the .zfs control directories */
1143 	if (fid_gen == 0 &&
1144 	    (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
1145 		*vpp = zfsvfs->z_ctldir;
1146 		ASSERT(*vpp != NULL);
1147 		if (object == ZFSCTL_INO_SNAPDIR) {
1148 			VERIFY(zfsctl_root_lookup(*vpp, "snapshot", vpp, NULL,
1149 			    0, NULL, NULL) == 0);
1150 		} else {
1151 			VN_HOLD(*vpp);
1152 		}
1153 		ZFS_EXIT(zfsvfs);
1154 		return (0);
1155 	}
1156 
1157 	gen_mask = -1ULL >> (64 - 8 * i);
1158 
1159 	dprintf("getting %llu [%u mask %llx]\n", object, fid_gen, gen_mask);
1160 	if (err = zfs_zget(zfsvfs, object, &zp)) {
1161 		ZFS_EXIT(zfsvfs);
1162 		return (err);
1163 	}
1164 	zp_gen = zp->z_phys->zp_gen & gen_mask;
1165 	if (zp_gen == 0)
1166 		zp_gen = 1;
1167 	if (zp->z_unlinked || zp_gen != fid_gen) {
1168 		dprintf("znode gen (%u) != fid gen (%u)\n", zp_gen, fid_gen);
1169 		VN_RELE(ZTOV(zp));
1170 		ZFS_EXIT(zfsvfs);
1171 		return (EINVAL);
1172 	}
1173 
1174 	*vpp = ZTOV(zp);
1175 	ZFS_EXIT(zfsvfs);
1176 	return (0);
1177 }
1178 
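/*
 * Tear down the objset backing this mount: drop lingering dbuf holds
 * (forced unmount only), unregister property callbacks, close the ZIL,
 * evict all dbufs, close the objset, and destroy the '.zfs' node.
 */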
1179 static void
1180 zfs_objset_close(zfsvfs_t *zfsvfs)
1181 {
1182 	znode_t		*zp, *nextzp;
1183 	objset_t	*os = zfsvfs->z_os;
1184 
1185 	/*
1186 	 * For forced unmount, at this point all vops except zfs_inactive
1187 	 * are returning EIO.  We now need to suspend zfs_inactive threads
1188 	 * while we are freeing dbufs, before switching zfs_inactive
1189 	 * to its behaviour without an objset.
1190 	 */
1191 	rw_enter(&zfsvfs->z_um_lock, RW_WRITER);
1192 
1193 	/*
1194 	 * Release all holds on dbufs.
1195 	 * Note, although we have stopped all other vop threads and
1196 	 * zfs_inactive(), the dmu can call back via znode_pageout_func(),
1197 	 * which can zfs_znode_free() the znode.
1198 	 * So we lock z_all_znodes; search the list for a held
1199 	 * dbuf; drop the lock (we know zp can't disappear while we hold
1200 	 * a dbuf); then re-grab the lock and restart.
1201 	 */
1202 	mutex_enter(&zfsvfs->z_znodes_lock);
1203 	for (zp = list_head(&zfsvfs->z_all_znodes); zp; zp = nextzp) {
1204 		nextzp = list_next(&zfsvfs->z_all_znodes, zp);
1205 		if (zp->z_dbuf_held) {
1206 			/* dbufs should only be held when force unmounting */
1207 			zp->z_dbuf_held = 0;
1208 			mutex_exit(&zfsvfs->z_znodes_lock);
1209 			dmu_buf_rele(zp->z_dbuf, NULL);
1210 			/* Start again */
1211 			mutex_enter(&zfsvfs->z_znodes_lock);
1212 			nextzp = list_head(&zfsvfs->z_all_znodes);
1213 		}
1214 	}
1215 	mutex_exit(&zfsvfs->z_znodes_lock);
1216 
1217 	/*
1218 	 * Unregister properties.
1219 	 */
1220 	if (!dmu_objset_is_snapshot(os))
1221 		zfs_unregister_callbacks(zfsvfs);
1222 
1223 	/*
1224 	 * Switch zfs_inactive to behaviour without an objset.
1225 	 * It just tosses cached pages and frees the znode & vnode.
1226 	 * Then re-enable zfs_inactive threads in that new behaviour.
1227 	 */
1228 	zfsvfs->z_unmounted2 = B_TRUE;
1229 	rw_exit(&zfsvfs->z_um_lock); /* re-enable any zfs_inactive threads */
1230 
1231 	/*
1232 	 * Close the zil. Can't close the zil while zfs_inactive
1233 	 * threads are blocked as zil_close can call zfs_inactive.
1234 	 */
1235 	if (zfsvfs->z_log) {
1236 		zil_close(zfsvfs->z_log);
1237 		zfsvfs->z_log = NULL;
1238 	}
1239 
1240 	/*
1241 	 * Evict all dbufs so that cached znodes will be freed
1242 	 */
1243 	if (dmu_objset_evict_dbufs(os, 1)) {
1244 		txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
1245 		(void) dmu_objset_evict_dbufs(os, 0);
1246 	}
1247 
1248 	/*
1249 	 * Finally close the objset
1250 	 */
1251 	dmu_objset_close(os);
1252 
1253 	/*
1254 	 * We can now safely destroy the '.zfs' directory node.
1255 	 */
1256 	if (zfsvfs->z_ctldir != NULL)
1257 		zfsctl_destroy(zfsvfs);
1258 
1259 }
1260 
1261 static void
1262 zfs_freevfs(vfs_t *vfsp)
1263 {
1264 	zfsvfs_t *zfsvfs = vfsp->vfs_data;
1265 
1266 	kmem_free(zfsvfs, sizeof (zfsvfs_t));
1267 
1268 	atomic_add_32(&zfs_active_fs_count, -1);
1269 }
1270 
1271 /*
1272  * VFS_INIT() initialization.  Note that there is no VFS_FINI(),
1273  * so we can't safely do any non-idempotent initialization here.
1274  * Leave that to zfs_init() and zfs_fini(), which are called
1275  * from the module's _init() and _fini() entry points.
1276  */
1277 /*ARGSUSED*/
1278 static int
1279 zfs_vfsinit(int fstype, char *name)
1280 {
1281 	int error;
1282 
1283 	zfsfstype = fstype;
1284 
1285 	/*
1286 	 * Setup vfsops and vnodeops tables.
1287 	 */
1288 	error = vfs_setfsops(fstype, zfs_vfsops_template, &zfs_vfsops);
1289 	if (error != 0) {
1290 		cmn_err(CE_WARN, "zfs: bad vfs ops template");
1291 	}
1292 
1293 	error = zfs_create_op_tables();
1294 	if (error) {
1295 		zfs_remove_op_tables();
1296 		cmn_err(CE_WARN, "zfs: bad vnode ops template");
1297 		(void) vfs_freevfsops_by_type(zfsfstype);
1298 		return (error);
1299 	}
1300 
1301 	mutex_init(&zfs_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
1302 
1303 	/*
1304 	 * Unique major number for all zfs mounts.
1305 	 * If we run out of 32-bit minors, we'll getudev() another major.
1306 	 */
1307 	zfs_major = ddi_name_to_major(ZFS_DRIVER);
1308 	zfs_minor = ZFS_MIN_MINOR;
1309 
1310 	return (0);
1311 }
1312 
1313 void
1314 zfs_init(void)
1315 {
1316 	/*
1317 	 * Initialize .zfs directory structures
1318 	 */
1319 	zfsctl_init();
1320 
1321 	/*
1322 	 * Initialize znode cache, vnode ops, etc...
1323 	 */
1324 	zfs_znode_init();
1325 }
1326 
1327 void
1328 zfs_fini(void)
1329 {
1330 	zfsctl_fini();
1331 	zfs_znode_fini();
1332 }
1333 
1334 int
1335 zfs_busy(void)
1336 {
1337 	return (zfs_active_fs_count != 0);
1338 }
1339 
1340 int
1341 zfs_get_stats(objset_t *os, nvlist_t *nv)
1342 {
1343 	int error;
1344 	uint64_t val;
1345 
1346 	error = zap_lookup(os, MASTER_NODE_OBJ, ZPL_VERSION_STR, 8, 1, &val);
1347 	if (error == 0)
1348 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VERSION, val);
1349 
1350 	return (error);
1351 }
1352 
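/*
 * Update the on-disk ZPL version stored in the master node ZAP.  The
 * new version must be within the supported range and may not be lower
 * than the currently recorded version.
 */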
1353 int
1354 zfs_set_version(const char *name, uint64_t newvers)
1355 {
1356 	int error;
1357 	objset_t *os;
1358 	dmu_tx_t *tx;
1359 	uint64_t curvers;
1360 
1361 	/*
1362 	 * XXX for now, require that the filesystem be unmounted.  Would
1363 	 * be nice to find the zfsvfs_t and just update that if
1364 	 * possible.
1365 	 */
1366 
1367 	if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
1368 		return (EINVAL);
1369 
1370 	error = dmu_objset_open(name, DMU_OST_ZFS, DS_MODE_PRIMARY, &os);
1371 	if (error)
1372 		return (error);
1373 
1374 	error = zap_lookup(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
1375 	    8, 1, &curvers);
1376 	if (error)
1377 		goto out;
1378 	if (newvers < curvers) {
1379 		error = EINVAL;
1380 		goto out;
1381 	}
1382 
1383 	tx = dmu_tx_create(os);
1384 	dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, 0, ZPL_VERSION_STR);
1385 	error = dmu_tx_assign(tx, TXG_WAIT);
1386 	if (error) {
1387 		dmu_tx_abort(tx);
1388 		goto out;
1389 	}
1390 	error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR, 8, 1,
1391 	    &newvers, tx);
1392 
1393 	spa_history_internal_log(LOG_DS_UPGRADE,
1394 	    dmu_objset_spa(os), tx, CRED(),
1395 	    "oldver=%llu newver=%llu dataset = %llu", curvers, newvers,
1396 	    dmu_objset_id(os));
1397 	dmu_tx_commit(tx);
1398 
1399 out:
1400 	dmu_objset_close(os);
1401 	return (error);
1402 }
1403 
1404 static vfsdef_t vfw = {
1405 	VFSDEF_VERSION,
1406 	MNTTYPE_ZFS,
1407 	zfs_vfsinit,
1408 	VSW_HASPROTO|VSW_CANRWRO|VSW_CANREMOUNT|VSW_VOLATILEDEV|VSW_STATS,
1409 	&zfs_mntopts
1410 };
1411 
1412 struct modlfs zfs_modlfs = {
1413 	&mod_fsops, "ZFS filesystem version " SPA_VERSION_STRING, &vfw
1414 };
1415