1 /*
2  * Copyright (c) 1989, 1991, 1993, 1994
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by the University of
16  *	California, Berkeley and its contributors.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
34  * $FreeBSD$
35  */
36 
37 #include "opt_quota.h"
38 #include "opt_ufs.h"
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/namei.h>
43 #include <sys/proc.h>
44 #include <sys/kernel.h>
45 #include <sys/vnode.h>
46 #include <sys/mount.h>
47 #include <sys/bio.h>
48 #include <sys/buf.h>
49 #include <sys/conf.h>
50 #include <sys/fcntl.h>
51 #include <sys/disklabel.h>
52 #include <sys/malloc.h>
53 #include <sys/mutex.h>
54 
55 #include <ufs/ufs/extattr.h>
56 #include <ufs/ufs/quota.h>
57 #include <ufs/ufs/ufsmount.h>
58 #include <ufs/ufs/inode.h>
59 #include <ufs/ufs/ufs_extern.h>
60 
61 #include <ufs/ffs/fs.h>
62 #include <ufs/ffs/ffs_extern.h>
63 
64 #include <vm/vm.h>
65 #include <vm/vm_page.h>
66 
67 static MALLOC_DEFINE(M_FFSNODE, "FFS node", "FFS vnode private part");
68 
69 static int	ffs_sbupdate __P((struct ufsmount *, int));
70 int	ffs_reload __P((struct mount *,struct ucred *,struct thread *));
71 static int	ffs_oldfscompat __P((struct fs *));
72 static int	ffs_init __P((struct vfsconf *));
73 
74 static struct vfsops ufs_vfsops = {
75 	ffs_mount,
76 	ufs_start,
77 	ffs_unmount,
78 	ufs_root,
79 	ufs_quotactl,
80 	ffs_statfs,
81 	ffs_sync,
82 	ffs_vget,
83 	ffs_fhtovp,
84 	vfs_stdcheckexp,
85 	ffs_vptofh,
86 	ffs_init,
87 	vfs_stduninit,
88 #ifdef UFS_EXTATTR
89 	ufs_extattrctl,
90 #else
91 	vfs_stdextattrctl,
92 #endif
93 };
94 
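/*
 * Register FFS with the VFS layer under its historical filesystem
 * name, "ufs".
 */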
95 VFS_SET(ufs_vfsops, ufs, 0);
96 
97 /*
98  * ffs_mount
99  *
100  * Called when mounting local physical media
101  *
102  * PARAMETERS:
103  *		mountroot
104  *			mp	mount point structure
105  *			path	NULL (flag for root mount!!!)
106  *			data	<unused>
107  *			ndp	<unused>
108  *			td	thread (user credentials check [statfs])
109  *
110  *		mount
111  *			mp	mount point structure
112  *			path	path to mount point
113  *			data	pointer to argument struct in user space
114  *			ndp	mount point namei() return (used for
115  *				credentials on reload), reused to look
116  *				up block device.
117  *			td	thread (user credentials check)
118  *
119  * RETURNS:	0	Success
120  *		!0	error number (errno.h)
121  *
122  * LOCK STATE:
123  *
124  *		ENTRY
125  *			mount point is locked
126  *		EXIT
127  *			mount point is locked
128  *
129  * NOTES:
130  *		A NULL path can be used as a flag since the mount
131  *		system call will fail with EFAULT in copyinstr in
132  *		namei() if it is a genuine NULL from the user.
133  */
134 int
135 ffs_mount(mp, path, data, ndp, td)
136         struct mount		*mp;	/* mount struct pointer*/
137         char			*path;	/* path to mount point*/
138         caddr_t			data;	/* arguments to FS specific mount*/
139         struct nameidata	*ndp;	/* mount point credentials*/
140         struct thread		*td;	/* process requesting mount*/
141 {
142 	size_t		size;
143 	struct vnode	*devvp;
144 	struct ufs_args args;
145 	struct ufsmount *ump = 0;
146 	register struct fs *fs;
147 	int error, flags;
148 	mode_t accessmode;
149 
150 	/*
151 	 * Use NULL path to indicate we are mounting the root file system.
152 	 */
153 	if (path == NULL) {
154 		if ((error = bdevvp(rootdev, &rootvp))) {
155 			printf("ffs_mountroot: can't find rootvp\n");
156 			return (error);
157 		}
158 
159 		if ((error = ffs_mountfs(rootvp, mp, td, M_FFSNODE)) != 0)
160 			return (error);
161 
162 		(void)VFS_STATFS(mp, &mp->mnt_stat, td);
163 		return (0);
164 	}
165 
166 	/*
167 	 * Mounting non-root file system or updating a file system
168 	 */
169 	if ((error = copyin(data, (caddr_t)&args, sizeof(struct ufs_args)))!= 0)
170 		return (error);
171 
172 	/*
173 	 * If updating, check whether changing from read-only to
174 	 * read/write; if there is no device name, that's all we do.
175 	 */
176 	if (mp->mnt_flag & MNT_UPDATE) {
177 		ump = VFSTOUFS(mp);
178 		fs = ump->um_fs;
179 		devvp = ump->um_devvp;
180 		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
181 			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
182 				return (error);
183 			flags = WRITECLOSE;
184 			if (mp->mnt_flag & MNT_FORCE)
185 				flags |= FORCECLOSE;
186 			if (mp->mnt_flag & MNT_SOFTDEP) {
187 				error = softdep_flushfiles(mp, flags, td);
188 			} else {
189 				error = ffs_flushfiles(mp, flags, td);
190 			}
191 			if (error) {
192 				vn_finished_write(mp);
193 				return (error);
194 			}
195 			if (fs->fs_pendingblocks != 0 ||
196 			    fs->fs_pendinginodes != 0) {
197 				printf("%s: update error: blocks %d files %d\n",
198 				    fs->fs_fsmnt, fs->fs_pendingblocks,
199 				    fs->fs_pendinginodes);
200 				fs->fs_pendingblocks = 0;
201 				fs->fs_pendinginodes = 0;
202 			}
203 			fs->fs_ronly = 1;
204 			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
205 				fs->fs_clean = 1;
206 			if ((error = ffs_sbupdate(ump, MNT_WAIT)) != 0) {
207 				fs->fs_ronly = 0;
208 				fs->fs_clean = 0;
209 				vn_finished_write(mp);
210 				return (error);
211 			}
212 			vn_finished_write(mp);
213 		}
214 		if ((mp->mnt_flag & MNT_RELOAD) &&
215 		    (error = ffs_reload(mp, ndp->ni_cnd.cn_cred, td)) != 0)
216 			return (error);
217 		if (fs->fs_ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
218 			/*
219 			 * If upgrade to read-write by non-root, then verify
220 			 * that user has necessary permissions on the device.
221 			 */
222 			if (suser_td(td)) {
223 				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
224 				if ((error = VOP_ACCESS(devvp, VREAD | VWRITE,
225 				    td->td_proc->p_ucred, td)) != 0) {
226 					VOP_UNLOCK(devvp, 0, td);
227 					return (error);
228 				}
229 				VOP_UNLOCK(devvp, 0, td);
230 			}
231 			fs->fs_flags &= ~FS_UNCLEAN;
232 			if (fs->fs_clean == 0) {
233 				fs->fs_flags |= FS_UNCLEAN;
234 				if ((mp->mnt_flag & MNT_FORCE) ||
235 				    ((fs->fs_flags & FS_NEEDSFSCK) == 0 &&
236 				     (fs->fs_flags & FS_DOSOFTDEP))) {
237 					printf("WARNING: %s was not %s\n",
238 					   fs->fs_fsmnt, "properly dismounted");
239 				} else {
240 					printf(
241 "WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n",
242 					    fs->fs_fsmnt);
243 					return (EPERM);
244 				}
245 			}
246 			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
247 				return (error);
248 			fs->fs_ronly = 0;
249 			fs->fs_clean = 0;
250 			if ((error = ffs_sbupdate(ump, MNT_WAIT)) != 0) {
251 				vn_finished_write(mp);
252 				return (error);
253 			}
254 			/* check to see if we need to start softdep */
255 			if ((fs->fs_flags & FS_DOSOFTDEP) &&
256 			    (error = softdep_mount(devvp, mp, fs, td->td_proc->p_ucred))){
257 				vn_finished_write(mp);
258 				return (error);
259 			}
260 			if (fs->fs_snapinum[0] != 0)
261 				ffs_snapshot_mount(mp);
262 			vn_finished_write(mp);
263 		}
264 		/*
265 		 * Soft updates is incompatible with "async",
266 		 * so if we are doing soft updates, stop the user
267 		 * from setting the async flag in an update.
268 		 * Softdep_mount() clears it in an initial mount
269 		 * or ro->rw remount.
270 		 */
271 		if (mp->mnt_flag & MNT_SOFTDEP)
272 			mp->mnt_flag &= ~MNT_ASYNC;
273 		/*
274 		 * If not updating name, process export requests.
275 		 */
276 		if (args.fspec == 0)
277 			return (vfs_export(mp, &args.export));
278 		/*
279 		 * If this is a snapshot request, take the snapshot.
280 		 */
281 		if (mp->mnt_flag & MNT_SNAPSHOT)
282 			return (ffs_snapshot(mp, args.fspec));
283 	}
284 
285 	/*
286 	 * Not an update, or updating the name: look up the name
287 	 * and verify that it refers to a sensible block device.
288 	 */
289 	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, td);
290 	if ((error = namei(ndp)) != 0)
291 		return (error);
292 	NDFREE(ndp, NDF_ONLY_PNBUF);
293 	devvp = ndp->ni_vp;
294 	if (!vn_isdisk(devvp, &error)) {
295 		vrele(devvp);
296 		return (error);
297 	}
298 
299 	/*
300 	 * If mount by non-root, then verify that user has necessary
301 	 * permissions on the device.
302 	 */
303 	if (suser_td(td)) {
304 		accessmode = VREAD;
305 		if ((mp->mnt_flag & MNT_RDONLY) == 0)
306 			accessmode |= VWRITE;
307 		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
308 		if ((error = VOP_ACCESS(devvp, accessmode, td->td_proc->p_ucred, td))!= 0){
309 			vput(devvp);
310 			return (error);
311 		}
312 		VOP_UNLOCK(devvp, 0, td);
313 	}
314 
315 	if (mp->mnt_flag & MNT_UPDATE) {
316 		/*
317 		 * Update only
318 		 *
319 		 * If it is neither the same vnode nor at least a vnode
320 		 * for the same device, then the update is invalid.
321 		 */
322 
323 		if (devvp != ump->um_devvp &&
324 		    devvp->v_rdev != ump->um_devvp->v_rdev)
325 			error = EINVAL;	/* needs translation */
326 		vrele(devvp);
327 		if (error)
328 			return (error);
329 	} else {
330 		/*
331 		 * New mount
332 		 *
333 		 * We need the name for the mount point (also used for
334 		 * "last mounted on") copied in. If an error occurs,
335 		 * the mount point is discarded by the upper level code.
336 		 * Note that vfs_mount() populates f_mntonname for us.
337 		 */
338 		if ((error = ffs_mountfs(devvp, mp, td, M_FFSNODE)) != 0) {
339 			vrele(devvp);
340 			return (error);
341 		}
342 	}
343 	/*
344 	 * Save "mounted from" device name info for mount point (NULL pad).
345 	 */
346 	copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
347 	bzero( mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
348 	/*
349 	 * Initialize filesystem stat information in mount struct.
350 	 */
351 	(void)VFS_STATFS(mp, &mp->mnt_stat, td);
352 	return (0);
353 }
354 
355 /*
356  * Reload all incore data for a filesystem (used after running fsck on
357  * the root filesystem and finding things to fix). The filesystem must
358  * be mounted read-only.
359  *
360  * Things to do to update the mount:
361  *	1) invalidate all cached meta-data.
362  *	2) re-read superblock from disk.
363  *	3) re-read summary information from disk.
364  *	4) invalidate all inactive vnodes.
365  *	5) invalidate all cached file data.
366  *	6) re-read inode data for all active vnodes.
367  */
368 int
369 ffs_reload(mp, cred, td)
370 	register struct mount *mp;
371 	struct ucred *cred;
372 	struct thread *td;
373 {
374 	register struct vnode *vp, *nvp, *devvp;
375 	struct inode *ip;
376 	void *space;
377 	struct buf *bp;
378 	struct fs *fs, *newfs;
379 	struct partinfo dpart;
380 	dev_t dev;
381 	int i, blks, size, error;
382 	int32_t *lp;
383 
384 	if ((mp->mnt_flag & MNT_RDONLY) == 0)
385 		return (EINVAL);
386 	/*
387 	 * Step 1: invalidate all cached meta-data.
388 	 */
389 	devvp = VFSTOUFS(mp)->um_devvp;
390 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
391 	error = vinvalbuf(devvp, 0, cred, td, 0, 0);
392 	VOP_UNLOCK(devvp, 0, td);
393 	if (error)
394 		panic("ffs_reload: dirty1");
395 
396 	dev = devvp->v_rdev;
397 
398 	/*
399 	 * Only VMIO the backing device if the backing device is a real
400 	 * block device.
401 	 */
402 	if (vn_isdisk(devvp, NULL)) {
403 		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
404 		vfs_object_create(devvp, td, td->td_proc->p_ucred);
405 		mtx_lock(&devvp->v_interlock);
406 		VOP_UNLOCK(devvp, LK_INTERLOCK, td);
407 	}
408 
409 	/*
410 	 * Step 2: re-read superblock from disk.
411 	 */
412 	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, td) != 0)
413 		size = DEV_BSIZE;
414 	else
415 		size = dpart.disklab->d_secsize;
416 	if ((error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, NOCRED,&bp)) != 0)
417 		return (error);
418 	newfs = (struct fs *)bp->b_data;
419 	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
420 		newfs->fs_bsize < sizeof(struct fs)) {
421 			brelse(bp);
422 			return (EIO);		/* XXX needs translation */
423 	}
424 	fs = VFSTOUFS(mp)->um_fs;
425 	/*
426 	 * Copy pointer fields back into superblock before copying in	XXX
427 	 * new superblock. These should really be in the ufsmount.	XXX
428 	 * Note that important parameters (eg fs_ncg) are unchanged.
429 	 */
430 	newfs->fs_csp = fs->fs_csp;
431 	newfs->fs_maxcluster = fs->fs_maxcluster;
432 	newfs->fs_contigdirs = fs->fs_contigdirs;
433 	newfs->fs_active = fs->fs_active;
434 	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
435 	if (fs->fs_sbsize < SBSIZE)
436 		bp->b_flags |= B_INVAL | B_NOCACHE;
437 	brelse(bp);
438 	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
439 	ffs_oldfscompat(fs);
440 	/* An old fsck may have zeroed these fields, so recheck them. */
441 	if (fs->fs_avgfilesize <= 0)		/* XXX */
442 		fs->fs_avgfilesize = AVFILESIZ;	/* XXX */
443 	if (fs->fs_avgfpdir <= 0)		/* XXX */
444 		fs->fs_avgfpdir = AFPDIR;	/* XXX */
445 	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
446 		printf("%s: reload pending error: blocks %d files %d\n",
447 		    fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
448 		fs->fs_pendingblocks = 0;
449 		fs->fs_pendinginodes = 0;
450 	}
451 
452 	/*
453 	 * Step 3: re-read summary information from disk.
454 	 */
455 	blks = howmany(fs->fs_cssize, fs->fs_fsize);
456 	space = fs->fs_csp;
457 	for (i = 0; i < blks; i += fs->fs_frag) {
458 		size = fs->fs_bsize;
459 		if (i + fs->fs_frag > blks)
460 			size = (blks - i) * fs->fs_fsize;
461 		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
462 		    NOCRED, &bp);
463 		if (error)
464 			return (error);
465 		bcopy(bp->b_data, space, (u_int)size);
466 		space = (char *)space + size;
467 		brelse(bp);
468 	}
469 	/*
470 	 * We no longer know anything about clusters per cylinder group.
471 	 */
472 	if (fs->fs_contigsumsize > 0) {
473 		lp = fs->fs_maxcluster;
474 		for (i = 0; i < fs->fs_ncg; i++)
475 			*lp++ = fs->fs_contigsumsize;
476 	}
477 
478 loop:
479 	mtx_lock(&mntvnode_mtx);
480 	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
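		/*
		 * If this vnode was reclaimed or moved to another mount
		 * while the mutex was dropped, the list walk is no longer
		 * trustworthy; start over.
		 */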
481 		if (vp->v_mount != mp) {
482 			mtx_unlock(&mntvnode_mtx);
483 			goto loop;
484 		}
485 		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
486 		mtx_unlock(&mntvnode_mtx);
487 		/*
488 		 * Step 4: invalidate all inactive vnodes.
489 		 */
490 		if (vrecycle(vp, NULL, td))
491 			goto loop;
492 		/*
493 		 * Step 5: invalidate all cached file data.
494 		 */
495 		mtx_lock(&vp->v_interlock);
496 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
497 			goto loop;
498 		}
499 		if (vinvalbuf(vp, 0, cred, td, 0, 0))
500 			panic("ffs_reload: dirty2");
501 		/*
502 		 * Step 6: re-read inode data for all active vnodes.
503 		 */
504 		ip = VTOI(vp);
505 		error =
506 		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
507 		    (int)fs->fs_bsize, NOCRED, &bp);
508 		if (error) {
509 			vput(vp);
510 			return (error);
511 		}
512 		ip->i_din = *((struct dinode *)bp->b_data +
513 		    ino_to_fsbo(fs, ip->i_number));
514 		ip->i_effnlink = ip->i_nlink;
515 		brelse(bp);
516 		vput(vp);
517 		mtx_lock(&mntvnode_mtx);
518 	}
519 	mtx_unlock(&mntvnode_mtx);
520 	return (0);
521 }
522 
523 #include <sys/sysctl.h>
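/*
 * Debug knob (debug.bigcgs): when set, ffs_mountfs() substitutes fs_bsize
 * for fs_cgsize (stashing the original value in fs_sparecon[0]) and
 * ffs_unmount() restores it; apparently a workaround for filesystems with
 * problematic cylinder group sizes.
 */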
524 int bigcgs = 0;
525 SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");
526 
527 /*
528  * Common code for mount and mountroot
529  */
530 int
531 ffs_mountfs(devvp, mp, td, malloctype)
532 	register struct vnode *devvp;
533 	struct mount *mp;
534 	struct thread *td;
535 	struct malloc_type *malloctype;
536 {
537 	register struct ufsmount *ump;
538 	struct buf *bp;
539 	register struct fs *fs;
540 	dev_t dev;
541 	struct partinfo dpart;
542 	void *space;
543 	int error, i, blks, size, ronly;
544 	int32_t *lp;
545 	struct ucred *cred;
546 	u_int64_t maxfilesize;					/* XXX */
547 	size_t strsize;
548 	int ncount;
549 
550 	dev = devvp->v_rdev;
551 	cred = td ? td->td_proc->p_ucred : NOCRED;
552 	/*
553 	 * Disallow multiple mounts of the same device.
554 	 * Disallow mounting of a device that is currently in use
555 	 * (except for root, which might share swap device for miniroot).
556 	 * Flush out any old buffers remaining from a previous use.
557 	 */
558 	error = vfs_mountedon(devvp);
559 	if (error)
560 		return (error);
561 	ncount = vcount(devvp);
562 
563 	if (ncount > 1 && devvp != rootvp)
564 		return (EBUSY);
565 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
566 	error = vinvalbuf(devvp, V_SAVE, cred, td, 0, 0);
567 	VOP_UNLOCK(devvp, 0, td);
568 	if (error)
569 		return (error);
570 
571 	/*
572 	 * Only VMIO the backing device if the backing device is a real
573 	 * block device.
574 	 * Note that it is optional that the backing device be VMIOed.  This
575 	 * increases the opportunity for metadata caching.
576 	 */
577 	if (vn_isdisk(devvp, NULL)) {
578 		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
579 		vfs_object_create(devvp, td, cred);
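		/*
		 * The interlock is acquired here and handed to VOP_UNLOCK()
		 * via LK_INTERLOCK, which releases it along with the vnode
		 * lock.
		 */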
580 		mtx_lock(&devvp->v_interlock);
581 		VOP_UNLOCK(devvp, LK_INTERLOCK, td);
582 	}
583 
584 	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
585 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
586 	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, td);
587 	VOP_UNLOCK(devvp, 0, td);
588 	if (error)
589 		return (error);
590 	if (devvp->v_rdev->si_iosize_max > mp->mnt_iosize_max)
591 		mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
592 	if (mp->mnt_iosize_max > MAXPHYS)
593 		mp->mnt_iosize_max = MAXPHYS;
594 
595 	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, td) != 0)
596 		size = DEV_BSIZE;
597 	else
598 		size = dpart.disklab->d_secsize;
599 
600 	bp = NULL;
601 	ump = NULL;
602 	if ((error = bread(devvp, SBLOCK, SBSIZE, cred, &bp)) != 0)
603 		goto out;
604 	fs = (struct fs *)bp->b_data;
605 	if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
606 	    fs->fs_bsize < sizeof(struct fs)) {
607 		error = EINVAL;		/* XXX needs translation */
608 		goto out;
609 	}
610 	fs->fs_fmod = 0;
611 	fs->fs_flags &= ~FS_UNCLEAN;
612 	if (fs->fs_clean == 0) {
613 		fs->fs_flags |= FS_UNCLEAN;
614 		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
615 		    ((fs->fs_flags & FS_NEEDSFSCK) == 0 &&
616 		     (fs->fs_flags & FS_DOSOFTDEP))) {
617 			printf(
618 "WARNING: %s was not properly dismounted\n",
619 			    fs->fs_fsmnt);
620 		} else {
621 			printf(
622 "WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n",
623 			    fs->fs_fsmnt);
624 			error = EPERM;
625 			goto out;
626 		}
627 		if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
628 			printf("%s: lost blocks %d files %d\n", fs->fs_fsmnt,
629 			    fs->fs_pendingblocks, fs->fs_pendinginodes);
630 			fs->fs_pendingblocks = 0;
631 			fs->fs_pendinginodes = 0;
632 		}
633 	}
634 	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
635 		printf("%s: mount pending error: blocks %d files %d\n",
636 		    fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
637 		fs->fs_pendingblocks = 0;
638 		fs->fs_pendinginodes = 0;
639 	}
640 	/* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
641 	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
642 		error = EROFS;          /* needs translation */
643 		goto out;
644 	}
645 	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
646 	ump->um_malloctype = malloctype;
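	/* FFS maintains i_effnlink on its inodes, so mark it valid. */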
647 	ump->um_i_effnlink_valid = 1;
648 	ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT,
649 	    M_WAITOK);
650 	ump->um_blkatoff = ffs_blkatoff;
651 	ump->um_truncate = ffs_truncate;
652 	ump->um_update = ffs_update;
653 	ump->um_valloc = ffs_valloc;
654 	ump->um_vfree = ffs_vfree;
655 	ump->um_balloc = ffs_balloc;
656 	bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
657 	if (fs->fs_sbsize < SBSIZE)
658 		bp->b_flags |= B_INVAL | B_NOCACHE;
659 	brelse(bp);
660 	bp = NULL;
661 	fs = ump->um_fs;
662 	fs->fs_ronly = ronly;
663 	size = fs->fs_cssize;
664 	blks = howmany(size, fs->fs_fsize);
665 	if (fs->fs_contigsumsize > 0)
666 		size += fs->fs_ncg * sizeof(int32_t);
667 	size += fs->fs_ncg * sizeof(u_int8_t);
668 	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
669 	fs->fs_csp = space;
670 	for (i = 0; i < blks; i += fs->fs_frag) {
671 		size = fs->fs_bsize;
672 		if (i + fs->fs_frag > blks)
673 			size = (blks - i) * fs->fs_fsize;
674 		if ((error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
675 		    cred, &bp)) != 0) {
676 			free(fs->fs_csp, M_UFSMNT);
677 			goto out;
678 		}
679 		bcopy(bp->b_data, space, (u_int)size);
680 		space = (char *)space + size;
681 		brelse(bp);
682 		bp = NULL;
683 	}
684 	if (fs->fs_contigsumsize > 0) {
685 		fs->fs_maxcluster = lp = space;
686 		for (i = 0; i < fs->fs_ncg; i++)
687 			*lp++ = fs->fs_contigsumsize;
688 		space = lp;
689 	}
690 	size = fs->fs_ncg * sizeof(u_int8_t);
691 	fs->fs_contigdirs = (u_int8_t *)space;
692 	bzero(fs->fs_contigdirs, size);
693 	fs->fs_active = NULL;
694 	/* Compatibility for old filesystems 	   XXX */
695 	if (fs->fs_avgfilesize <= 0)		/* XXX */
696 		fs->fs_avgfilesize = AVFILESIZ;	/* XXX */
697 	if (fs->fs_avgfpdir <= 0)		/* XXX */
698 		fs->fs_avgfpdir = AFPDIR;	/* XXX */
699 	mp->mnt_data = (qaddr_t)ump;
700 	mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
701 	mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
702 	if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
703 	    vfs_getvfs(&mp->mnt_stat.f_fsid))
704 		vfs_getnewfsid(mp);
705 	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
706 	mp->mnt_flag |= MNT_LOCAL;
707 	ump->um_mountp = mp;
708 	ump->um_dev = dev;
709 	ump->um_devvp = devvp;
710 	ump->um_nindir = fs->fs_nindir;
711 	ump->um_bptrtodb = fs->fs_fsbtodb;
712 	ump->um_seqinc = fs->fs_frag;
713 	for (i = 0; i < MAXQUOTAS; i++)
714 		ump->um_quotas[i] = NULLVP;
715 #ifdef UFS_EXTATTR
716 	ufs_extattr_uepm_init(&ump->um_extattr);
717 #endif
718 	devvp->v_rdev->si_mountpoint = mp;
719 	ffs_oldfscompat(fs);
720 
721 	/*
722 	 * Set FS local "last mounted on" information (NULL pad)
723 	 */
724 	copystr(	mp->mnt_stat.f_mntonname,	/* mount point*/
725 			fs->fs_fsmnt,			/* copy area*/
726 			sizeof(fs->fs_fsmnt) - 1,	/* max size*/
727 			&strsize);			/* real size*/
728 	bzero( fs->fs_fsmnt + strsize, sizeof(fs->fs_fsmnt) - strsize);
729 
730 	if( mp->mnt_flag & MNT_ROOTFS) {
731 		/*
732 		 * Root mount; update timestamp in mount structure.
733 		 * This will be used by the common root mount code
734 		 * to update the system clock.
735 		 */
736 		mp->mnt_time = fs->fs_time;
737 	}
738 
739 	ump->um_savedmaxfilesize = fs->fs_maxfilesize;		/* XXX */
740 	maxfilesize = (u_int64_t)0x40000000 * fs->fs_bsize - 1;	/* XXX */
741 	if (fs->fs_maxfilesize > maxfilesize)			/* XXX */
742 		fs->fs_maxfilesize = maxfilesize;		/* XXX */
743 	if (bigcgs) {
744 		if (fs->fs_sparecon[0] <= 0)
745 			fs->fs_sparecon[0] = fs->fs_cgsize;
746 		fs->fs_cgsize = fs->fs_bsize;
747 	}
748 	if (ronly == 0) {
749 		if ((fs->fs_flags & FS_DOSOFTDEP) &&
750 		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
751 			free(fs->fs_csp, M_UFSMNT);
752 			goto out;
753 		}
754 		if (fs->fs_snapinum[0] != 0)
755 			ffs_snapshot_mount(mp);
756 		fs->fs_fmod = 1;
757 		fs->fs_clean = 0;
758 		(void) ffs_sbupdate(ump, MNT_WAIT);
759 	}
760 #ifdef UFS_EXTATTR
761 #ifdef UFS_EXTATTR_AUTOSTART
762 	/*
764 	 * Auto-starting does the following:
765 	 *	- check for /.attribute in the fs, and extattr_start if so
766 	 *	- for each file in .attribute, enable that file with
767 	 * 	  an attribute of the same name.
768 	 * Not clear how to report errors -- probably eat them.
769 	 * This would all happen while the file system was busy/not
770 	 * available, so would effectively be "atomic".
771 	 */
772 	(void) ufs_extattr_autostart(mp, td);
773 #endif /* !UFS_EXTATTR_AUTOSTART */
774 #endif /* !UFS_EXTATTR */
775 	return (0);
776 out:
777 	devvp->v_rdev->si_mountpoint = NULL;
778 	if (bp)
779 		brelse(bp);
780 	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, td);
781 	if (ump) {
782 		free(ump->um_fs, M_UFSMNT);
783 		free(ump, M_UFSMNT);
784 		mp->mnt_data = (qaddr_t)0;
785 	}
786 	return (error);
787 }
788 
789 /*
790  * Sanity checks for old file systems.
791  *
792  * XXX - goes away some day.
793  */
794 static int
795 ffs_oldfscompat(fs)
796 	struct fs *fs;
797 {
798 
799 	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
800 	fs->fs_interleave = max(fs->fs_interleave, 1);		/* XXX */
801 	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
802 		fs->fs_nrpos = 8;				/* XXX */
803 	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
804 #if 0
805 		int i;						/* XXX */
806 		u_int64_t sizepb = fs->fs_bsize;		/* XXX */
807 								/* XXX */
808 		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
809 		for (i = 0; i < NIADDR; i++) {			/* XXX */
810 			sizepb *= NINDIR(fs);			/* XXX */
811 			fs->fs_maxfilesize += sizepb;		/* XXX */
812 		}						/* XXX */
813 #endif
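		/*
		 * Rather than computing the exact limit from the indirect
		 * block geometry (the disabled code above), use a fixed
		 * 2^39 byte (512GB) maximum for old-format inodes.
		 */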
814 		fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
815 		fs->fs_qbmask = ~fs->fs_bmask;			/* XXX */
816 		fs->fs_qfmask = ~fs->fs_fmask;			/* XXX */
817 	}							/* XXX */
818 	return (0);
819 }
820 
821 /*
822  * unmount system call
823  */
824 int
825 ffs_unmount(mp, mntflags, td)
826 	struct mount *mp;
827 	int mntflags;
828 	struct thread *td;
829 {
830 	register struct ufsmount *ump = VFSTOUFS(mp);
831 	register struct fs *fs;
832 	int error, flags;
833 
834 	flags = 0;
835 	if (mntflags & MNT_FORCE) {
836 		flags |= FORCECLOSE;
837 	}
838 #ifdef UFS_EXTATTR
839 	if ((error = ufs_extattr_stop(mp, td))) {
840 		if (error != EOPNOTSUPP)
841 			printf("ffs_unmount: ufs_extattr_stop returned %d\n",
842 			    error);
843 	} else {
844 		ufs_extattr_uepm_destroy(&ump->um_extattr);
845 	}
846 #endif
847 	if (mp->mnt_flag & MNT_SOFTDEP) {
848 		if ((error = softdep_flushfiles(mp, flags, td)) != 0)
849 			return (error);
850 	} else {
851 		if ((error = ffs_flushfiles(mp, flags, td)) != 0)
852 			return (error);
853 	}
854 	fs = ump->um_fs;
855 	if (bigcgs) {
856 		fs->fs_cgsize = fs->fs_sparecon[0];
857 		fs->fs_sparecon[0] = 0;
858 	}
859 	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
860 		printf("%s: unmount pending error: blocks %d files %d\n",
861 		    fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
862 		fs->fs_pendingblocks = 0;
863 		fs->fs_pendinginodes = 0;
864 	}
865 	if (fs->fs_ronly == 0) {
866 		fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
867 		error = ffs_sbupdate(ump, MNT_WAIT);
868 		if (error) {
869 			fs->fs_clean = 0;
870 			return (error);
871 		}
872 	}
873 	ump->um_devvp->v_rdev->si_mountpoint = NULL;
874 
875 	vinvalbuf(ump->um_devvp, V_SAVE, NOCRED, td, 0, 0);
876 	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
877 		NOCRED, td);
878 
879 	vrele(ump->um_devvp);
880 
881 	free(fs->fs_csp, M_UFSMNT);
882 	free(fs, M_UFSMNT);
883 	free(ump, M_UFSMNT);
884 	mp->mnt_data = (qaddr_t)0;
885 	mp->mnt_flag &= ~MNT_LOCAL;
886 	return (error);
887 }
888 
889 /*
890  * Flush out all the files in a filesystem.
891  */
892 int
893 ffs_flushfiles(mp, flags, td)
894 	register struct mount *mp;
895 	int flags;
896 	struct thread *td;
897 {
898 	register struct ufsmount *ump;
899 	int error;
900 
901 	ump = VFSTOUFS(mp);
902 #ifdef QUOTA
903 	if (mp->mnt_flag & MNT_QUOTA) {
904 		int i;
905 		error = vflush(mp, 0, SKIPSYSTEM|flags);
906 		if (error)
907 			return (error);
908 		for (i = 0; i < MAXQUOTAS; i++) {
909 			if (ump->um_quotas[i] == NULLVP)
910 				continue;
911 			quotaoff(td, mp, i);
912 		}
913 		/*
914 		 * Here we fall through to vflush again to ensure
915 		 * that we have gotten rid of all the system vnodes.
916 		 */
917 	}
918 #endif
919 	if (ump->um_devvp->v_flag & VCOPYONWRITE) {
920 		if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
921 			return (error);
922 		ffs_snapshot_unmount(mp);
923 		/*
924 		 * Here we fall through to vflush again to ensure
925 		 * that we have gotten rid of all the system vnodes.
926 		 */
927 	}
928         /*
929 	 * Flush all the files.
930 	 */
931 	if ((error = vflush(mp, 0, flags)) != 0)
932 		return (error);
933 	/*
934 	 * Flush filesystem metadata.
935 	 */
936 	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, td);
937 	error = VOP_FSYNC(ump->um_devvp, td->td_proc->p_ucred, MNT_WAIT, td);
938 	VOP_UNLOCK(ump->um_devvp, 0, td);
939 	return (error);
940 }
941 
942 /*
943  * Get file system statistics.
944  */
945 int
946 ffs_statfs(mp, sbp, td)
947 	struct mount *mp;
948 	register struct statfs *sbp;
949 	struct thread *td;
950 {
951 	register struct ufsmount *ump;
952 	register struct fs *fs;
953 
954 	ump = VFSTOUFS(mp);
955 	fs = ump->um_fs;
956 	if (fs->fs_magic != FS_MAGIC)
957 		panic("ffs_statfs");
958 	sbp->f_bsize = fs->fs_fsize;
959 	sbp->f_iosize = fs->fs_bsize;
960 	sbp->f_blocks = fs->fs_dsize;
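	/*
	 * Blocks and inodes whose release is still pending (e.g. delayed
	 * by soft updates) are reported as free.
	 */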
961 	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
962 	    fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
963 	sbp->f_bavail = freespace(fs, fs->fs_minfree) +
964 	    dbtofsb(fs, fs->fs_pendingblocks);
965 	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - ROOTINO;
966 	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
967 	if (sbp != &mp->mnt_stat) {
968 		sbp->f_type = mp->mnt_vfc->vfc_typenum;
969 		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
970 			(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
971 		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
972 			(caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
973 	}
974 	return (0);
975 }
976 
977 /*
978  * Go through the disk queues to initiate sandbagged IO;
979  * go through the inodes to write those that have been modified;
980  * initiate the writing of the super block if it has been modified.
981  *
982  * Note: we are always called with the filesystem marked `MPBUSY'.
983  */
984 int
985 ffs_sync(mp, waitfor, cred, td)
986 	struct mount *mp;
987 	int waitfor;
988 	struct ucred *cred;
989 	struct thread *td;
990 {
991 	struct vnode *nvp, *vp, *devvp;
992 	struct inode *ip;
993 	struct ufsmount *ump = VFSTOUFS(mp);
994 	struct fs *fs;
995 	int error, count, wait, lockreq, allerror = 0;
996 
997 	fs = ump->um_fs;
998 	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {		/* XXX */
999 		printf("fs = %s\n", fs->fs_fsmnt);
1000 		panic("ffs_sync: rofs mod");
1001 	}
1002 	/*
1003 	 * Write back each (modified) inode.
1004 	 */
1005 	wait = 0;
1006 	lockreq = LK_EXCLUSIVE | LK_NOWAIT;
1007 	if (waitfor == MNT_WAIT) {
1008 		wait = 1;
1009 		lockreq = LK_EXCLUSIVE;
1010 	}
1011 	mtx_lock(&mntvnode_mtx);
1012 loop:
1013 	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
1014 		/*
1015 		 * If the vnode that we are about to sync is no longer
1016 		 * associated with this mount point, start over.
1017 		 */
1018 		if (vp->v_mount != mp)
1019 			goto loop;
1020 
1021 		/*
1022 		 * Depend on mntvnode_mtx to keep things stable enough
1023 		 * for a quick test.  Since there might be hundreds of
1024 		 * thousands of vnodes, we cannot afford even a subroutine
1025 		 * call unless there's a good chance that we have work to do.
1026 		 */
1027 		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
1028 		ip = VTOI(vp);
1029 		if (vp->v_type == VNON || ((ip->i_flag &
1030 		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
1031 		    TAILQ_EMPTY(&vp->v_dirtyblkhd))) {
1032 			continue;
1033 		}
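		/*
		 * Ordinary vnodes get a full VOP_FSYNC(); device special
		 * vnodes only have their inode metadata pushed out via
		 * UFS_UPDATE().
		 */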
1034 		if (vp->v_type != VCHR) {
1035 			mtx_unlock(&mntvnode_mtx);
1036 			if ((error = vget(vp, lockreq, td)) != 0) {
1037 				mtx_lock(&mntvnode_mtx);
1038 				if (error == ENOENT)
1039 					goto loop;
1040 			} else {
1041 				if ((error = VOP_FSYNC(vp, cred, waitfor, td)) != 0)
1042 					allerror = error;
1043 				VOP_UNLOCK(vp, 0, td);
1044 				vrele(vp);
1045 				mtx_lock(&mntvnode_mtx);
1046 			}
1047 		} else {
1048 			mtx_unlock(&mntvnode_mtx);
1049 			UFS_UPDATE(vp, wait);
1050 			mtx_lock(&mntvnode_mtx);
1051 		}
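		/*
		 * If the list changed while the mutex was dropped, the
		 * saved next pointer is stale; rescan from the beginning.
		 */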
1052 		if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp)
1053 			goto loop;
1054 	}
1055 	mtx_unlock(&mntvnode_mtx);
1056 	/*
1057 	 * Force stale file system control information to be flushed.
1058 	 */
1059 	if (waitfor == MNT_WAIT) {
1060 		if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
1061 			allerror = error;
1062 		/* Flushed work items may create new vnodes to clean */
1063 		if (count) {
1064 			mtx_lock(&mntvnode_mtx);
1065 			goto loop;
1066 		}
1067 	}
1068 #ifdef QUOTA
1069 	qsync(mp);
1070 #endif
1071 	devvp = ump->um_devvp;
1072 	mtx_lock(&devvp->v_interlock);
1073 	if (waitfor != MNT_LAZY &&
1074 	    (devvp->v_numoutput > 0 || TAILQ_FIRST(&devvp->v_dirtyblkhd))) {
1075 		mtx_unlock(&devvp->v_interlock);
1076 		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
1077 		if ((error = VOP_FSYNC(devvp, cred, waitfor, td)) != 0)
1078 			allerror = error;
1079 		VOP_UNLOCK(devvp, 0, td);
1080 		if (waitfor == MNT_WAIT) {
1081 			mtx_lock(&mntvnode_mtx);
1082 			goto loop;
1083 		}
1084 	} else
1085 		mtx_unlock(&devvp->v_interlock);
1086 	/*
1087 	 * Write back modified superblock.
1088 	 */
1089 	if (fs->fs_fmod != 0 && (error = ffs_sbupdate(ump, waitfor)) != 0)
1090 		allerror = error;
1091 	return (allerror);
1092 }
1093 
1094 /*
1095  * Look up a FFS dinode number to find its incore vnode, otherwise read it
1096  * in from disk.  If it is in core, wait for the lock bit to clear, then
1097  * return the inode locked.  Detection and handling of mount points must be
1098  * done by the calling routine.
1099  */
1100 static int ffs_inode_hash_lock;
1101 /*
1102  * ffs_inode_hash_lock is a variable to manage mutual exclusion
1103  * of vnode allocation and insertion into the hash, especially to
1104  * avoid holding more than one vnode for the same inode in the
1105  * hash table. ffs_inode_hash_lock must hence be tested-and-set
1106  * or cleared atomically, which is accomplished by ffs_inode_hash_mtx.
1107  *
1108  * As vnode allocation may block during MALLOC() and zone
1109  * allocation, we should also msleep() to give up the CPU
1110  * if anyone else is allocating a vnode. lockmgr is not suitable
1111  * here because someone else may insert the vnode we are trying
1112  * to allocate into the hash table while we sleep, in which
1113  * case the hash table needs to be examined once again after
1114  * waking up.
1115  */
1116 static struct mtx ffs_inode_hash_mtx;
1117 
1118 int
1119 ffs_vget(mp, ino, vpp)
1120 	struct mount *mp;
1121 	ino_t ino;
1122 	struct vnode **vpp;
1123 {
1124 	struct fs *fs;
1125 	struct inode *ip;
1126 	struct ufsmount *ump;
1127 	struct buf *bp;
1128 	struct vnode *vp;
1129 	dev_t dev;
1130 	int error, want_wakeup;
1131 
1132 	ump = VFSTOUFS(mp);
1133 	dev = ump->um_dev;
1134 restart:
1135 	if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
1136 		return (0);
1137 	}
1138 
1139 	/*
1140 	 * Lock out the creation of new entries in the FFS hash table in
1141 	 * case getnewvnode() or MALLOC() blocks; otherwise a duplicate
1142 	 * vnode for the same inode may be created!
1143 	 */
1144 	mtx_lock(&ffs_inode_hash_mtx);
1145 	if (ffs_inode_hash_lock) {
1146 		while (ffs_inode_hash_lock) {
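			/*
			 * Note that there are waiters (-1) so the holder
			 * knows to call wakeup() when it releases the lock.
			 */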
1147 			ffs_inode_hash_lock = -1;
1148 			msleep(&ffs_inode_hash_lock, &ffs_inode_hash_mtx, PVM, "ffsvgt", 0);
1149 		}
1150 		mtx_unlock(&ffs_inode_hash_mtx);
1151 		goto restart;
1152 	}
1153 	ffs_inode_hash_lock = 1;
1154 	mtx_unlock(&ffs_inode_hash_mtx);
1155 
1156 	/*
1157 	 * If this MALLOC() is performed after the getnewvnode(),
1158 	 * it might block, leaving a vnode with a NULL v_data to be
1159 	 * found by ffs_sync() if a sync happens to fire right then,
1160 	 * which will cause a panic because ffs_sync() blindly
1161 	 * dereferences vp->v_data (as well it should).
1162 	 */
1163 	MALLOC(ip, struct inode *, sizeof(struct inode),
1164 	    ump->um_malloctype, M_WAITOK);
1165 
1166 	/* Allocate a new vnode/inode. */
1167 	error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp);
1168 	if (error) {
1169 		/*
1170 		 * Do not wake up processes while holding the mutex;
1171 		 * otherwise the processes woken up would immediately
1172 		 * block on the mutex again.
1173 		 */
1174 		mtx_lock(&ffs_inode_hash_mtx);
1175 		want_wakeup = ffs_inode_hash_lock < 0;
1176 		ffs_inode_hash_lock = 0;
1177 		mtx_unlock(&ffs_inode_hash_mtx);
1178 		if (want_wakeup)
1179 			wakeup(&ffs_inode_hash_lock);
1180 		*vpp = NULL;
1181 		FREE(ip, ump->um_malloctype);
1182 		return (error);
1183 	}
1184 	bzero((caddr_t)ip, sizeof(struct inode));
1185 	/*
1186 	 * FFS supports lock sharing in the stack of vnodes
1187 	 */
1188 	vp->v_vnlock = &vp->v_lock;
1189 	lockinit(vp->v_vnlock, PINOD, "inode", 0, LK_CANRECURSE);
1190 	vp->v_data = ip;
1191 	ip->i_vnode = vp;
1192 	ip->i_fs = fs = ump->um_fs;
1193 	ip->i_dev = dev;
1194 	ip->i_number = ino;
1195 #ifdef QUOTA
1196 	{
1197 		int i;
1198 		for (i = 0; i < MAXQUOTAS; i++)
1199 			ip->i_dquot[i] = NODQUOT;
1200 	}
1201 #endif
1202 	/*
1203 	 * Put it onto its hash chain and lock it so that other requests for
1204 	 * this inode will block if they arrive while we are sleeping waiting
1205 	 * for old data structures to be purged or for the contents of the
1206 	 * disk portion of this inode to be read.
1207 	 */
1208 	ufs_ihashins(ip);
1209 
1210 	/*
1211 	 * Do not wake up processes while holding the mutex;
1212 	 * otherwise the processes woken up would immediately
1213 	 * block on the mutex again.
1214 	 */
1215 	mtx_lock(&ffs_inode_hash_mtx);
1216 	want_wakeup = ffs_inode_hash_lock < 0;
1217 	ffs_inode_hash_lock = 0;
1218 	mtx_unlock(&ffs_inode_hash_mtx);
1219 	if (want_wakeup)
1220 		wakeup(&ffs_inode_hash_lock);
1221 
1222 	/* Read in the disk contents for the inode, copy into the inode. */
1223 	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1224 	    (int)fs->fs_bsize, NOCRED, &bp);
1225 	if (error) {
1226 		/*
1227 		 * The inode does not contain anything useful, so it would
1228 		 * be misleading to leave it on its hash chain. With mode
1229 		 * still zero, it will be unlinked and returned to the free
1230 		 * list by vput().
1231 		 */
1232 		brelse(bp);
1233 		vput(vp);
1234 		*vpp = NULL;
1235 		return (error);
1236 	}
1237 	ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino));
1238 	if (DOINGSOFTDEP(vp))
1239 		softdep_load_inodeblock(ip);
1240 	else
1241 		ip->i_effnlink = ip->i_nlink;
1242 	bqrelse(bp);
1243 
1244 	/*
1245 	 * Initialize the vnode from the inode, check for aliases.
1246 	 * Note that the underlying vnode may have changed.
1247 	 */
1248 	error = ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
1249 	if (error) {
1250 		vput(vp);
1251 		*vpp = NULL;
1252 		return (error);
1253 	}
1254 	/*
1255 	 * Finish inode initialization now that aliasing has been resolved.
1256 	 */
1257 	ip->i_devvp = ump->um_devvp;
1258 	VREF(ip->i_devvp);
1259 	/*
1260 	 * Set up a generation number for this inode if it does not
1261 	 * already have one. This should only happen on old filesystems.
1262 	 */
1263 	if (ip->i_gen == 0) {
1264 		ip->i_gen = random() / 2 + 1;
1265 		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
1266 			ip->i_flag |= IN_MODIFIED;
1267 	}
1268 	/*
1269 	 * Ensure that uid and gid are correct. This is a temporary
1270 	 * fix until fsck has been changed to do the update.
1271 	 */
1272 	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
1273 		ip->i_uid = ip->i_din.di_ouid;		/* XXX */
1274 		ip->i_gid = ip->i_din.di_ogid;		/* XXX */
1275 	}						/* XXX */
1276 
1277 	*vpp = vp;
1278 	return (0);
1279 }
1280 
1281 /*
1282  * File handle to vnode
1283  *
1284  * Have to be really careful about stale file handles:
1285  * - check that the inode number is valid
1286  * - call ffs_vget() to get the locked inode
1287  * - check for an unallocated inode (i_mode == 0)
1288  * - check that the given client host has export rights and return
1289  *   those rights via exflagsp and credanonp
1290  */
1291 int
1292 ffs_fhtovp(mp, fhp, vpp)
1293 	register struct mount *mp;
1294 	struct fid *fhp;
1295 	struct vnode **vpp;
1296 {
1297 	register struct ufid *ufhp;
1298 	struct fs *fs;
1299 
1300 	ufhp = (struct ufid *)fhp;
1301 	fs = VFSTOUFS(mp)->um_fs;
1302 	if (ufhp->ufid_ino < ROOTINO ||
1303 	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1304 		return (ESTALE);
1305 	return (ufs_fhtovp(mp, ufhp, vpp));
1306 }
1307 
1308 /*
1309  * Vnode pointer to File handle
1310  */
1311 /* ARGSUSED */
1312 int
1313 ffs_vptofh(vp, fhp)
1314 	struct vnode *vp;
1315 	struct fid *fhp;
1316 {
1317 	register struct inode *ip;
1318 	register struct ufid *ufhp;
1319 
1320 	ip = VTOI(vp);
1321 	ufhp = (struct ufid *)fhp;
1322 	ufhp->ufid_len = sizeof(struct ufid);
1323 	ufhp->ufid_ino = ip->i_number;
1324 	ufhp->ufid_gen = ip->i_gen;
1325 	return (0);
1326 }
1327 
1328 /*
1329  * Initialize the filesystem: set up soft updates and then use ufs_init.
1330  */
1331 static int
1332 ffs_init(vfsp)
1333 	struct vfsconf *vfsp;
1334 {
1335 
1336 	softdep_initialize();
1337 	mtx_init(&ffs_inode_hash_mtx, "ifsvgt", MTX_DEF);
1338 	return (ufs_init(vfsp));
1339 }
1340 
1341 /*
1342  * Write a superblock and associated information back to disk.
1343  */
1344 static int
1345 ffs_sbupdate(mp, waitfor)
1346 	struct ufsmount *mp;
1347 	int waitfor;
1348 {
1349 	register struct fs *dfs, *fs = mp->um_fs;
1350 	register struct buf *bp;
1351 	int blks;
1352 	void *space;
1353 	int i, size, error, allerror = 0;
1354 
1355 	/*
1356 	 * First write back the summary information.
1357 	 */
1358 	blks = howmany(fs->fs_cssize, fs->fs_fsize);
1359 	space = fs->fs_csp;
1360 	for (i = 0; i < blks; i += fs->fs_frag) {
1361 		size = fs->fs_bsize;
1362 		if (i + fs->fs_frag > blks)
1363 			size = (blks - i) * fs->fs_fsize;
1364 		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1365 		    size, 0, 0);
1366 		bcopy(space, bp->b_data, (u_int)size);
1367 		space = (char *)space + size;
1368 		if (waitfor != MNT_WAIT)
1369 			bawrite(bp);
1370 		else if ((error = bwrite(bp)) != 0)
1371 			allerror = error;
1372 	}
1373 	/*
1374 	 * Now write back the superblock itself. If any errors occurred
1375 	 * up to this point, then fail so that the superblock avoids
1376 	 * being written out as clean.
1377 	 */
1378 	if (allerror)
1379 		return (allerror);
1380 	bp = getblk(mp->um_devvp, SBLOCK, (int)fs->fs_sbsize, 0, 0);
1381 	fs->fs_fmod = 0;
1382 	fs->fs_time = time_second;
1383 	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
1384 	/* Restore compatibility to old file systems.		   XXX */
1385 	dfs = (struct fs *)bp->b_data;				/* XXX */
1386 	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
1387 		dfs->fs_nrpos = -1;				/* XXX */
1388 	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
1389 		int32_t *lp, tmp;				/* XXX */
1390 								/* XXX */
1391 		lp = (int32_t *)&dfs->fs_qbmask;		/* XXX */
1392 		tmp = lp[4];					/* XXX */
1393 		for (i = 4; i > 0; i--)				/* XXX */
1394 			lp[i] = lp[i-1];			/* XXX */
1395 		lp[0] = tmp;					/* XXX */
1396 	}							/* XXX */
1397 	dfs->fs_maxfilesize = mp->um_savedmaxfilesize;		/* XXX */
1398 	if (waitfor != MNT_WAIT)
1399 		bawrite(bp);
1400 	else if ((error = bwrite(bp)) != 0)
1401 		allerror = error;
1402 	return (allerror);
1403 }
1404