/*-
 * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
 *	@(#)ffs_vnops.c	8.15 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/extattr.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include "opt_directio.h"
#include "opt_ffs.h"

#ifdef DIRECTIO
extern int	ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
#endif
static vop_fsync_t	ffs_fsync;
static vop_lock1_t	ffs_lock;
static vop_getpages_t	ffs_getpages;
static vop_read_t	ffs_read;
static vop_write_t	ffs_write;
static int	ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
static int	ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
		    struct ucred *cred);
static vop_strategy_t	ffsext_strategy;
static vop_closeextattr_t	ffs_closeextattr;
static vop_deleteextattr_t	ffs_deleteextattr;
static vop_getextattr_t	ffs_getextattr;
static vop_listextattr_t	ffs_listextattr;
static vop_openextattr_t	ffs_openextattr;
static vop_setextattr_t	ffs_setextattr;
static vop_vptofh_t	ffs_vptofh;

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops1 = {
	.vop_default =		&ufs_vnodeops,
	.vop_fsync =		ffs_fsync,
	.vop_getpages =		ffs_getpages,
	.vop_lock1 =		ffs_lock,
	.vop_read =		ffs_read,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_write =		ffs_write,
	.vop_vptofh =		ffs_vptofh,
};

struct vop_vector ffs_fifoops1 = {
	.vop_default =		&ufs_fifoops,
	.vop_fsync =		ffs_fsync,
	.vop_reallocblks =	ffs_reallocblks, /* XXX: really ??? */
	.vop_vptofh =		ffs_vptofh,
};

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops2 = {
	.vop_default =		&ufs_vnodeops,
	.vop_fsync =		ffs_fsync,
	.vop_getpages =		ffs_getpages,
	.vop_lock1 =		ffs_lock,
	.vop_read =		ffs_read,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_write =		ffs_write,
	.vop_closeextattr =	ffs_closeextattr,
	.vop_deleteextattr =	ffs_deleteextattr,
	.vop_getextattr =	ffs_getextattr,
	.vop_listextattr =	ffs_listextattr,
	.vop_openextattr =	ffs_openextattr,
	.vop_setextattr =	ffs_setextattr,
	.vop_vptofh =		ffs_vptofh,
};

struct vop_vector ffs_fifoops2 = {
	.vop_default =		&ufs_fifoops,
	.vop_fsync =		ffs_fsync,
	.vop_lock1 =		ffs_lock,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_strategy =		ffsext_strategy,
	.vop_closeextattr =	ffs_closeextattr,
	.vop_deleteextattr =	ffs_deleteextattr,
	.vop_getextattr =	ffs_getextattr,
	.vop_listextattr =	ffs_listextattr,
	.vop_openextattr =	ffs_openextattr,
	.vop_setextattr =	ffs_setextattr,
	.vop_vptofh =		ffs_vptofh,
};

/*
 * Synch an open file.
 */
/* ARGSUSED */
static int
ffs_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	int error;

	vp = ap->a_vp;
	bo = &vp->v_bufobj;
retry:
	error = ffs_syncvnode(vp, ap->a_waitfor);
	if (error)
		return (error);
	if (ap->a_waitfor == MNT_WAIT &&
	    (vp->v_mount->mnt_flag & MNT_SOFTDEP)) {
		error = softdep_fsync(vp);
		if (error)
			return (error);

		/*
		 * The softdep_fsync() function may drop the vnode lock,
		 * allowing dirty buffers to reappear on the bo_dirty
		 * list. Recheck and resync as needed.
		 */
		BO_LOCK(bo);
		if (vp->v_type == VREG && (bo->bo_numoutput > 0 ||
		    bo->bo_dirty.bv_cnt > 0)) {
			BO_UNLOCK(bo);
			goto retry;
		}
		BO_UNLOCK(bo);
	}
	return (0);
}

int
ffs_syncvnode(struct vnode *vp, int waitfor)
{
	struct inode *ip = VTOI(vp);
	struct bufobj *bo;
	struct buf *bp;
	struct buf *nbp;
	int s, error, wait, passes, skipmeta;
	ufs_lbn_t lbn;

	wait = (waitfor == MNT_WAIT);
	lbn = lblkno(ip->i_fs, (ip->i_size + ip->i_fs->fs_bsize - 1));
	bo = &vp->v_bufobj;
	ip->i_flag &= ~IN_NEEDSYNC;

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
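	/*
	 * A hedged note on the pass count chosen below: NIADDR + 1 is
	 * presumably one pass for the data blocks plus one per level of
	 * indirect block (NIADDR is the number of indirect levels in the
	 * inode), since flushing one level can dirty the level above it.
	 */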
	passes = NIADDR + 1;
	skipmeta = 0;
	if (wait)
		skipmeta = 1;
	s = splbio();
	BO_LOCK(bo);
loop:
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
		bp->b_vflags &= ~BV_SCANNED;
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		/*
		 * Reasons to skip this buffer: it has already been considered
		 * on this pass, this pass is the first time through on a
		 * synchronous flush request and the buffer being considered
		 * is metadata, the buffer has dependencies that will cause
		 * it to be redirtied and it has not already been deferred,
		 * or it is already being written.
		 */
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if ((skipmeta == 1 && bp->b_lblkno < 0))
			continue;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		BO_UNLOCK(bo);
		if (!wait && !LIST_EMPTY(&bp->b_dep) &&
		    (bp->b_flags & B_DEFERRED) == 0 &&
		    buf_countdeps(bp, 0)) {
			bp->b_flags |= B_DEFERRED;
			BUF_UNLOCK(bp);
			BO_LOCK(bo);
			continue;
		}
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("ffs_fsync: not dirty");
		/*
		 * If this is a synchronous flush request, or it is not a
		 * file or device, start the write on this buffer immediately.
		 */
		if (wait || (vp->v_type != VREG && vp->v_type != VBLK)) {

			/*
			 * On our final pass through, do all I/O synchronously
			 * so that we can find out if our flush is failing
			 * because of write errors.
			 */
			if (passes > 0 || !wait) {
				if ((bp->b_flags & B_CLUSTEROK) && !wait) {
					(void) vfs_bio_awrite(bp);
				} else {
					bremfree(bp);
					splx(s);
					(void) bawrite(bp);
					s = splbio();
				}
			} else {
				bremfree(bp);
				splx(s);
				if ((error = bwrite(bp)) != 0)
					return (error);
				s = splbio();
			}
		} else if ((vp->v_type == VREG) && (bp->b_lblkno >= lbn)) {
			/*
			 * If the buffer is for data that has been truncated
			 * off the file, then throw it away.
			 */
			bremfree(bp);
			bp->b_flags |= B_INVAL | B_NOCACHE;
			splx(s);
			brelse(bp);
			s = splbio();
		} else
			vfs_bio_awrite(bp);

		/*
		 * Since we may have slept during the I/O, we need
		 * to start from a known point.
		 */
		BO_LOCK(bo);
		nbp = TAILQ_FIRST(&bo->bo_dirty.bv_hd);
	}
	/*
	 * If we were asked to do this synchronously, then go back for
	 * another pass, this time doing the metadata.
	 */
	if (skipmeta) {
		skipmeta = 0;
		goto loop;
	}

	if (wait) {
		bufobj_wwait(bo, 3, 0);
		BO_UNLOCK(bo);

		/*
		 * Ensure that any filesystem metadata associated
		 * with the vnode has been written.
		 */
		splx(s);
		if ((error = softdep_sync_metadata(vp)) != 0)
			return (error);
		s = splbio();

		BO_LOCK(bo);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * Block devices associated with filesystems may
			 * have new I/O requests posted for them even if
			 * the vnode is locked, so no amount of trying will
			 * get them clean. Thus we give block devices a
			 * good effort, then just give up. For all other file
			 * types, go around and try again until it is clean.
			 */
			if (passes > 0) {
				passes -= 1;
				goto loop;
			}
#ifdef INVARIANTS
			if (!vn_isdisk(vp, NULL))
				vprint("ffs_fsync: dirty", vp);
#endif
		}
	}
	BO_UNLOCK(bo);
	splx(s);
	return (ffs_update(vp, wait));
}

static int
ffs_lock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
		char *file;
		int line;
	} */ *ap;
{
#ifndef NO_FFS_SNAPSHOT
	struct vnode *vp;
	int flags;
	struct lock *lkp;
	int result;

	switch (ap->a_flags & LK_TYPE_MASK) {
	case LK_SHARED:
	case LK_UPGRADE:
	case LK_EXCLUSIVE:
		vp = ap->a_vp;
		flags = ap->a_flags;
		for (;;) {
#ifdef DEBUG_VFS_LOCKS
			KASSERT(vp->v_holdcnt != 0,
			    ("ffs_lock %p: zero hold count", vp));
#endif
			lkp = vp->v_vnlock;
			result = _lockmgr_args(lkp, flags, VI_MTX(vp),
			    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
			    ap->a_file, ap->a_line);
			if (lkp == vp->v_vnlock || result != 0)
				break;
			/*
			 * Apparent success, except that the vnode
			 * mutated between snapshot file vnode and
			 * regular file vnode while this process
			 * slept.  The lock currently held is not the
			 * right lock.  Release it, and try to get the
			 * new lock.
			 */
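			/*
			 * (A hedged aside: FFS snapshot vnodes share a
			 * per-snapshot lock, so v_vnlock is presumably
			 * repointed when a file becomes, or ceases to be,
			 * a snapshot; that is how the lock identity can
			 * change while this thread slept.)
			 */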
			(void) _lockmgr_args(lkp, LK_RELEASE, NULL,
			    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
			    ap->a_file, ap->a_line);
			if ((flags & (LK_INTERLOCK | LK_NOWAIT)) ==
			    (LK_INTERLOCK | LK_NOWAIT))
				return (EBUSY);
			if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
				flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
			flags &= ~LK_INTERLOCK;
		}
		break;
	default:
		result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
	}
	return (result);
#else
	return (VOP_LOCK1_APV(&ufs_vnodeops, ap));
#endif
}

/*
 * Vnode op for reading.
 */
/* ARGSUSED */
static int
ffs_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, orig_resid;
	int seqcount;
	int ioflag;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extread(vp, uio, ioflag));
#else
		panic("ffs_read+IO_EXT");
#endif
#ifdef DIRECTIO
	if ((ioflag & IO_DIRECT) != 0) {
		int workdone;

		error = ffs_rawread(vp, uio, &workdone);
		if (error != 0 || workdone != 0)
			return (error);
	}
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_READ)
		panic("ffs_read: mode");

	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("ffs_read: short symlink");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ffs_read: type %d", vp->v_type);
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0"));
	fs = ip->i_fs;
	if (uio->uio_offset < ip->i_size &&
	    uio->uio_offset >= fs->fs_maxfilesize)
		return (EOVERFLOW);

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * Size of buffer.  The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type (fragment or full block,
		 * depending).
		 */
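		/*
		 * For example (illustrative numbers): with 16 KB blocks
		 * and 2 KB fragments, the last block of an 18 KB file is
		 * a single 2 KB fragment, so blksize() returns 2048
		 * rather than 16384.
		 */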
		size = blksize(fs, ip, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one filesystem block minus the amount of data that
		 * precedes our start point.
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= ip->i_size) {
			/*
			 * Don't do readahead if this is the end of the file.
			 */
			error = bread(vp, lbn, size, NOCRED, &bp);
		} else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
			/*
			 * Otherwise if we are allowed to cluster,
			 * grab as much as we can.
			 *
			 * XXX  This may not be a win if we are not
			 * doing sequential access.
			 */
			error = cluster_read(vp, ip->i_size, lbn, size,
			    NOCRED, blkoffset + uio->uio_resid, seqcount, &bp);
		} else if (seqcount > 1) {
			/*
			 * If we are NOT allowed to cluster, then
			 * if we appear to be acting sequentially,
			 * fire off a request for a readahead
			 * as well as a read. Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			int nextsize = blksize(fs, ip, nextlbn);
			error = breadn(vp, lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		} else {
			/*
			 * Failing all of the above, just read what the
			 * user asked for. Interestingly, the same as
			 * the first option above.
			 */
			error = bread(vp, lbn, size, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		   (LIST_EMPTY(&bp->b_dep))) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing. The VM has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it. We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * bp can be non-NULL here only if an error occurred: the loop
	 * above resets bp to NULL on each iteration and, on normal
	 * completion, never assigns a new value to it, so a non-NULL
	 * bp must have come from a 'break' statement.
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		   (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}

	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0 &&
	    (ip->i_flag & IN_ACCESS) == 0) {
		VI_LOCK(vp);
		ip->i_flag |= IN_ACCESS;
		VI_UNLOCK(vp);
	}
	return (error);
}

/*
 * Vnode op for writing.
 */
static int
ffs_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	struct thread *td;
	ufs_lbn_t lbn;
	off_t osize;
	int seqcount;
	int blkoffset, error, flags, ioflag, resid, size, xfersize;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extwrite(vp, uio, ioflag, ap->a_cred));
#else
		panic("ffs_write+IO_EXT");
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_WRITE)
		panic("ffs_write: mode");
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		panic("ffs_write: dir write");
		break;
	default:
		panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
			(int)uio->uio_offset,
			(int)uio->uio_resid
		);
	}

	KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0"));
	KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0"));
	fs = ip->i_fs;
	if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
		return (EFBIG);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	td = uio->uio_td;
	if (vp->v_type == VREG && td != NULL) {
		PROC_LOCK(td->td_proc);
		if (uio->uio_offset + uio->uio_resid >
		    lim_cur(td->td_proc, RLIMIT_FSIZE)) {
			psignal(td->td_proc, SIGXFSZ);
			PROC_UNLOCK(td->td_proc);
			return (EFBIG);
		}
		PROC_UNLOCK(td->td_proc);
	}

	resid = uio->uio_resid;
	osize = ip->i_size;
	if (seqcount > BA_SEQMAX)
		flags = BA_SEQMAX << BA_SEQSHIFT;
	else
		flags = seqcount << BA_SEQSHIFT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (uio->uio_offset + xfersize > ip->i_size)
			vnode_pager_setsize(vp, uio->uio_offset + xfersize);

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
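		/*
		 * For example (illustrative numbers): overwriting 512
		 * bytes in the middle of an 8 KB block leaves 7680 bytes
		 * that must keep their current contents, so BA_CLRBUF is
		 * set to have UFS_BALLOC() read (or zero-fill) the block
		 * before the uiomove() below copies the new bytes in.
		 */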
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		/* XXX is uio->uio_offset the right thing here? */
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ap->a_cred, flags, &bp);
		if (error != 0) {
			vnode_pager_setsize(vp, ip->i_size);
			break;
		}
		/*
		 * If the buffer is not valid we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland
		 * mmap().  XXX deal with uiomove() errors a better way.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
			vfs_bio_clrbuf(bp);
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;
		if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
			bp->b_flags |= B_NOCACHE;

		if (uio->uio_offset + xfersize > ip->i_size) {
			ip->i_size = uio->uio_offset + xfersize;
			DIP_SET(ip, i_size, ip->i_size);
		}

		size = blksize(fs, ip, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		   (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
			    buf_dirty_count_severe() ||
			    (ioflag & IO_ASYNC)) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else if (xfersize + blkoffset == fs->fs_bsize) {
			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
				bp->b_flags |= B_CLUSTEROK;
				cluster_write(vp, bp, ip->i_size, seqcount);
			} else {
				bawrite(bp);
			}
		} else if (ioflag & IO_DIRECT) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
	    ap->a_cred) {
		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0)) {
			ip->i_mode &= ~(ISUID | ISGID);
			DIP_SET(ip, i_mode, ip->i_mode);
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_NORMAL | (ioflag & IO_SYNC),
			    ap->a_cred, uio->uio_td);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}

/*
 * get page routine
 */
static int
ffs_getpages(ap)
	struct vop_getpages_args *ap;
{
	int i;
	vm_page_t mreq;
	int pcount;

	pcount = round_page(ap->a_count) / PAGE_SIZE;
	mreq = ap->a_m[ap->a_reqpage];

	/*
	 * If ANY DEV_BSIZE blocks are valid on a large filesystem block,
	 * then the entire page is valid.  Since the page may be mapped,
	 * user programs might reference data beyond the actual end of file
	 * occurring within the page.  We have to zero that data.
	 */
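	/*
	 * For example (illustrative numbers): with 4 KB pages, a
	 * 10000-byte file backs the page covering offsets 8192-12287
	 * with valid data only up to byte 9999; vm_page_zero_invalid()
	 * zeroes the remainder so a mapped reader cannot see stale data.
	 */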
	VM_OBJECT_LOCK(mreq->object);
	if (mreq->valid) {
		if (mreq->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(mreq, TRUE);
		vm_page_lock_queues();
		for (i = 0; i < pcount; i++) {
			if (i != ap->a_reqpage) {
				vm_page_free(ap->a_m[i]);
			}
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(mreq->object);
		return (VM_PAGER_OK);
	}
	VM_OBJECT_UNLOCK(mreq->object);

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage));
}

/*
 * Extended attribute area reading.
 */
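/*
 * Note (derived from the code below): the extended attribute area is
 * addressed with negative logical block numbers; ext block N is read
 * as logical block -1 - N, which keeps EA blocks distinct from the
 * file's data blocks (see also ffsext_strategy()).
 */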
static int
ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, orig_resid;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extread: mode");
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * Size of buffer.  The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type (fragment or full block,
		 * depending).
		 */
		size = sblksize(fs, dp->di_extsize, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one filesystem block minus the amount of data that
		 * precedes our start point.
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
			/*
			 * Don't do readahead if this is the end of the
			 * extended attribute area.
			 */
			error = bread(vp, -1 - lbn, size, NOCRED, &bp);
		} else {
			/*
			 * If we have a second block, then
			 * fire off a request for a readahead
			 * as well as a read. Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			int nextsize = sblksize(fs, dp->di_extsize, nextlbn);

			nextlbn = -1 - nextlbn;
			error = breadn(vp, -1 - lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		   (LIST_EMPTY(&bp->b_dep))) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing. The VM has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it. We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * bp can be non-NULL here only if an error occurred: the loop
	 * above resets bp to NULL on each iteration and, on normal
	 * completion, never assigns a new value to it, so a non-NULL
	 * bp must have come from a 'break' statement.
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		   (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}
	return (error);
}

/*
 * Extended attribute area writing.
 */
static int
ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	off_t osize;
	int blkoffset, error, flags, resid, size, xfersize;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;

	KASSERT(!(ip->i_flag & IN_SPACECOUNTED), ("inode %u: inode is dead",
	    ip->i_number));

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extwrite: mode");
#endif

	if (ioflag & IO_APPEND)
		uio->uio_offset = dp->di_extsize;
	KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0"));
	KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0"));
	if ((uoff_t)uio->uio_offset + uio->uio_resid > NXADDR * fs->fs_bsize)
		return (EFBIG);

	resid = uio->uio_resid;
	osize = dp->di_extsize;
	flags = IO_EXT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ucred, flags, &bp);
		if (error != 0)
			break;
		/*
		 * If the buffer is not valid we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland
		 * mmap().  XXX deal with uiomove() errors a better way.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
			vfs_bio_clrbuf(bp);
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		if (uio->uio_offset + xfersize > dp->di_extsize)
			dp->di_extsize = uio->uio_offset + xfersize;

		size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		   (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
			    buf_dirty_count_severe() ||
			    xfersize + blkoffset == fs->fs_bsize ||
			    (ioflag & (IO_ASYNC | IO_DIRECT)))
			bawrite(bp);
		else
			bdwrite(bp);
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ucred) {
		if (priv_check_cred(ucred, PRIV_VFS_RETAINSUGID, 0)) {
			ip->i_mode &= ~(ISUID | ISGID);
			dp->di_mode = ip->i_mode;
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_EXT | (ioflag & IO_SYNC), ucred, uio->uio_td);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}

/*
 * Helper to locate a named extended attribute.
 *
 * Locate a particular EA (nspace:name) in the area (ptr:length), and return
 * the length of the EA, and possibly the pointer to the entry and to the data.
 */
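/*
 * On-disk EA record layout, as implied by the parsing below and by
 * ffs_setextattr():
 *
 *	uint32_t length;	total record length, a multiple of 8
 *	uint8_t  namespace;	attribute namespace
 *	uint8_t  contentpadlen;	trailing pad after the content (eapad2)
 *	uint8_t  namelen;	length of the name that follows
 *	char     name[namelen];	attribute name, then eapad1 zero bytes
 *				so the content starts 8-byte aligned
 *	char     content[];	attribute data, then contentpadlen zeros
 */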
static int
ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name, u_char **eap, u_char **eac)
{
	u_char *p, *pe, *pn, *p0;
	int eapad1, eapad2, ealength, ealen, nlen;
	uint32_t ul;

	pe = ptr + length;
	nlen = strlen(name);

	for (p = ptr; p < pe; p = pn) {
		p0 = p;
		bcopy(p, &ul, sizeof(ul));
		pn = p + ul;
		/* make sure this entry is complete */
		if (pn > pe)
			break;
		p += sizeof(uint32_t);
		if (*p != nspace)
			continue;
		p++;
		eapad2 = *p++;
		if (*p != nlen)
			continue;
		p++;
		if (bcmp(p, name, nlen))
			continue;
		ealength = sizeof(uint32_t) + 3 + nlen;
		eapad1 = 8 - (ealength % 8);
		if (eapad1 == 8)
			eapad1 = 0;
		ealength += eapad1;
		ealen = ul - ealength - eapad2;
		p += nlen + eapad1;
		if (eap != NULL)
			*eap = p0;
		if (eac != NULL)
			*eac = p;
		return (ealen);
	}
	return (-1);
}

static int
ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td, int extra)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct uio luio;
	struct iovec liovec;
	int easize, error;
	u_char *eae;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;
	easize = dp->di_extsize;
	if ((uoff_t)easize + extra > NXADDR * fs->fs_bsize)
		return (EFBIG);

	eae = malloc(easize + extra, M_TEMP, M_WAITOK);

	liovec.iov_base = eae;
	liovec.iov_len = easize;
	luio.uio_iov = &liovec;
	luio.uio_iovcnt = 1;
	luio.uio_offset = 0;
	luio.uio_resid = easize;
	luio.uio_segflg = UIO_SYSSPACE;
	luio.uio_rw = UIO_READ;
	luio.uio_td = td;

	error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC);
	if (error) {
		free(eae, M_TEMP);
		return (error);
	}
	*p = eae;
	return (0);
}

static void
ffs_lock_ea(struct vnode *vp)
{
	struct inode *ip;

	ip = VTOI(vp);
	VI_LOCK(vp);
	while (ip->i_flag & IN_EA_LOCKED) {
		ip->i_flag |= IN_EA_LOCKWAIT;
		msleep(&ip->i_ea_refs, &vp->v_interlock, PINOD + 2, "ufs_ea",
		    0);
	}
	ip->i_flag |= IN_EA_LOCKED;
	VI_UNLOCK(vp);
}

static void
ffs_unlock_ea(struct vnode *vp)
{
	struct inode *ip;

	ip = VTOI(vp);
	VI_LOCK(vp);
	if (ip->i_flag & IN_EA_LOCKWAIT)
		wakeup(&ip->i_ea_refs);
	ip->i_flag &= ~(IN_EA_LOCKED | IN_EA_LOCKWAIT);
	VI_UNLOCK(vp);
}

static int
ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	int error;

	ip = VTOI(vp);

	ffs_lock_ea(vp);
	if (ip->i_ea_area != NULL) {
		ip->i_ea_refs++;
		ffs_unlock_ea(vp);
		return (0);
	}
	dp = ip->i_din2;
	error = ffs_rdextattr(&ip->i_ea_area, vp, td, 0);
	if (error) {
		ffs_unlock_ea(vp);
		return (error);
	}
	ip->i_ea_len = dp->di_extsize;
	ip->i_ea_error = 0;
	ip->i_ea_refs++;
	ffs_unlock_ea(vp);
	return (0);
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred, struct thread *td)
{
	struct inode *ip;
	struct uio luio;
	struct iovec liovec;
	int error;
	struct ufs2_dinode *dp;

	ip = VTOI(vp);

	ffs_lock_ea(vp);
	if (ip->i_ea_area == NULL) {
		ffs_unlock_ea(vp);
		return (EINVAL);
	}
	dp = ip->i_din2;
	error = ip->i_ea_error;
	if (commit && error == 0) {
		ASSERT_VOP_ELOCKED(vp, "ffs_close_ea commit");
		if (cred == NOCRED)
			cred = vp->v_mount->mnt_cred;
		liovec.iov_base = ip->i_ea_area;
		liovec.iov_len = ip->i_ea_len;
		luio.uio_iov = &liovec;
		luio.uio_iovcnt = 1;
		luio.uio_offset = 0;
		luio.uio_resid = ip->i_ea_len;
		luio.uio_segflg = UIO_SYSSPACE;
		luio.uio_rw = UIO_WRITE;
		luio.uio_td = td;
		/* XXX: I'm not happy about truncating to zero size */
		if (ip->i_ea_len < dp->di_extsize)
			error = ffs_truncate(vp, 0, IO_EXT, cred, td);
		error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
	}
	if (--ip->i_ea_refs == 0) {
		free(ip->i_ea_area, M_TEMP);
		ip->i_ea_area = NULL;
		ip->i_ea_len = 0;
		ip->i_ea_error = 0;
	}
	ffs_unlock_ea(vp);
	return (error);
}

/*
 * Vnode extattr strategy routine for fifos.
 *
 * We need to check for a read or write of the external attributes.
 * Otherwise we just fall through and do the usual thing.
 */
static int
ffsext_strategy(struct vop_strategy_args *ap)
/*
struct vop_strategy_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	struct buf *a_bp;
};
*/
{
	struct vnode *vp;
	daddr_t lbn;

	vp = ap->a_vp;
	lbn = ap->a_bp->b_lblkno;
	if (VTOI(vp)->i_fs->fs_magic == FS_UFS2_MAGIC &&
	    lbn < 0 && lbn >= -NXADDR)
		return (VOP_STRATEGY_APV(&ufs_vnodeops, ap));
	if (vp->v_type == VFIFO)
		return (VOP_STRATEGY_APV(&ufs_fifoops, ap));
	panic("spec nodes went here");
}

/*
 * Vnode extattr transaction start.
 */
static int
ffs_openextattr(struct vop_openextattr_args *ap)
/*
struct vop_openextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_closeextattr(struct vop_closeextattr_args *ap)
/*
struct vop_closeextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	int a_commit;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	if (ap->a_commit && (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);

	return (ffs_close_ea(ap->a_vp, ap->a_commit, ap->a_cred, ap->a_td));
}

/*
 * Vnode operation to remove a named attribute.
 */
static int
ffs_deleteextattr(struct vop_deleteextattr_args *ap)
/*
vop_deleteextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	uint32_t ealength, ul;
	int ealen, olen, eapad1, eapad2, error, i, easize;
	u_char *eae, *p;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	if (strlen(ap->a_name) == 0)
		return (EINVAL);

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error) {
		/*
		 * ffs_lock_ea is not needed here, because the vnode
		 * must be exclusively locked.
		 */
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	ealength = eapad1 = ealen = eapad2 = 0;

	eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    &p, NULL);
	if (olen == -1) {
		/* delete but nonexistent */
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		return (ENOATTR);
	}
	bcopy(p, &ul, sizeof ul);
	i = p - eae + ul;
	if (ul != ealength) {
		bcopy(p + ul, p + ealength, easize - i);
		easize += (ealength - ul);
	}
	if (easize > NXADDR * fs->fs_bsize) {
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = ENOSPC;
		return (ENOSPC);
	}
	p = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(p, M_TEMP);
	error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to retrieve a named extended attribute.
 */
static int
ffs_getextattr(struct vop_getextattr_args *ap)
/*
vop_getextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	u_char *eae, *p;
	unsigned easize;
	int error, ealen;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error)
		return (error);

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	eae = ip->i_ea_area;
	easize = ip->i_ea_len;

	ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    NULL, &p);
	if (ealen >= 0) {
		error = 0;
		if (ap->a_size != NULL)
			*ap->a_size = ealen;
		else if (ap->a_uio != NULL)
			error = uiomove(p, ealen, ap->a_uio);
	} else
		error = ENOATTR;

	ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to list extended attributes on a vnode.
 */
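/*
 * Note (derived from the loop below): each attribute is returned to
 * userland as a one-byte name length followed by the unterminated
 * name; a_size, when requested, accumulates the same byte count
 * instead of copying any data.
 */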
static int
ffs_listextattr(struct vop_listextattr_args *ap)
/*
vop_listextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	u_char *eae, *p, *pe, *pn;
	unsigned easize;
	uint32_t ul;
	int error, ealen;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error)
		return (error);

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);
	eae = ip->i_ea_area;
	easize = ip->i_ea_len;

	error = 0;
	if (ap->a_size != NULL)
		*ap->a_size = 0;
	pe = eae + easize;
	for (p = eae; error == 0 && p < pe; p = pn) {
		bcopy(p, &ul, sizeof(ul));
		pn = p + ul;
		if (pn > pe)
			break;
		p += sizeof(ul);
		if (*p++ != ap->a_attrnamespace)
			continue;
		p++;	/* pad2 */
		ealen = *p;
		if (ap->a_size != NULL) {
			*ap->a_size += ealen + 1;
		} else if (ap->a_uio != NULL) {
			error = uiomove(p, ealen + 1, ap->a_uio);
		}
	}
	ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to set a named attribute.
 */
static int
ffs_setextattr(struct vop_setextattr_args *ap)
/*
vop_setextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	uint32_t ealength, ul;
	int ealen, olen, eapad1, eapad2, error, i, easize;
	u_char *eae, *p;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	if (strlen(ap->a_name) == 0)
		return (EINVAL);

	/* XXX: Deleting EAs by passing a NULL uio is no longer supported. */
	if (ap->a_uio == NULL)
		return (EOPNOTSUPP);

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error) {
		/*
		 * ffs_lock_ea is not needed here, because the vnode
		 * must be exclusively locked.
		 */
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	ealen = ap->a_uio->uio_resid;
	ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name);
	eapad1 = 8 - (ealength % 8);
	if (eapad1 == 8)
		eapad1 = 0;
	eapad2 = 8 - (ealen % 8);
	if (eapad2 == 8)
		eapad2 = 0;
	ealength += eapad1 + ealen + eapad2;
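	/*
	 * Worked example (illustrative numbers): for a 3-byte name such
	 * as "md5" with 16 bytes of data, the fixed header is
	 * 4 + 3 + 3 = 10 bytes, so eapad1 = 6 pads it to 16; the data
	 * length is already a multiple of 8, so eapad2 = 0 and the
	 * record occupies 32 bytes in all.
	 */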

	eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize,
	    ap->a_attrnamespace, ap->a_name, &p, NULL);
	if (olen == -1) {
		/* new, append at end */
		p = eae + easize;
		easize += ealength;
	} else {
		bcopy(p, &ul, sizeof ul);
		i = p - eae + ul;
		if (ul != ealength) {
			bcopy(p + ul, p + ealength, easize - i);
			easize += (ealength - ul);
		}
	}
	if (easize > NXADDR * fs->fs_bsize) {
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = ENOSPC;
		return (ENOSPC);
	}
	bcopy(&ealength, p, sizeof(ealength));
	p += sizeof(ealength);
	*p++ = ap->a_attrnamespace;
	*p++ = eapad2;
	*p++ = strlen(ap->a_name);
	strcpy(p, ap->a_name);
	p += strlen(ap->a_name);
	bzero(p, eapad1);
	p += eapad1;
	error = uiomove(p, ealen, ap->a_uio);
	if (error) {
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}
	p += ealen;
	bzero(p, eapad2);

	p = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(p, M_TEMP);
	error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode pointer to File handle
 */
static int
ffs_vptofh(struct vop_vptofh_args *ap)
/*
vop_vptofh {
	IN struct vnode *a_vp;
	IN struct fid *a_fhp;
};
*/
{
	struct inode *ip;
	struct ufid *ufhp;

	ip = VTOI(ap->a_vp);
	ufhp = (struct ufid *)ap->a_fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}