xref: /freebsd/sys/ufs/ffs/ffs_vnops.c (revision 39beb93c3f8bdbf72a61fda42300b5ebed7390c8)
1 /*-
2  * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
3  * All rights reserved.
4  *
5  * This software was developed for the FreeBSD Project by Marshall
6  * Kirk McKusick and Network Associates Laboratories, the Security
7  * Research Division of Network Associates, Inc. under DARPA/SPAWAR
8  * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
9  * research program
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * Copyright (c) 1982, 1986, 1989, 1993
33  *	The Regents of the University of California.  All rights reserved.
34  *
35  * Redistribution and use in source and binary forms, with or without
36  * modification, are permitted provided that the following conditions
37  * are met:
38  * 1. Redistributions of source code must retain the above copyright
39  *    notice, this list of conditions and the following disclaimer.
40  * 2. Redistributions in binary form must reproduce the above copyright
41  *    notice, this list of conditions and the following disclaimer in the
42  *    documentation and/or other materials provided with the distribution.
43  * 4. Neither the name of the University nor the names of its contributors
44  *    may be used to endorse or promote products derived from this software
45  *    without specific prior written permission.
46  *
47  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57  * SUCH DAMAGE.
58  *
59  *	from: @(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
60  * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
61  *	@(#)ffs_vnops.c	8.15 (Berkeley) 5/14/95
62  */
63 
64 #include <sys/cdefs.h>
65 __FBSDID("$FreeBSD$");
66 
67 #include <sys/param.h>
68 #include <sys/bio.h>
69 #include <sys/systm.h>
70 #include <sys/buf.h>
71 #include <sys/conf.h>
72 #include <sys/extattr.h>
73 #include <sys/kernel.h>
74 #include <sys/limits.h>
75 #include <sys/malloc.h>
76 #include <sys/mount.h>
77 #include <sys/priv.h>
78 #include <sys/proc.h>
79 #include <sys/resourcevar.h>
80 #include <sys/signalvar.h>
81 #include <sys/stat.h>
82 #include <sys/vmmeter.h>
83 #include <sys/vnode.h>
84 
85 #include <vm/vm.h>
86 #include <vm/vm_extern.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_page.h>
89 #include <vm/vm_pager.h>
90 #include <vm/vnode_pager.h>
91 
92 #include <ufs/ufs/extattr.h>
93 #include <ufs/ufs/quota.h>
94 #include <ufs/ufs/inode.h>
95 #include <ufs/ufs/ufs_extern.h>
96 #include <ufs/ufs/ufsmount.h>
97 
98 #include <ufs/ffs/fs.h>
99 #include <ufs/ffs/ffs_extern.h>
100 #include "opt_directio.h"
101 #include "opt_ffs.h"
102 
103 #ifdef DIRECTIO
104 extern int	ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
105 #endif
106 static vop_fsync_t	ffs_fsync;
107 static vop_lock1_t	ffs_lock;
108 static vop_getpages_t	ffs_getpages;
109 static vop_read_t	ffs_read;
110 static vop_write_t	ffs_write;
111 static int	ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
112 static int	ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
113 		    struct ucred *cred);
114 static vop_strategy_t	ffsext_strategy;
115 static vop_closeextattr_t	ffs_closeextattr;
116 static vop_deleteextattr_t	ffs_deleteextattr;
117 static vop_getextattr_t	ffs_getextattr;
118 static vop_listextattr_t	ffs_listextattr;
119 static vop_openextattr_t	ffs_openextattr;
120 static vop_setextattr_t	ffs_setextattr;
121 static vop_vptofh_t	ffs_vptofh;
122 
123 
124 /* Global vfs data structures for ufs. */
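/*
 * The "1" vectors below are used for vnodes on UFS1 file systems, which
 * have no extended attribute area; the "2" vectors add the extended
 * attribute operations used for UFS2 vnodes.
 */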
125 struct vop_vector ffs_vnodeops1 = {
126 	.vop_default =		&ufs_vnodeops,
127 	.vop_fsync =		ffs_fsync,
128 	.vop_getpages =		ffs_getpages,
129 	.vop_lock1 =		ffs_lock,
130 	.vop_read =		ffs_read,
131 	.vop_reallocblks =	ffs_reallocblks,
132 	.vop_write =		ffs_write,
133 	.vop_vptofh =		ffs_vptofh,
134 };
135 
136 struct vop_vector ffs_fifoops1 = {
137 	.vop_default =		&ufs_fifoops,
138 	.vop_fsync =		ffs_fsync,
139 	.vop_reallocblks =	ffs_reallocblks, /* XXX: really ??? */
140 	.vop_vptofh =		ffs_vptofh,
141 };
142 
143 /* Global vfs data structures for ufs. */
144 struct vop_vector ffs_vnodeops2 = {
145 	.vop_default =		&ufs_vnodeops,
146 	.vop_fsync =		ffs_fsync,
147 	.vop_getpages =		ffs_getpages,
148 	.vop_lock1 =		ffs_lock,
149 	.vop_read =		ffs_read,
150 	.vop_reallocblks =	ffs_reallocblks,
151 	.vop_write =		ffs_write,
152 	.vop_closeextattr =	ffs_closeextattr,
153 	.vop_deleteextattr =	ffs_deleteextattr,
154 	.vop_getextattr =	ffs_getextattr,
155 	.vop_listextattr =	ffs_listextattr,
156 	.vop_openextattr =	ffs_openextattr,
157 	.vop_setextattr =	ffs_setextattr,
158 	.vop_vptofh =		ffs_vptofh,
159 };
160 
161 struct vop_vector ffs_fifoops2 = {
162 	.vop_default =		&ufs_fifoops,
163 	.vop_fsync =		ffs_fsync,
164 	.vop_lock1 =		ffs_lock,
165 	.vop_reallocblks =	ffs_reallocblks,
166 	.vop_strategy =		ffsext_strategy,
167 	.vop_closeextattr =	ffs_closeextattr,
168 	.vop_deleteextattr =	ffs_deleteextattr,
169 	.vop_getextattr =	ffs_getextattr,
170 	.vop_listextattr =	ffs_listextattr,
171 	.vop_openextattr =	ffs_openextattr,
172 	.vop_setextattr =	ffs_setextattr,
173 	.vop_vptofh =		ffs_vptofh,
174 };
175 
176 /*
177  * Synch an open file.
178  */
179 /* ARGSUSED */
180 static int
181 ffs_fsync(struct vop_fsync_args *ap)
182 {
183 	int error;
184 
185 	error = ffs_syncvnode(ap->a_vp, ap->a_waitfor);
186 	if (error)
187 		return (error);
188 	if (ap->a_waitfor == MNT_WAIT &&
189 	    (ap->a_vp->v_mount->mnt_flag & MNT_SOFTDEP))
190 		error = softdep_fsync(ap->a_vp);
191 	return (error);
192 }
193 
194 int
195 ffs_syncvnode(struct vnode *vp, int waitfor)
196 {
197 	struct inode *ip = VTOI(vp);
198 	struct bufobj *bo;
199 	struct buf *bp;
200 	struct buf *nbp;
201 	int s, error, wait, passes, skipmeta;
202 	ufs_lbn_t lbn;
203 
204 	wait = (waitfor == MNT_WAIT);
205 	lbn = lblkno(ip->i_fs, (ip->i_size + ip->i_fs->fs_bsize - 1));
206 	bo = &vp->v_bufobj;
207 
208 	/*
209 	 * Flush all dirty buffers associated with a vnode.
210 	 */
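	/*
	 * On a synchronous (MNT_WAIT) request the data blocks are flushed
	 * first and metadata buffers (negative logical block numbers) are
	 * deferred to a second pass; up to NIADDR + 1 further passes are
	 * made over anything that remains dirty.
	 */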
211 	passes = NIADDR + 1;
212 	skipmeta = 0;
213 	if (wait)
214 		skipmeta = 1;
215 	s = splbio();
216 	BO_LOCK(bo);
217 loop:
218 	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
219 		bp->b_vflags &= ~BV_SCANNED;
220 	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
221 		/*
222 		 * Reasons to skip this buffer: it has already been considered
223 		 * on this pass, this pass is the first time through on a
224 		 * synchronous flush request and the buffer being considered
225 		 * is metadata, the buffer has dependencies that will cause
226 		 * it to be redirtied and it has not already been deferred,
227 		 * or it is already being written.
228 		 */
229 		if ((bp->b_vflags & BV_SCANNED) != 0)
230 			continue;
231 		bp->b_vflags |= BV_SCANNED;
232 		if ((skipmeta == 1 && bp->b_lblkno < 0))
233 			continue;
234 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
235 			continue;
236 		BO_UNLOCK(bo);
237 		if (!wait && !LIST_EMPTY(&bp->b_dep) &&
238 		    (bp->b_flags & B_DEFERRED) == 0 &&
239 		    buf_countdeps(bp, 0)) {
240 			bp->b_flags |= B_DEFERRED;
241 			BUF_UNLOCK(bp);
242 			BO_LOCK(bo);
243 			continue;
244 		}
245 		if ((bp->b_flags & B_DELWRI) == 0)
246 			panic("ffs_fsync: not dirty");
247 		/*
248 		 * If this is a synchronous flush request, or it is not a
249 		 * file or device, start the write on this buffer immediately.
250 		 */
251 		if (wait || (vp->v_type != VREG && vp->v_type != VBLK)) {
252 
253 			/*
254 			 * On our final pass through, do all I/O synchronously
255 			 * so that we can find out if our flush is failing
256 			 * because of write errors.
257 			 */
258 			if (passes > 0 || !wait) {
259 				if ((bp->b_flags & B_CLUSTEROK) && !wait) {
260 					(void) vfs_bio_awrite(bp);
261 				} else {
262 					bremfree(bp);
263 					splx(s);
264 					(void) bawrite(bp);
265 					s = splbio();
266 				}
267 			} else {
268 				bremfree(bp);
269 				splx(s);
270 				if ((error = bwrite(bp)) != 0)
271 					return (error);
272 				s = splbio();
273 			}
274 		} else if ((vp->v_type == VREG) && (bp->b_lblkno >= lbn)) {
275 			/*
276 			 * If the buffer is for data that has been truncated
277 			 * off the file, then throw it away.
278 			 */
279 			bremfree(bp);
280 			bp->b_flags |= B_INVAL | B_NOCACHE;
281 			splx(s);
282 			brelse(bp);
283 			s = splbio();
284 		} else
285 			vfs_bio_awrite(bp);
286 
287 		/*
288 		 * Since we may have slept during the I/O, we need
289 		 * to start from a known point.
290 		 */
291 		BO_LOCK(bo);
292 		nbp = TAILQ_FIRST(&bo->bo_dirty.bv_hd);
293 	}
294 	/*
295 	 * If we were asked to do this synchronously, then go back for
296 	 * another pass, this time doing the metadata.
297 	 */
298 	if (skipmeta) {
299 		skipmeta = 0;
300 		goto loop;
301 	}
302 
303 	if (wait) {
304 		bufobj_wwait(bo, 3, 0);
305 		BO_UNLOCK(bo);
306 
307 		/*
308 		 * Ensure that any filesystem metadata associated
309 		 * with the vnode has been written.
310 		 */
311 		splx(s);
312 		if ((error = softdep_sync_metadata(vp)) != 0)
313 			return (error);
314 		s = splbio();
315 
316 		BO_LOCK(bo);
317 		if (bo->bo_dirty.bv_cnt > 0) {
318 			/*
319 			 * Block devices associated with filesystems may
320 			 * have new I/O requests posted for them even if
321 			 * the vnode is locked, so no amount of trying will
322 			 * get them clean. Thus we give block devices a
323 			 * good effort, then just give up. For all other file
324 			 * types, go around and try again until it is clean.
325 			 */
326 			if (passes > 0) {
327 				passes -= 1;
328 				goto loop;
329 			}
330 #ifdef INVARIANTS
331 			if (!vn_isdisk(vp, NULL))
332 				vprint("ffs_fsync: dirty", vp);
333 #endif
334 		}
335 	}
336 	BO_UNLOCK(bo);
337 	splx(s);
338 	return (ffs_update(vp, wait));
339 }
340 
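/*
 * Acquire the vnode lock.  A vnode may be converted between a snapshot
 * vnode and a regular vnode while a locker sleeps, replacing v_vnlock,
 * so retry with the new lock when that happens; all other lock requests
 * are passed on to the default UFS routine.
 */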
341 static int
342 ffs_lock(ap)
343 	struct vop_lock1_args /* {
344 		struct vnode *a_vp;
345 		int a_flags;
346 		struct thread *a_td;
347 		char *file;
348 		int line;
349 	} */ *ap;
350 {
351 #ifndef NO_FFS_SNAPSHOT
352 	struct vnode *vp;
353 	int flags;
354 	struct lock *lkp;
355 	int result;
356 
357 	switch (ap->a_flags & LK_TYPE_MASK) {
358 	case LK_SHARED:
359 	case LK_UPGRADE:
360 	case LK_EXCLUSIVE:
361 		vp = ap->a_vp;
362 		flags = ap->a_flags;
363 		for (;;) {
364 #ifdef DEBUG_VFS_LOCKS
365 			KASSERT(vp->v_holdcnt != 0,
366 			    ("ffs_lock %p: zero hold count", vp));
367 #endif
368 			lkp = vp->v_vnlock;
369 			result = _lockmgr_args(lkp, flags, VI_MTX(vp),
370 			    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
371 			    ap->a_file, ap->a_line);
372 			if (lkp == vp->v_vnlock || result != 0)
373 				break;
374 			/*
375 			 * Apparent success, except that the vnode
376 			 * mutated between snapshot file vnode and
377 			 * regular file vnode while this process
378 			 * slept.  The lock currently held is not the
379 			 * right lock.  Release it, and try to get the
380 			 * new lock.
381 			 */
382 			(void) _lockmgr_args(lkp, LK_RELEASE, NULL,
383 			    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
384 			    ap->a_file, ap->a_line);
385 			if ((flags & (LK_INTERLOCK | LK_NOWAIT)) ==
386 			    (LK_INTERLOCK | LK_NOWAIT))
387 				return (EBUSY);
388 			if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
389 				flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
390 			flags &= ~LK_INTERLOCK;
391 		}
392 		break;
393 	default:
394 		result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
395 	}
396 	return (result);
397 #else
398 	return (VOP_LOCK1_APV(&ufs_vnodeops, ap));
399 #endif
400 }
401 
402 /*
403  * Vnode op for reading.
404  */
405 /* ARGSUSED */
406 static int
407 ffs_read(ap)
408 	struct vop_read_args /* {
409 		struct vnode *a_vp;
410 		struct uio *a_uio;
411 		int a_ioflag;
412 		struct ucred *a_cred;
413 	} */ *ap;
414 {
415 	struct vnode *vp;
416 	struct inode *ip;
417 	struct uio *uio;
418 	struct fs *fs;
419 	struct buf *bp;
420 	ufs_lbn_t lbn, nextlbn;
421 	off_t bytesinfile;
422 	long size, xfersize, blkoffset;
423 	int error, orig_resid;
424 	int seqcount;
425 	int ioflag;
426 
427 	vp = ap->a_vp;
428 	uio = ap->a_uio;
429 	ioflag = ap->a_ioflag;
430 	if (ap->a_ioflag & IO_EXT)
431 #ifdef notyet
432 		return (ffs_extread(vp, uio, ioflag));
433 #else
434 		panic("ffs_read+IO_EXT");
435 #endif
436 #ifdef DIRECTIO
437 	if ((ioflag & IO_DIRECT) != 0) {
438 		int workdone;
439 
440 		error = ffs_rawread(vp, uio, &workdone);
441 		if (error != 0 || workdone != 0)
442 			return error;
443 	}
444 #endif
445 
446 	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
447 	ip = VTOI(vp);
448 
449 #ifdef INVARIANTS
450 	if (uio->uio_rw != UIO_READ)
451 		panic("ffs_read: mode");
452 
453 	if (vp->v_type == VLNK) {
454 		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
455 			panic("ffs_read: short symlink");
456 	} else if (vp->v_type != VREG && vp->v_type != VDIR)
457 		panic("ffs_read: type %d",  vp->v_type);
458 #endif
459 	orig_resid = uio->uio_resid;
460 	KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0"));
461 	if (orig_resid == 0)
462 		return (0);
463 	KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0"));
464 	fs = ip->i_fs;
465 	if (uio->uio_offset < ip->i_size &&
466 	    uio->uio_offset >= fs->fs_maxfilesize)
467 		return (EOVERFLOW);
468 
469 	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
470 		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
471 			break;
472 		lbn = lblkno(fs, uio->uio_offset);
473 		nextlbn = lbn + 1;
474 
475 		/*
476 		 * size of buffer.  The buffer representing the
477 		 * end of the file is rounded up to the size of
478 		 * the block type (fragment or full block,
479 		 * depending).
480 		 */
481 		size = blksize(fs, ip, lbn);
482 		blkoffset = blkoff(fs, uio->uio_offset);
483 
484 		/*
485 		 * The amount we want to transfer in this iteration is
486 		 * one FS block less the amount of the data before
487 		 * our startpoint (duh!)
488 		 */
489 		xfersize = fs->fs_bsize - blkoffset;
490 
491 		/*
492 		 * But if we actually want less than the block,
493 		 * or the file doesn't have a whole block more of data,
494 		 * then use the lesser number.
495 		 */
496 		if (uio->uio_resid < xfersize)
497 			xfersize = uio->uio_resid;
498 		if (bytesinfile < xfersize)
499 			xfersize = bytesinfile;
500 
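		/*
		 * For example, assuming a 16K-block file system, a read
		 * starting 4K into a block begins with xfersize = 12K and
		 * is then clamped to the caller's residual count and to
		 * the bytes remaining in the file.
		 */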
501 		if (lblktosize(fs, nextlbn) >= ip->i_size) {
502 			/*
503 			 * Don't do readahead if this is the end of the file.
504 			 */
505 			error = bread(vp, lbn, size, NOCRED, &bp);
506 		} else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
507 			/*
508 			 * Otherwise if we are allowed to cluster,
509 			 * grab as much as we can.
510 			 *
511 			 * XXX  This may not be a win if we are not
512 			 * doing sequential access.
513 			 */
514 			error = cluster_read(vp, ip->i_size, lbn,
515 				size, NOCRED, blkoffset + uio->uio_resid, seqcount, &bp);
516 		} else if (seqcount > 1) {
517 			/*
518 			 * If we are NOT allowed to cluster, then
519 			 * if we appear to be acting sequentially,
520 			 * fire off a request for a readahead
521 			 * as well as a read. Note that the 4th and 5th
522 			 * arguments point to arrays of the size specified in
523 			 * the 6th argument.
524 			 */
525 			int nextsize = blksize(fs, ip, nextlbn);
526 			error = breadn(vp, lbn,
527 			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
528 		} else {
529 			/*
530 			 * Failing all of the above, just read what the
531 			 * user asked for. Interestingly, the same as
532 			 * the first option above.
533 			 */
534 			error = bread(vp, lbn, size, NOCRED, &bp);
535 		}
536 		if (error) {
537 			brelse(bp);
538 			bp = NULL;
539 			break;
540 		}
541 
542 		/*
543 		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
544 		 * will cause us to attempt to release the buffer later on
545 		 * and will cause the buffer cache to attempt to free the
546 		 * underlying pages.
547 		 */
548 		if (ioflag & IO_DIRECT)
549 			bp->b_flags |= B_DIRECT;
550 
551 		/*
552 		 * We should only get non-zero b_resid when an I/O error
553 		 * has occurred, which should cause us to break above.
554 		 * However, if the short read did not cause an error,
555 		 * then we want to ensure that we do not uiomove bad
556 		 * or uninitialized data.
557 		 */
558 		size -= bp->b_resid;
559 		if (size < xfersize) {
560 			if (size == 0)
561 				break;
562 			xfersize = size;
563 		}
564 
565 		error = uiomove((char *)bp->b_data + blkoffset,
566 		    (int)xfersize, uio);
567 		if (error)
568 			break;
569 
570 		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
571 		   (LIST_EMPTY(&bp->b_dep))) {
572 			/*
573 			 * If there are no dependencies, and it's VMIO,
574 			 * then we don't need the buf, mark it available
575 			 * for freeing. The VM has the data.
576 			 */
577 			bp->b_flags |= B_RELBUF;
578 			brelse(bp);
579 		} else {
580 			/*
581 			 * Otherwise let whoever
582 			 * made the request take care of
583 			 * freeing it. We just queue
584 			 * it onto another list.
585 			 */
586 			bqrelse(bp);
587 		}
588 	}
589 
590 	/*
591 	 * This can only happen in the case of an error
592 	 * because the loop above resets bp to NULL on each iteration
593 	 * and on normal completion has not set a new value into it,
594 	 * so it must have come from a 'break' statement.
595 	 */
596 	if (bp != NULL) {
597 		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
598 		   (LIST_EMPTY(&bp->b_dep))) {
599 			bp->b_flags |= B_RELBUF;
600 			brelse(bp);
601 		} else {
602 			bqrelse(bp);
603 		}
604 	}
605 
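	/*
	 * If the read succeeded, or at least made partial progress, flag
	 * the inode so its access time is updated, unless access-time
	 * updates are disabled for this mount.
	 */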
606 	if ((error == 0 || uio->uio_resid != orig_resid) &&
607 	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0 &&
608 	    (ip->i_flag & IN_ACCESS) == 0) {
609 		VI_LOCK(vp);
610 		ip->i_flag |= IN_ACCESS;
611 		VI_UNLOCK(vp);
612 	}
613 	return (error);
614 }
615 
616 /*
617  * Vnode op for writing.
618  */
619 static int
620 ffs_write(ap)
621 	struct vop_write_args /* {
622 		struct vnode *a_vp;
623 		struct uio *a_uio;
624 		int a_ioflag;
625 		struct ucred *a_cred;
626 	} */ *ap;
627 {
628 	struct vnode *vp;
629 	struct uio *uio;
630 	struct inode *ip;
631 	struct fs *fs;
632 	struct buf *bp;
633 	struct thread *td;
634 	ufs_lbn_t lbn;
635 	off_t osize;
636 	int seqcount;
637 	int blkoffset, error, flags, ioflag, resid, size, xfersize;
638 
639 	vp = ap->a_vp;
640 	uio = ap->a_uio;
641 	ioflag = ap->a_ioflag;
642 	if (ap->a_ioflag & IO_EXT)
643 #ifdef notyet
644 		return (ffs_extwrite(vp, uio, ioflag, ap->a_cred));
645 #else
646 		panic("ffs_write+IO_EXT");
647 #endif
648 
649 	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
650 	ip = VTOI(vp);
651 
652 #ifdef INVARIANTS
653 	if (uio->uio_rw != UIO_WRITE)
654 		panic("ffs_write: mode");
655 #endif
656 
657 	switch (vp->v_type) {
658 	case VREG:
659 		if (ioflag & IO_APPEND)
660 			uio->uio_offset = ip->i_size;
661 		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
662 			return (EPERM);
663 		/* FALLTHROUGH */
664 	case VLNK:
665 		break;
666 	case VDIR:
667 		panic("ffs_write: dir write");
668 		break;
669 	default:
670 		panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
671 			(int)uio->uio_offset,
672 			(int)uio->uio_resid
673 		);
674 	}
675 
676 	KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0"));
677 	KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0"));
678 	fs = ip->i_fs;
679 	if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
680 		return (EFBIG);
681 	/*
682 	 * Maybe this should be above the vnode op call, but so long as
683 	 * file servers have no limits, I don't think it matters.
684 	 */
685 	td = uio->uio_td;
686 	if (vp->v_type == VREG && td != NULL) {
687 		PROC_LOCK(td->td_proc);
688 		if (uio->uio_offset + uio->uio_resid >
689 		    lim_cur(td->td_proc, RLIMIT_FSIZE)) {
690 			psignal(td->td_proc, SIGXFSZ);
691 			PROC_UNLOCK(td->td_proc);
692 			return (EFBIG);
693 		}
694 		PROC_UNLOCK(td->td_proc);
695 	}
696 
697 	resid = uio->uio_resid;
698 	osize = ip->i_size;
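	/*
	 * Encode the sequential-access hint, capped at BA_SEQMAX, into the
	 * flags handed to UFS_BALLOC() below.
	 */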
699 	if (seqcount > BA_SEQMAX)
700 		flags = BA_SEQMAX << BA_SEQSHIFT;
701 	else
702 		flags = seqcount << BA_SEQSHIFT;
703 	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
704 		flags |= IO_SYNC;
705 
706 	for (error = 0; uio->uio_resid > 0;) {
707 		lbn = lblkno(fs, uio->uio_offset);
708 		blkoffset = blkoff(fs, uio->uio_offset);
709 		xfersize = fs->fs_bsize - blkoffset;
710 		if (uio->uio_resid < xfersize)
711 			xfersize = uio->uio_resid;
712 		if (uio->uio_offset + xfersize > ip->i_size)
713 			vnode_pager_setsize(vp, uio->uio_offset + xfersize);
714 
715 		/*
716 		 * We must perform a read-before-write if the transfer size
717 		 * does not cover the entire buffer.
718 		 */
719 		if (fs->fs_bsize > xfersize)
720 			flags |= BA_CLRBUF;
721 		else
722 			flags &= ~BA_CLRBUF;
723 /* XXX is uio->uio_offset the right thing here? */
724 		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
725 		    ap->a_cred, flags, &bp);
726 		if (error != 0) {
727 			vnode_pager_setsize(vp, ip->i_size);
728 			break;
729 		}
730 		/*
731 		 * If the buffer is not valid we have to clear out any
732 		 * garbage data from the pages instantiated for the buffer.
733 		 * If we do not, a failed uiomove() during a write can leave
734 		 * the prior contents of the pages exposed to a userland
735 		 * mmap().  XXX deal with uiomove() errors a better way.
736 		 */
737 		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
738 			vfs_bio_clrbuf(bp);
739 		if (ioflag & IO_DIRECT)
740 			bp->b_flags |= B_DIRECT;
741 		if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
742 			bp->b_flags |= B_NOCACHE;
743 
744 		if (uio->uio_offset + xfersize > ip->i_size) {
745 			ip->i_size = uio->uio_offset + xfersize;
746 			DIP_SET(ip, i_size, ip->i_size);
747 		}
748 
749 		size = blksize(fs, ip, lbn) - bp->b_resid;
750 		if (size < xfersize)
751 			xfersize = size;
752 
753 		error =
754 		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
755 		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
756 		   (LIST_EMPTY(&bp->b_dep))) {
757 			bp->b_flags |= B_RELBUF;
758 		}
759 
760 		/*
761 		 * If IO_SYNC each buffer is written synchronously.  Otherwise
762 		 * if we have a severe page deficiency write the buffer
763 		 * asynchronously.  Otherwise try to cluster, and if that
764 		 * doesn't do it then either do an async write (if O_DIRECT),
765 		 * or a delayed write (if not).
766 		 */
767 		if (ioflag & IO_SYNC) {
768 			(void)bwrite(bp);
769 		} else if (vm_page_count_severe() ||
770 			    buf_dirty_count_severe() ||
771 			    (ioflag & IO_ASYNC)) {
772 			bp->b_flags |= B_CLUSTEROK;
773 			bawrite(bp);
774 		} else if (xfersize + blkoffset == fs->fs_bsize) {
775 			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
776 				bp->b_flags |= B_CLUSTEROK;
777 				cluster_write(vp, bp, ip->i_size, seqcount);
778 			} else {
779 				bawrite(bp);
780 			}
781 		} else if (ioflag & IO_DIRECT) {
782 			bp->b_flags |= B_CLUSTEROK;
783 			bawrite(bp);
784 		} else {
785 			bp->b_flags |= B_CLUSTEROK;
786 			bdwrite(bp);
787 		}
788 		if (error || xfersize == 0)
789 			break;
790 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
791 	}
792 	/*
793 	 * If we successfully wrote any data, and we are not the superuser
794 	 * we clear the setuid and setgid bits as a precaution against
795 	 * tampering.
796 	 */
797 	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
798 	    ap->a_cred) {
799 		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0)) {
800 			ip->i_mode &= ~(ISUID | ISGID);
801 			DIP_SET(ip, i_mode, ip->i_mode);
802 		}
803 	}
804 	if (error) {
805 		if (ioflag & IO_UNIT) {
806 			(void)ffs_truncate(vp, osize,
807 			    IO_NORMAL | (ioflag & IO_SYNC),
808 			    ap->a_cred, uio->uio_td);
809 			uio->uio_offset -= resid - uio->uio_resid;
810 			uio->uio_resid = resid;
811 		}
812 	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
813 		error = ffs_update(vp, 1);
814 	return (error);
815 }
816 
817 /*
818  * get page routine
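 *
 * If the page being requested is already valid, zero any invalid
 * portions of it and free the other pages passed in; otherwise fall
 * back to the generic vnode pager.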
819  */
820 static int
821 ffs_getpages(ap)
822 	struct vop_getpages_args *ap;
823 {
824 	int i;
825 	vm_page_t mreq;
826 	int pcount;
827 
828 	pcount = round_page(ap->a_count) / PAGE_SIZE;
829 	mreq = ap->a_m[ap->a_reqpage];
830 
831 	/*
832 	 * if ANY DEV_BSIZE blocks are valid on a large filesystem block,
833 	 * then the entire page is valid.  Since the page may be mapped,
834 	 * user programs might reference data beyond the actual end of file
835 	 * occurring within the page.  We have to zero that data.
836 	 */
837 	VM_OBJECT_LOCK(mreq->object);
838 	if (mreq->valid) {
839 		if (mreq->valid != VM_PAGE_BITS_ALL)
840 			vm_page_zero_invalid(mreq, TRUE);
841 		vm_page_lock_queues();
842 		for (i = 0; i < pcount; i++) {
843 			if (i != ap->a_reqpage) {
844 				vm_page_free(ap->a_m[i]);
845 			}
846 		}
847 		vm_page_unlock_queues();
848 		VM_OBJECT_UNLOCK(mreq->object);
849 		return VM_PAGER_OK;
850 	}
851 	VM_OBJECT_UNLOCK(mreq->object);
852 
853 	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
854 					    ap->a_count,
855 					    ap->a_reqpage);
856 }
857 
858 
859 /*
860  * Extended attribute area reading.
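 *
 * The UFS2 extended attribute area is kept in its own set of blocks
 * (di_extb) and is addressed below with negative logical block numbers
 * (-1 - lbn).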
861  */
862 static int
863 ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
864 {
865 	struct inode *ip;
866 	struct ufs2_dinode *dp;
867 	struct fs *fs;
868 	struct buf *bp;
869 	ufs_lbn_t lbn, nextlbn;
870 	off_t bytesinfile;
871 	long size, xfersize, blkoffset;
872 	int error, orig_resid;
873 
874 	ip = VTOI(vp);
875 	fs = ip->i_fs;
876 	dp = ip->i_din2;
877 
878 #ifdef INVARIANTS
879 	if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
880 		panic("ffs_extread: mode");
881 
882 #endif
883 	orig_resid = uio->uio_resid;
884 	KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
885 	if (orig_resid == 0)
886 		return (0);
887 	KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));
888 
889 	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
890 		if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
891 			break;
892 		lbn = lblkno(fs, uio->uio_offset);
893 		nextlbn = lbn + 1;
894 
895 		/*
896 		 * size of buffer.  The buffer representing the
897 		 * end of the file is rounded up to the size of
898 		 * the block type (fragment or full block,
899 		 * depending).
900 		 */
901 		size = sblksize(fs, dp->di_extsize, lbn);
902 		blkoffset = blkoff(fs, uio->uio_offset);
903 
904 		/*
905 		 * The amount we want to transfer in this iteration is
906 		 * one FS block less the amount of the data before
907 		 * our startpoint (duh!)
908 		 */
909 		xfersize = fs->fs_bsize - blkoffset;
910 
911 		/*
912 		 * But if we actually want less than the block,
913 		 * or the file doesn't have a whole block more of data,
914 		 * then use the lesser number.
915 		 */
916 		if (uio->uio_resid < xfersize)
917 			xfersize = uio->uio_resid;
918 		if (bytesinfile < xfersize)
919 			xfersize = bytesinfile;
920 
921 		if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
922 			/*
923 			 * Don't do readahead if this is the end of the EA area.
924 			 */
925 			error = bread(vp, -1 - lbn, size, NOCRED, &bp);
926 		} else {
927 			/*
928 			 * If we have a second block, then
929 			 * fire off a request for a readahead
930 			 * as well as a read. Note that the 4th and 5th
931 			 * arguments point to arrays of the size specified in
932 			 * the 6th argument.
933 			 */
934 			int nextsize = sblksize(fs, dp->di_extsize, nextlbn);
935 
936 			nextlbn = -1 - nextlbn;
937 			error = breadn(vp, -1 - lbn,
938 			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
939 		}
940 		if (error) {
941 			brelse(bp);
942 			bp = NULL;
943 			break;
944 		}
945 
946 		/*
947 		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
948 		 * will cause us to attempt to release the buffer later on
949 		 * and will cause the buffer cache to attempt to free the
950 		 * underlying pages.
951 		 */
952 		if (ioflag & IO_DIRECT)
953 			bp->b_flags |= B_DIRECT;
954 
955 		/*
956 		 * We should only get non-zero b_resid when an I/O error
957 		 * has occurred, which should cause us to break above.
958 		 * However, if the short read did not cause an error,
959 		 * then we want to ensure that we do not uiomove bad
960 		 * or uninitialized data.
961 		 */
962 		size -= bp->b_resid;
963 		if (size < xfersize) {
964 			if (size == 0)
965 				break;
966 			xfersize = size;
967 		}
968 
969 		error = uiomove((char *)bp->b_data + blkoffset,
970 					(int)xfersize, uio);
971 		if (error)
972 			break;
973 
974 		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
975 		   (LIST_EMPTY(&bp->b_dep))) {
976 			/*
977 			 * If there are no dependencies, and it's VMIO,
978 			 * then we don't need the buf, mark it available
979 			 * for freeing. The VM has the data.
980 			 */
981 			bp->b_flags |= B_RELBUF;
982 			brelse(bp);
983 		} else {
984 			/*
985 			 * Otherwise let whoever
986 			 * made the request take care of
987 			 * freeing it. We just queue
988 			 * it onto another list.
989 			 */
990 			bqrelse(bp);
991 		}
992 	}
993 
994 	/*
995 	 * This can only happen in the case of an error
996 	 * because the loop above resets bp to NULL on each iteration
997 	 * and on normal completion has not set a new value into it,
998 	 * so it must have come from a 'break' statement.
999 	 */
1000 	if (bp != NULL) {
1001 		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
1002 		   (LIST_EMPTY(&bp->b_dep))) {
1003 			bp->b_flags |= B_RELBUF;
1004 			brelse(bp);
1005 		} else {
1006 			bqrelse(bp);
1007 		}
1008 	}
1009 	return (error);
1010 }
1011 
1012 /*
1013  * Extended attribute area writing.
1014  */
1015 static int
1016 ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)
1017 {
1018 	struct inode *ip;
1019 	struct ufs2_dinode *dp;
1020 	struct fs *fs;
1021 	struct buf *bp;
1022 	ufs_lbn_t lbn;
1023 	off_t osize;
1024 	int blkoffset, error, flags, resid, size, xfersize;
1025 
1026 	ip = VTOI(vp);
1027 	fs = ip->i_fs;
1028 	dp = ip->i_din2;
1029 
1030 	KASSERT(!(ip->i_flag & IN_SPACECOUNTED), ("inode %u: inode is dead",
1031 	    ip->i_number));
1032 
1033 #ifdef INVARIANTS
1034 	if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
1035 		panic("ffs_extwrite: mode");
1036 #endif
1037 
1038 	if (ioflag & IO_APPEND)
1039 		uio->uio_offset = dp->di_extsize;
1040 	KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0"));
1041 	KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0"));
1042 	if ((uoff_t)uio->uio_offset + uio->uio_resid > NXADDR * fs->fs_bsize)
1043 		return (EFBIG);
1044 
1045 	resid = uio->uio_resid;
1046 	osize = dp->di_extsize;
1047 	flags = IO_EXT;
1048 	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
1049 		flags |= IO_SYNC;
1050 
1051 	for (error = 0; uio->uio_resid > 0;) {
1052 		lbn = lblkno(fs, uio->uio_offset);
1053 		blkoffset = blkoff(fs, uio->uio_offset);
1054 		xfersize = fs->fs_bsize - blkoffset;
1055 		if (uio->uio_resid < xfersize)
1056 			xfersize = uio->uio_resid;
1057 
1058 		/*
1059 		 * We must perform a read-before-write if the transfer size
1060 		 * does not cover the entire buffer.
1061 		 */
1062 		if (fs->fs_bsize > xfersize)
1063 			flags |= BA_CLRBUF;
1064 		else
1065 			flags &= ~BA_CLRBUF;
1066 		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
1067 		    ucred, flags, &bp);
1068 		if (error != 0)
1069 			break;
1070 		/*
1071 		 * If the buffer is not valid we have to clear out any
1072 		 * garbage data from the pages instantiated for the buffer.
1073 		 * If we do not, a failed uiomove() during a write can leave
1074 		 * the prior contents of the pages exposed to a userland
1075 		 * mmap().  XXX deal with uiomove() errors a better way.
1076 		 */
1077 		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
1078 			vfs_bio_clrbuf(bp);
1079 		if (ioflag & IO_DIRECT)
1080 			bp->b_flags |= B_DIRECT;
1081 
1082 		if (uio->uio_offset + xfersize > dp->di_extsize)
1083 			dp->di_extsize = uio->uio_offset + xfersize;
1084 
1085 		size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
1086 		if (size < xfersize)
1087 			xfersize = size;
1088 
1089 		error =
1090 		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
1091 		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
1092 		   (LIST_EMPTY(&bp->b_dep))) {
1093 			bp->b_flags |= B_RELBUF;
1094 		}
1095 
1096 		/*
1097 		 * If IO_SYNC each buffer is written synchronously.  Otherwise
1098 		 * if we have a severe page deficiency write the buffer
1099 		 * asynchronously.  Otherwise try to cluster, and if that
1100 		 * doesn't do it then either do an async write (if O_DIRECT),
1101 		 * or a delayed write (if not).
1102 		 */
1103 		if (ioflag & IO_SYNC) {
1104 			(void)bwrite(bp);
1105 		} else if (vm_page_count_severe() ||
1106 			    buf_dirty_count_severe() ||
1107 			    xfersize + blkoffset == fs->fs_bsize ||
1108 			    (ioflag & (IO_ASYNC | IO_DIRECT)))
1109 			bawrite(bp);
1110 		else
1111 			bdwrite(bp);
1112 		if (error || xfersize == 0)
1113 			break;
1114 		ip->i_flag |= IN_CHANGE;
1115 	}
1116 	/*
1117 	 * If we successfully wrote any data, and we are not the superuser
1118 	 * we clear the setuid and setgid bits as a precaution against
1119 	 * tampering.
1120 	 */
1121 	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ucred) {
1122 		if (priv_check_cred(ucred, PRIV_VFS_RETAINSUGID, 0)) {
1123 			ip->i_mode &= ~(ISUID | ISGID);
1124 			dp->di_mode = ip->i_mode;
1125 		}
1126 	}
1127 	if (error) {
1128 		if (ioflag & IO_UNIT) {
1129 			(void)ffs_truncate(vp, osize,
1130 			    IO_EXT | (ioflag&IO_SYNC), ucred, uio->uio_td);
1131 			uio->uio_offset -= resid - uio->uio_resid;
1132 			uio->uio_resid = resid;
1133 		}
1134 	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
1135 		error = ffs_update(vp, 1);
1136 	return (error);
1137 }
1138 
1139 
1140 /*
1141  * Helper for the extended attribute vnode operations below.
1142  *
1143  * Locate a particular EA (nspace:name) in the area (ptr:length), and return
1144  * the length of the EA, and possibly the pointer to the entry and to the data.
1145  */
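 *
 * Each record in the area consists of a 32-bit total record length, a
 * namespace byte, a content-padding-length byte, a name-length byte,
 * the name padded to an 8-byte boundary, and then the content followed
 * by its padding (the layout written by ffs_setextattr() below).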
1146 static int
1147 ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name, u_char **eap, u_char **eac)
1148 {
1149 	u_char *p, *pe, *pn, *p0;
1150 	int eapad1, eapad2, ealength, ealen, nlen;
1151 	uint32_t ul;
1152 
1153 	pe = ptr + length;
1154 	nlen = strlen(name);
1155 
1156 	for (p = ptr; p < pe; p = pn) {
1157 		p0 = p;
1158 		bcopy(p, &ul, sizeof(ul));
1159 		pn = p + ul;
1160 		/* make sure this entry is complete */
1161 		if (pn > pe)
1162 			break;
1163 		p += sizeof(uint32_t);
1164 		if (*p != nspace)
1165 			continue;
1166 		p++;
1167 		eapad2 = *p++;
1168 		if (*p != nlen)
1169 			continue;
1170 		p++;
1171 		if (bcmp(p, name, nlen))
1172 			continue;
1173 		ealength = sizeof(uint32_t) + 3 + nlen;
1174 		eapad1 = 8 - (ealength % 8);
1175 		if (eapad1 == 8)
1176 			eapad1 = 0;
1177 		ealength += eapad1;
1178 		ealen = ul - ealength - eapad2;
1179 		p += nlen + eapad1;
1180 		if (eap != NULL)
1181 			*eap = p0;
1182 		if (eac != NULL)
1183 			*eac = p;
1184 		return (ealen);
1185 	}
1186 	return(-1);
1187 }
1188 
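/*
 * Read the entire extended attribute area of a vnode into a freshly
 * allocated buffer (with "extra" bytes of slack), returned through *p.
 */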
1189 static int
1190 ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td, int extra)
1191 {
1192 	struct inode *ip;
1193 	struct ufs2_dinode *dp;
1194 	struct fs *fs;
1195 	struct uio luio;
1196 	struct iovec liovec;
1197 	int easize, error;
1198 	u_char *eae;
1199 
1200 	ip = VTOI(vp);
1201 	fs = ip->i_fs;
1202 	dp = ip->i_din2;
1203 	easize = dp->di_extsize;
1204 	if ((uoff_t)easize + extra > NXADDR * fs->fs_bsize)
1205 		return (EFBIG);
1206 
1207 	eae = malloc(easize + extra, M_TEMP, M_WAITOK);
1208 
1209 	liovec.iov_base = eae;
1210 	liovec.iov_len = easize;
1211 	luio.uio_iov = &liovec;
1212 	luio.uio_iovcnt = 1;
1213 	luio.uio_offset = 0;
1214 	luio.uio_resid = easize;
1215 	luio.uio_segflg = UIO_SYSSPACE;
1216 	luio.uio_rw = UIO_READ;
1217 	luio.uio_td = td;
1218 
1219 	error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC);
1220 	if (error) {
1221 		free(eae, M_TEMP);
1222 		return(error);
1223 	}
1224 	*p = eae;
1225 	return (0);
1226 }
1227 
1228 static int
1229 ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
1230 {
1231 	struct inode *ip;
1232 	struct ufs2_dinode *dp;
1233 	int error;
1234 
1235 	ip = VTOI(vp);
1236 
1237 	if (ip->i_ea_area != NULL)
1238 		return (EBUSY);
1239 	dp = ip->i_din2;
1240 	error = ffs_rdextattr(&ip->i_ea_area, vp, td, 0);
1241 	if (error)
1242 		return (error);
1243 	ip->i_ea_len = dp->di_extsize;
1244 	ip->i_ea_error = 0;
1245 	return (0);
1246 }
1247 
1248 /*
1249  * Vnode extattr transaction commit/abort
1250  */
1251 static int
1252 ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred, struct thread *td)
1253 {
1254 	struct inode *ip;
1255 	struct uio luio;
1256 	struct iovec liovec;
1257 	int error;
1258 	struct ufs2_dinode *dp;
1259 
1260 	ip = VTOI(vp);
1261 	if (ip->i_ea_area == NULL)
1262 		return (EINVAL);
1263 	dp = ip->i_din2;
1264 	error = ip->i_ea_error;
1265 	if (commit && error == 0) {
1266 		if (cred == NOCRED)
1267 			cred = vp->v_mount->mnt_cred;
1268 		liovec.iov_base = ip->i_ea_area;
1269 		liovec.iov_len = ip->i_ea_len;
1270 		luio.uio_iov = &liovec;
1271 		luio.uio_iovcnt = 1;
1272 		luio.uio_offset = 0;
1273 		luio.uio_resid = ip->i_ea_len;
1274 		luio.uio_segflg = UIO_SYSSPACE;
1275 		luio.uio_rw = UIO_WRITE;
1276 		luio.uio_td = td;
1277 		/* XXX: I'm not happy about truncating to zero size */
1278 		if (ip->i_ea_len < dp->di_extsize)
1279 			error = ffs_truncate(vp, 0, IO_EXT, cred, td);
1280 		error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
1281 	}
1282 	free(ip->i_ea_area, M_TEMP);
1283 	ip->i_ea_area = NULL;
1284 	ip->i_ea_len = 0;
1285 	ip->i_ea_error = 0;
1286 	return (error);
1287 }
1288 
1289 /*
1290  * Vnode extattr strategy routine for fifos.
1291  *
1292  * We need to check for a read or write of the extended attributes.
1293  * Otherwise we just fall through and do the usual thing.
1294  */
1295 static int
1296 ffsext_strategy(struct vop_strategy_args *ap)
1297 /*
1298 struct vop_strategy_args {
1299 	struct vnodeop_desc *a_desc;
1300 	struct vnode *a_vp;
1301 	struct buf *a_bp;
1302 };
1303 */
1304 {
1305 	struct vnode *vp;
1306 	daddr_t lbn;
1307 
1308 	vp = ap->a_vp;
1309 	lbn = ap->a_bp->b_lblkno;
1310 	if (VTOI(vp)->i_fs->fs_magic == FS_UFS2_MAGIC &&
1311 	    lbn < 0 && lbn >= -NXADDR)
1312 		return (VOP_STRATEGY_APV(&ufs_vnodeops, ap));
1313 	if (vp->v_type == VFIFO)
1314 		return (VOP_STRATEGY_APV(&ufs_fifoops, ap));
1315 	panic("spec nodes went here");
1316 }
1317 
1318 /*
1319  * Vnode operation to open an extended attribute transaction.
1320  */
1321 static int
1322 ffs_openextattr(struct vop_openextattr_args *ap)
1323 /*
1324 struct vop_openextattr_args {
1325 	struct vnodeop_desc *a_desc;
1326 	struct vnode *a_vp;
1327 	IN struct ucred *a_cred;
1328 	IN struct thread *a_td;
1329 };
1330 */
1331 {
1332 	struct inode *ip;
1333 	struct fs *fs;
1334 
1335 	ip = VTOI(ap->a_vp);
1336 	fs = ip->i_fs;
1337 
1338 	if (ap->a_vp->v_type == VCHR)
1339 		return (EOPNOTSUPP);
1340 
1341 	return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
1342 }
1343 
1344 
1345 /*
1346  * Vnode extattr transaction commit/abort
1347  */
1348 static int
1349 ffs_closeextattr(struct vop_closeextattr_args *ap)
1350 /*
1351 struct vop_closeextattr_args {
1352 	struct vnodeop_desc *a_desc;
1353 	struct vnode *a_vp;
1354 	int a_commit;
1355 	IN struct ucred *a_cred;
1356 	IN struct thread *a_td;
1357 };
1358 */
1359 {
1360 	struct inode *ip;
1361 	struct fs *fs;
1362 
1363 	ip = VTOI(ap->a_vp);
1364 	fs = ip->i_fs;
1365 
1366 	if (ap->a_vp->v_type == VCHR)
1367 		return (EOPNOTSUPP);
1368 
1369 	if (ap->a_commit && (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY))
1370 		return (EROFS);
1371 
1372 	return (ffs_close_ea(ap->a_vp, ap->a_commit, ap->a_cred, ap->a_td));
1373 }
1374 
1375 /*
1376  * Vnode operation to remove a named attribute.
1377  */
1378 static int
1379 ffs_deleteextattr(struct vop_deleteextattr_args *ap)
1380 /*
1381 vop_deleteextattr {
1382 	IN struct vnode *a_vp;
1383 	IN int a_attrnamespace;
1384 	IN const char *a_name;
1385 	IN struct ucred *a_cred;
1386 	IN struct thread *a_td;
1387 };
1388 */
1389 {
1390 	struct inode *ip;
1391 	struct fs *fs;
1392 	uint32_t ealength, ul;
1393 	int ealen, olen, eapad1, eapad2, error, i, easize;
1394 	u_char *eae, *p;
1395 	int stand_alone;
1396 
1397 	ip = VTOI(ap->a_vp);
1398 	fs = ip->i_fs;
1399 
1400 	if (ap->a_vp->v_type == VCHR)
1401 		return (EOPNOTSUPP);
1402 
1403 	if (strlen(ap->a_name) == 0)
1404 		return (EINVAL);
1405 
1406 	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
1407 		return (EROFS);
1408 
1409 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1410 	    ap->a_cred, ap->a_td, VWRITE);
1411 	if (error) {
1412 		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
1413 			ip->i_ea_error = error;
1414 		return (error);
1415 	}
1416 
1417 	if (ip->i_ea_area == NULL) {
1418 		error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
1419 		if (error)
1420 			return (error);
1421 		stand_alone = 1;
1422 	} else {
1423 		stand_alone = 0;
1424 	}
1425 
1426 	ealength = eapad1 = ealen = eapad2 = 0;
1427 
1428 	eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK);
1429 	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
1430 	easize = ip->i_ea_len;
1431 
1432 	olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
1433 	    &p, NULL);
1434 	if (olen == -1) {
1435 		/* delete but nonexistent */
1436 		free(eae, M_TEMP);
1437 		if (stand_alone)
1438 			ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
1439 		return(ENOATTR);
1440 	}
1441 	bcopy(p, &ul, sizeof ul);
1442 	i = p - eae + ul;
1443 	if (ul != ealength) {
1444 		bcopy(p + ul, p + ealength, easize - i);
1445 		easize += (ealength - ul);
1446 	}
1447 	if (easize > NXADDR * fs->fs_bsize) {
1448 		free(eae, M_TEMP);
1449 		if (stand_alone)
1450 			ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
1451 		else if (ip->i_ea_error == 0)
1452 			ip->i_ea_error = ENOSPC;
1453 		return(ENOSPC);
1454 	}
1455 	p = ip->i_ea_area;
1456 	ip->i_ea_area = eae;
1457 	ip->i_ea_len = easize;
1458 	free(p, M_TEMP);
1459 	if (stand_alone)
1460 		error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
1461 	return(error);
1462 }
1463 
1464 /*
1465  * Vnode operation to retrieve a named extended attribute.
1466  */
1467 static int
1468 ffs_getextattr(struct vop_getextattr_args *ap)
1469 /*
1470 vop_getextattr {
1471 	IN struct vnode *a_vp;
1472 	IN int a_attrnamespace;
1473 	IN const char *a_name;
1474 	INOUT struct uio *a_uio;
1475 	OUT size_t *a_size;
1476 	IN struct ucred *a_cred;
1477 	IN struct thread *a_td;
1478 };
1479 */
1480 {
1481 	struct inode *ip;
1482 	struct fs *fs;
1483 	u_char *eae, *p;
1484 	unsigned easize;
1485 	int error, ealen, stand_alone;
1486 
1487 	ip = VTOI(ap->a_vp);
1488 	fs = ip->i_fs;
1489 
1490 	if (ap->a_vp->v_type == VCHR)
1491 		return (EOPNOTSUPP);
1492 
1493 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1494 	    ap->a_cred, ap->a_td, VREAD);
1495 	if (error)
1496 		return (error);
1497 
1498 	if (ip->i_ea_area == NULL) {
1499 		error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
1500 		if (error)
1501 			return (error);
1502 		stand_alone = 1;
1503 	} else {
1504 		stand_alone = 0;
1505 	}
1506 	eae = ip->i_ea_area;
1507 	easize = ip->i_ea_len;
1508 
1509 	ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
1510 	    NULL, &p);
1511 	if (ealen >= 0) {
1512 		error = 0;
1513 		if (ap->a_size != NULL)
1514 			*ap->a_size = ealen;
1515 		else if (ap->a_uio != NULL)
1516 			error = uiomove(p, ealen, ap->a_uio);
1517 	} else
1518 		error = ENOATTR;
1519 	if (stand_alone)
1520 		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
1521 	return(error);
1522 }
1523 
1524 /*
1525  * Vnode operation to retrieve extended attributes on a vnode.
1526  */
1527 static int
1528 ffs_listextattr(struct vop_listextattr_args *ap)
1529 /*
1530 vop_listextattr {
1531 	IN struct vnode *a_vp;
1532 	IN int a_attrnamespace;
1533 	INOUT struct uio *a_uio;
1534 	OUT size_t *a_size;
1535 	IN struct ucred *a_cred;
1536 	IN struct thread *a_td;
1537 };
1538 */
1539 {
1540 	struct inode *ip;
1541 	struct fs *fs;
1542 	u_char *eae, *p, *pe, *pn;
1543 	unsigned easize;
1544 	uint32_t ul;
1545 	int error, ealen, stand_alone;
1546 
1547 	ip = VTOI(ap->a_vp);
1548 	fs = ip->i_fs;
1549 
1550 	if (ap->a_vp->v_type == VCHR)
1551 		return (EOPNOTSUPP);
1552 
1553 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1554 	    ap->a_cred, ap->a_td, VREAD);
1555 	if (error)
1556 		return (error);
1557 
1558 	if (ip->i_ea_area == NULL) {
1559 		error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
1560 		if (error)
1561 			return (error);
1562 		stand_alone = 1;
1563 	} else {
1564 		stand_alone = 0;
1565 	}
1566 	eae = ip->i_ea_area;
1567 	easize = ip->i_ea_len;
1568 
1569 	error = 0;
1570 	if (ap->a_size != NULL)
1571 		*ap->a_size = 0;
1572 	pe = eae + easize;
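	/*
	 * Each matching attribute is returned as a one-byte name length
	 * followed by the name itself, the format consumed by the
	 * extattr_list_*(2) system calls.
	 */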
1573 	for(p = eae; error == 0 && p < pe; p = pn) {
1574 		bcopy(p, &ul, sizeof(ul));
1575 		pn = p + ul;
1576 		if (pn > pe)
1577 			break;
1578 		p += sizeof(ul);
1579 		if (*p++ != ap->a_attrnamespace)
1580 			continue;
1581 		p++;	/* pad2 */
1582 		ealen = *p;
1583 		if (ap->a_size != NULL) {
1584 			*ap->a_size += ealen + 1;
1585 		} else if (ap->a_uio != NULL) {
1586 			error = uiomove(p, ealen + 1, ap->a_uio);
1587 		}
1588 	}
1589 	if (stand_alone)
1590 		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
1591 	return(error);
1592 }
1593 
1594 /*
1595  * Vnode operation to set a named attribute.
1596  */
1597 static int
1598 ffs_setextattr(struct vop_setextattr_args *ap)
1599 /*
1600 vop_setextattr {
1601 	IN struct vnode *a_vp;
1602 	IN int a_attrnamespace;
1603 	IN const char *a_name;
1604 	INOUT struct uio *a_uio;
1605 	IN struct ucred *a_cred;
1606 	IN struct thread *a_td;
1607 };
1608 */
1609 {
1610 	struct inode *ip;
1611 	struct fs *fs;
1612 	uint32_t ealength, ul;
1613 	int ealen, olen, eapad1, eapad2, error, i, easize;
1614 	u_char *eae, *p;
1615 	int stand_alone;
1616 
1617 	ip = VTOI(ap->a_vp);
1618 	fs = ip->i_fs;
1619 
1620 	if (ap->a_vp->v_type == VCHR)
1621 		return (EOPNOTSUPP);
1622 
1623 	if (strlen(ap->a_name) == 0)
1624 		return (EINVAL);
1625 
1626 	/* XXX Now unsupported API to delete EAs using NULL uio. */
1627 	if (ap->a_uio == NULL)
1628 		return (EOPNOTSUPP);
1629 
1630 	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
1631 		return (EROFS);
1632 
1633 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1634 	    ap->a_cred, ap->a_td, VWRITE);
1635 	if (error) {
1636 		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
1637 			ip->i_ea_error = error;
1638 		return (error);
1639 	}
1640 
1641 	if (ip->i_ea_area == NULL) {
1642 		error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
1643 		if (error)
1644 			return (error);
1645 		stand_alone = 1;
1646 	} else {
1647 		stand_alone = 0;
1648 	}
1649 
1650 	ealen = ap->a_uio->uio_resid;
1651 	ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name);
1652 	eapad1 = 8 - (ealength % 8);
1653 	if (eapad1 == 8)
1654 		eapad1 = 0;
1655 	eapad2 = 8 - (ealen % 8);
1656 	if (eapad2 == 8)
1657 		eapad2 = 0;
1658 	ealength += eapad1 + ealen + eapad2;
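	/*
	 * For example, a 5-byte name gives a 12-byte header padded to 16
	 * bytes (eapad1 = 4), and a 10-byte value gets eapad2 = 6, for a
	 * 32-byte record in total.
	 */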
1659 
1660 	eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK);
1661 	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
1662 	easize = ip->i_ea_len;
1663 
1664 	olen = ffs_findextattr(eae, easize,
1665 	    ap->a_attrnamespace, ap->a_name, &p, NULL);
1666 	if (olen == -1) {
1667 		/* new, append at end */
1668 		p = eae + easize;
1669 		easize += ealength;
1670 	} else {
1671 		bcopy(p, &ul, sizeof ul);
1672 		i = p - eae + ul;
1673 		if (ul != ealength) {
1674 			bcopy(p + ul, p + ealength, easize - i);
1675 			easize += (ealength - ul);
1676 		}
1677 	}
1678 	if (easize > NXADDR * fs->fs_bsize) {
1679 		free(eae, M_TEMP);
1680 		if (stand_alone)
1681 			ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
1682 		else if (ip->i_ea_error == 0)
1683 			ip->i_ea_error = ENOSPC;
1684 		return(ENOSPC);
1685 	}
1686 	bcopy(&ealength, p, sizeof(ealength));
1687 	p += sizeof(ealength);
1688 	*p++ = ap->a_attrnamespace;
1689 	*p++ = eapad2;
1690 	*p++ = strlen(ap->a_name);
1691 	strcpy(p, ap->a_name);
1692 	p += strlen(ap->a_name);
1693 	bzero(p, eapad1);
1694 	p += eapad1;
1695 	error = uiomove(p, ealen, ap->a_uio);
1696 	if (error) {
1697 		free(eae, M_TEMP);
1698 		if (stand_alone)
1699 			ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
1700 		else if (ip->i_ea_error == 0)
1701 			ip->i_ea_error = error;
1702 		return(error);
1703 	}
1704 	p += ealen;
1705 	bzero(p, eapad2);
1706 
1707 	p = ip->i_ea_area;
1708 	ip->i_ea_area = eae;
1709 	ip->i_ea_len = easize;
1710 	free(p, M_TEMP);
1711 	if (stand_alone)
1712 		error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
1713 	return(error);
1714 }
1715 
1716 /*
1717  * Vnode pointer to File handle
1718  */
1719 static int
1720 ffs_vptofh(struct vop_vptofh_args *ap)
1721 /*
1722 vop_vptofh {
1723 	IN struct vnode *a_vp;
1724 	IN struct fid *a_fhp;
1725 };
1726 */
1727 {
1728 	struct inode *ip;
1729 	struct ufid *ufhp;
1730 
1731 	ip = VTOI(ap->a_vp);
1732 	ufhp = (struct ufid *)ap->a_fhp;
1733 	ufhp->ufid_len = sizeof(struct ufid);
1734 	ufhp->ufid_ino = ip->i_number;
1735 	ufhp->ufid_gen = ip->i_gen;
1736 	return (0);
1737 }
1738