xref: /freebsd/sys/kern/vfs_vnops.c (revision 6990ffd8a95caaba6858ad44ff1b3157d1efba8f)
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>

#include <machine/limits.h>

static int vn_closefile __P((struct file *fp, struct thread *td));
static int vn_ioctl __P((struct file *fp, u_long com, caddr_t data,
		struct thread *td));
static int vn_read __P((struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td));
static int vn_poll __P((struct file *fp, int events, struct ucred *cred,
		struct thread *td));
static int vn_kqfilter __P((struct file *fp, struct knote *kn));
static int vn_statfile __P((struct file *fp, struct stat *sb, struct thread *td));
static int vn_write __P((struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td));

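/*
 * Dispatch table hooking vnode-backed file descriptors into the
 * generic fileops interface (read, write, ioctl, poll, kqfilter,
 * stat, and close).
 */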
struct fileops vnops = {
	vn_read, vn_write, vn_ioctl, vn_poll, vn_kqfilter,
	vn_statfile, vn_closefile
};

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open(ndp, flagp, cmode)
	register struct nameidata *ndp;
	int *flagp, cmode;
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct ucred *cred = td->td_proc->p_ucred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, fmode, error;

restart:
	fmode = *flagp;
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
			VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
					   &ndp->ni_cnd, vap);
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		mode = 0;
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp);
			if (error)
				goto bad;
			mode |= VWRITE;
		}
		if (fmode & FREAD)
			mode |= VREAD;
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred, td);
			if (error)
				goto bad;
		}
	}
	if ((error = VOP_OPEN(vp, fmode, cred, td)) != 0)
		goto bad;
	/*
	 * Make sure that a VM object is created for VMIO support.
	 */
	if (vn_canvmio(vp) == TRUE) {
		if ((error = vfs_object_create(vp, td, cred)) != 0)
			goto bad;
	}

	if (fmode & FWRITE)
		vp->v_writecount++;
	*flagp = fmode;
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	*flagp = fmode;
	return (error);
}
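
/*
 * Illustrative sketch (not part of this file): in-kernel callers
 * typically pair NDINIT() with vn_open(), free the pathname buffer,
 * and unlock the vnode that vn_open() returns locked.  The path below
 * is hypothetical:
 *
 *	struct nameidata nd;
 *	int flags = FREAD;
 *	int error;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/a/path", td);
 *	if ((error = vn_open(&nd, &flags, 0)) != 0)
 *		return (error);
 *	NDFREE(&nd, NDF_ONLY_PNBUF);
 *	VOP_UNLOCK(nd.ni_vp, 0, td);
 *	... use nd.ni_vp ...
 *	vn_close(nd.ni_vp, FREAD, td->td_proc->p_ucred, td);
 */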

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	/*
	 * If there's shared text associated with
	 * the vnode, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(vp, flags, cred, td)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct thread *td;
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	error = VOP_CLOSE(vp, flags, cred, td);
	/*
	 * XXX - In certain instances VOP_CLOSE has to do the vrele
	 * itself. If the vrele has been done, it will return EAGAIN
	 * to indicate that the vrele should not be done again. When
	 * this happens, we just return success. The correct thing to
	 * do would be to have all VOP_CLOSE instances do the vrele.
	 */
	if (error == EAGAIN)
		return (0);
	vrele(vp);
	return (error);
}

static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		fp->f_seqcount += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (fp->f_seqcount >= 127)
			fp->f_seqcount = 127;
		return (fp->f_seqcount << 16);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}
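
/*
 * The value returned by sequential_heuristic() is or'ed into the
 * ioflag argument that vn_read() and vn_write() pass to
 * VOP_READ()/VOP_WRITE(), so the sequence count travels in the upper
 * 16 bits of ioflag and can serve as a read-ahead hint to the
 * filesystem.
 */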

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *cred;
	int *aresid;
	struct thread *td;
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		mp = NULL;
		if (rw == UIO_WRITE &&
		    vp->v_type != VCHR &&
		    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}
	return (error);
}
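
/*
 * Illustrative sketch (not part of this file): reading the first 512
 * bytes of a vnode into a kernel buffer with vn_rdwr(); the vnode is
 * assumed not to be locked by the caller:
 *
 *	char buf[512];
 *	int resid, error;
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf), (off_t)0,
 *	    UIO_SYSSPACE, 0, td->td_proc->p_ucred, &resid, td);
 *
 * On return, resid holds the number of requested bytes not transferred.
 */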

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks, and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * call bwillwrite() before each chunk is passed to vn_rdwr().
 */
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *cred;
	int *aresid;
	struct thread *td;
{
	int error = 0;

	do {
		int chunk = (len > MAXBSIZE) ? MAXBSIZE : len;

		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, cred, aresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

/*
 * File table vnode read routine.
 */
static int
vn_read(fp, uio, cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = (struct vnode *)fp->f_data;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	VOP_LEASE(vp, td, cred, LEASE_READ);
	vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ(vp, uio, ioflag, cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	struct mount *mp;
	int error, ioflag;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = (struct vnode *)fp->f_data;
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		return (error);
	VOP_LEASE(vp, td, cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(fp, sb, td)
	struct file *fp;
	struct stat *sb;
	struct thread *td;
{
	struct vnode *vp = (struct vnode *)fp->f_data;

	return vn_stat(vp, sb, td);
}

int
vn_stat(vp, sb, td)
	struct vnode *vp;
	register struct stat *sb;
	struct thread *td;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, td->td_proc->p_ucred, td);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare[0] = 0;
	sb->st_qspare[1] = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/*
		 * This is a cosmetic change, symlinks do not have a
		 * mode.  Adjust "mode" rather than sb->st_mode; the
		 * latter is only assigned (from "mode") after this
		 * switch, so changes to it here would be lost.
		 */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX)
		return (EOVERFLOW);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Default to zero to catch bogus uses of this field.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		sb->st_blksize = vp->v_rdev->si_bsize_best;
		if (sb->st_blksize < vp->v_rdev->si_bsize_phys)
			sb->st_blksize = vp->v_rdev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = 0;
	}

	sb->st_flags = vap->va_flags;
	if (suser_xxx(td->td_proc->p_ucred, 0, 0))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	return (0);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(fp, com, data, td)
	struct file *fp;
	u_long com;
	caddr_t data;
	struct thread *td;
{
	register struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr, td->td_proc->p_ucred, td);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */

	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK)
				return (ENOTTY);
			*(int *)data = devsw(vp->v_rdev)->d_flags & D_TYPEMASK;
			return (0);
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, td->td_proc->p_ucred, td);
		if (error == 0 && com == TIOCSCTTY) {

			/* Do nothing if reassigning same control tty */
			if (td->td_proc->p_session->s_ttyvp == vp)
				return (0);

			/* Get rid of reference to old control tty */
			if (td->td_proc->p_session->s_ttyvp)
				vrele(td->td_proc->p_session->s_ttyvp);

			td->td_proc->p_session->s_ttyvp = vp;
			VREF(vp);
		}
		return (error);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(fp, events, cred, td)
	struct file *fp;
	int events;
	struct ucred *cred;
	struct thread *td;
{

	return (VOP_POLL(((struct vnode *)fp->f_data), events, cred, td));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire the requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(vp, flags, td)
#else
debug_vn_lock(vp, flags, td, filename, line)
#endif
	struct vnode *vp;
	int flags;
	struct thread *td;
#ifdef	DEBUG_LOCKS
	const char *filename;
	int line;
#endif
{
	int error;

	do {
		if ((flags & LK_INTERLOCK) == 0)
			mtx_lock(&vp->v_interlock);
		if ((vp->v_flag & VXLOCK) && vp->v_vxproc != curthread) {
			vp->v_flag |= VXWANT;
			msleep(vp, &vp->v_interlock, PINOD | PDROP,
			    "vn_lock", 0);
			error = ENOENT;
		} else {
			if (vp->v_vxproc != NULL)
				printf("VXLOCK interlock avoided in vn_lock\n");
#ifdef	DEBUG_LOCKS
			vp->filename = filename;
			vp->line = line;
#endif
			error = VOP_LOCK(vp,
				    flags | LK_NOPAUSE | LK_INTERLOCK, td);
			if (error == 0)
				return (error);
		}
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY);
	return (error);
}

/*
 * File table vnode close routine.
 */
static int
vn_closefile(fp, td)
	struct file *fp;
	struct thread *td;
{

	fp->f_ops = &badfileops;
	return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
		fp->f_cred, td));
}

/*
 * Prepare to start a filesystem write operation. If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed. If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
int
vn_start_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

	/*
	 * If a vnode is provided, get and return the mount point
	 * to which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);
	/*
	 * Check on status of suspension.
	 */
	while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		if (flags & V_NOWAIT)
			return (EWOULDBLOCK);
		error = tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
		    "suspfs", 0);
		if (error)
			return (error);
	}
	if (flags & V_XSLEEP)
		return (0);
	mp->mnt_writeopcount++;
	return (0);
}
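
/*
 * Illustrative sketch (not part of this file): write paths bracket
 * their work with vn_start_write() and vn_finished_write() so that a
 * suspension request (e.g. from vfs_write_suspend()) can drain them:
 *
 *	struct mount *mp;
 *	int error;
 *
 *	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
 *		return (error);
 *	... perform the write, e.g. via VOP_WRITE() ...
 *	vn_finished_write(mp);
 */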

/*
 * Secondary suspension. Used by operations such as vop_inactive
 * routines that are needed by the higher level functions. These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero). At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_write_suspend_wait(vp, mp, flags)
	struct vnode *vp;
	struct mount *mp;
	int flags;
{
	int error;

	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, &mp)) != 0) {
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0)
		return (0);
	if (flags & V_NOWAIT)
		return (EWOULDBLOCK);
	/*
	 * Wait for the suspension to finish.
	 */
	return (tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
	    "suspfs", 0));
}

/*
 * A filesystem write operation has completed. If we are suspending and
 * this operation is the last one, notify the suspender that the
 * suspension is now in effect.
 */
void
vn_finished_write(mp)
	struct mount *mp;
{

	if (mp == NULL)
		return;
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
}

/*
 * Request a filesystem to suspend write operations.
 */
void
vfs_write_suspend(mp)
	struct mount *mp;
{
	struct thread *td = curthread;

	if (mp->mnt_kern_flag & MNTK_SUSPEND)
		return;
	mp->mnt_kern_flag |= MNTK_SUSPEND;
	if (mp->mnt_writeopcount > 0)
		(void) tsleep(&mp->mnt_writeopcount, PUSER - 1, "suspwt", 0);
	VFS_SYNC(mp, MNT_WAIT, td->td_proc->p_ucred, td);
	mp->mnt_kern_flag |= MNTK_SUSPENDED;
}

/*
 * Request a filesystem to resume write operations.
 */
void
vfs_write_resume(mp)
	struct mount *mp;
{

	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0)
		return;
	mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPENDED);
	wakeup(&mp->mnt_writeopcount);
	wakeup(&mp->mnt_flag);
}

static int
vn_kqfilter(struct file *fp, struct knote *kn)
{

	return (VOP_KQFILTER(((struct vnode *)fp->f_data), kn));
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * These calls pass in a NULL credential, authorizing as "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
	struct uio	auio;
	struct iovec	iov;
	int	error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0, td);

	if (error == 0) {
		*buflen = *buflen - auio.uio_resid;
	}

	return (error);
}
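
/*
 * Illustrative sketch (not part of this file; the attribute name is
 * hypothetical and EXTATTR_NAMESPACE_SYSTEM is assumed to come from
 * <sys/extattr.h>): fetching an attribute on an already-locked vnode:
 *
 *	char buf[128];
 *	int buflen = sizeof(buf);
 *	int error;
 *
 *	error = vn_extattr_get(vp, IO_NODELOCKED, EXTATTR_NAMESPACE_SYSTEM,
 *	    "example", &buflen, buf, td);
 *
 * On success, buflen is updated to the number of bytes actually read.
 */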

/*
 * XXX failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
	struct uio	auio;
	struct iovec	iov;
	struct mount	*mp;
	int	error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}
int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct mount	*mp;
	int	error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/* authorize attribute removal as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}