xref: /freebsd/sys/kern/vfs_vnops.c (revision a79b71281cd63ad7a6cc43a6d5673a2510b51630)
1 /*
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
39  * $FreeBSD$
40  */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/fcntl.h>
45 #include <sys/file.h>
46 #include <sys/stat.h>
47 #include <sys/proc.h>
48 #include <sys/mount.h>
49 #include <sys/namei.h>
50 #include <sys/vnode.h>
51 #include <sys/bio.h>
52 #include <sys/buf.h>
53 #include <sys/filio.h>
54 #include <sys/ttycom.h>
55 #include <sys/conf.h>
56 
57 #include <ufs/ufs/quota.h>
58 #include <ufs/ufs/inode.h>
59 
60 static int vn_closefile __P((struct file *fp, struct proc *p));
61 static int vn_ioctl __P((struct file *fp, u_long com, caddr_t data,
62 		struct proc *p));
63 static int vn_read __P((struct file *fp, struct uio *uio,
64 		struct ucred *cred, int flags, struct proc *p));
65 static int vn_poll __P((struct file *fp, int events, struct ucred *cred,
66 		struct proc *p));
67 static int vn_statfile __P((struct file *fp, struct stat *sb, struct proc *p));
68 static int vn_write __P((struct file *fp, struct uio *uio,
69 		struct ucred *cred, int flags, struct proc *p));
70 
71 struct 	fileops vnops =
72 	{ vn_read, vn_write, vn_ioctl, vn_poll, vn_statfile, vn_closefile };
73 
74 static int	filt_nullattach(struct knote *kn);
75 static int	filt_vnattach(struct knote *kn);
76 static void	filt_vndetach(struct knote *kn);
77 static int	filt_vnode(struct knote *kn, long hint);
78 static int	filt_vnread(struct knote *kn, long hint);
79 
80 struct filterops vn_filtops =
81 	{ 1, filt_vnattach, filt_vndetach, filt_vnode };
82 
83 /*
84  * XXX
85  * filt_vnread is ufs-specific, so the attach routine should really
86  * switch out to different filterops based on the vn filetype
87  */
88 struct filterops vn_rwfiltops[] = {
89 	{ 1, filt_vnattach, filt_vndetach, filt_vnread },
90 	{ 1, filt_nullattach, NULL, NULL },
91 };
92 
93 /*
94  * Common code for vnode open operations.
95  * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
96  *
97  * Note that this does NOT free the nameidata in the successful case;
98  * the caller that did the NDINIT is responsible for freeing it.
99  */
100 int
101 vn_open(ndp, fmode, cmode)
102 	register struct nameidata *ndp;
103 	int fmode, cmode;
104 {
105 	register struct vnode *vp;
106 	register struct proc *p = ndp->ni_cnd.cn_proc;
107 	register struct ucred *cred = p->p_ucred;
108 	struct vattr vat;
109 	struct vattr *vap = &vat;
110 	int mode, error;
111 
112 	if (fmode & O_CREAT) {
113 		ndp->ni_cnd.cn_nameiop = CREATE;
114 		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
115 		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
116 			ndp->ni_cnd.cn_flags |= FOLLOW;
117 		bwillwrite();
118 		error = namei(ndp);
119 		if (error)
120 			return (error);
121 		if (ndp->ni_vp == NULL) {
122 			VATTR_NULL(vap);
123 			vap->va_type = VREG;
124 			vap->va_mode = cmode;
125 			if (fmode & O_EXCL)
126 				vap->va_vaflags |= VA_EXCLUSIVE;
127 			VOP_LEASE(ndp->ni_dvp, p, cred, LEASE_WRITE);
128 			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
129 					   &ndp->ni_cnd, vap);
130 			if (error) {
131 				NDFREE(ndp, NDF_ONLY_PNBUF);
132 				vput(ndp->ni_dvp);
133 				return (error);
134 			}
135 			vput(ndp->ni_dvp);
136 			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
137 			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
138 			fmode &= ~O_TRUNC;
139 			vp = ndp->ni_vp;
140 		} else {
141 			if (ndp->ni_dvp == ndp->ni_vp)
142 				vrele(ndp->ni_dvp);
143 			else
144 				vput(ndp->ni_dvp);
145 			ndp->ni_dvp = NULL;
146 			vp = ndp->ni_vp;
147 			if (fmode & O_EXCL) {
148 				error = EEXIST;
149 				goto bad;
150 			}
151 			fmode &= ~O_CREAT;
152 		}
153 	} else {
154 		ndp->ni_cnd.cn_nameiop = LOOKUP;
155 		ndp->ni_cnd.cn_flags =
156 		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
157 		error = namei(ndp);
158 		if (error)
159 			return (error);
160 		vp = ndp->ni_vp;
161 	}
162 	if (vp->v_type == VLNK) {
163 		error = EMLINK;
164 		goto bad;
165 	}
166 	if (vp->v_type == VSOCK) {
167 		error = EOPNOTSUPP;
168 		goto bad;
169 	}
170 	if ((fmode & O_CREAT) == 0) {
171 		mode = 0;
172 		if (fmode & (FWRITE | O_TRUNC)) {
173 			if (vp->v_type == VDIR) {
174 				error = EISDIR;
175 				goto bad;
176 			}
177 			error = vn_writechk(vp);
178 			if (error)
179 				goto bad;
180 			mode |= VWRITE;
181 		}
182 		if (fmode & FREAD)
183 			mode |= VREAD;
184 		if (mode) {
185 		        error = VOP_ACCESS(vp, mode, cred, p);
186 			if (error)
187 				goto bad;
188 		}
189 	}
190 	if (fmode & O_TRUNC) {
191 		VOP_UNLOCK(vp, 0, p);				/* XXX */
192 		VOP_LEASE(vp, p, cred, LEASE_WRITE);
193 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);	/* XXX */
194 		VATTR_NULL(vap);
195 		vap->va_size = 0;
196 		error = VOP_SETATTR(vp, vap, cred, p);
197 		if (error)
198 			goto bad;
199 	}
200 	error = VOP_OPEN(vp, fmode, cred, p);
201 	if (error)
202 		goto bad;
203 	/*
204 	 * Make sure that a VM object is created for VMIO support.
205 	 */
206 	if (vn_canvmio(vp) == TRUE) {
207 		if ((error = vfs_object_create(vp, p, cred)) != 0)
208 			goto bad;
209 	}
210 
211 	if (fmode & FWRITE)
212 		vp->v_writecount++;
213 	return (0);
214 bad:
215 	NDFREE(ndp, NDF_ONLY_PNBUF);
216 	vput(vp);
217 	return (error);
218 }
219 
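/*
 * Illustrative sketch (not part of this file): a typical in-kernel
 * caller initializes the nameidata itself, so on success it is also
 * responsible for freeing the pathname buffer and dropping the vnode
 * lock that vn_open() returns held.  The function and variable names
 * below are hypothetical.
 */
#if 0
static int
example_open_for_read(char *path, struct proc *p, struct vnode **vpp)
{
	struct nameidata nd;
	int error;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
	error = vn_open(&nd, FREAD, 0);
	if (error)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);	/* not freed by vn_open on success */
	*vpp = nd.ni_vp;
	VOP_UNLOCK(nd.ni_vp, 0, p);	/* vn_open returns the vnode locked */
	/* the caller later releases it with vn_close(*vpp, FREAD, ...) */
	return (0);
}
#endif
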
220 /*
221  * Check for write permissions on the specified vnode.
222  * Prototype text segments cannot be written.
223  */
224 int
225 vn_writechk(vp)
226 	register struct vnode *vp;
227 {
228 
229 	/*
230 	 * If the vnode is currently in use as a process's
231 	 * text (executable) image, VTEXT is set on it and
232 	 * writing is not allowed.
233 	 */
234 	if (vp->v_flag & VTEXT)
235 		return (ETXTBSY);
236 	return (0);
237 }
238 
239 /*
240  * Vnode close call
241  */
242 int
243 vn_close(vp, flags, cred, p)
244 	register struct vnode *vp;
245 	int flags;
246 	struct ucred *cred;
247 	struct proc *p;
248 {
249 	int error;
250 
251 	if (flags & FWRITE)
252 		vp->v_writecount--;
253 	error = VOP_CLOSE(vp, flags, cred, p);
254 	vrele(vp);
255 	return (error);
256 }
257 
258 static __inline
259 int
260 sequential_heuristic(struct uio *uio, struct file *fp)
261 {
262 	/*
263 	 * Sequential heuristic - detect sequential operation
264 	 */
265 	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
266 	    uio->uio_offset == fp->f_nextoff) {
267 		/*
268 		 * XXX we assume that the filesystem block size is
269 		 * the default.  Not true, but still gives us a pretty
270 		 * good indicator of how sequential the read operations
271 		 * are.
272 		 */
273 		fp->f_seqcount += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
274 		if (fp->f_seqcount >= 127)
275 			fp->f_seqcount = 127;
276 		return(fp->f_seqcount << 16);
277 	}
278 
279 	/*
280 	 * Not sequential, quick draw-down of seqcount
281 	 */
282 	if (fp->f_seqcount > 1)
283 		fp->f_seqcount = 1;
284 	else
285 		fp->f_seqcount = 0;
286 	return(0);
287 }
288 
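/*
 * Worked example (illustrative, assuming BKVASIZE is 16K): a 64K read
 * that starts exactly where the previous one ended bumps f_seqcount
 * by 4; the value returned, f_seqcount << 16, is then or'ed into the
 * ioflag passed down to VOP_READ()/VOP_WRITE(), where the filesystem
 * can use it to scale read-ahead and clustering.
 */
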
289 /*
290  * Package up an I/O request on a vnode into a uio and do it.
291  */
292 int
293 vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p)
294 	enum uio_rw rw;
295 	struct vnode *vp;
296 	caddr_t base;
297 	int len;
298 	off_t offset;
299 	enum uio_seg segflg;
300 	int ioflg;
301 	struct ucred *cred;
302 	int *aresid;
303 	struct proc *p;
304 {
305 	struct uio auio;
306 	struct iovec aiov;
307 	int error;
308 
309 	if ((ioflg & IO_NODELOCKED) == 0)
310 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
311 	auio.uio_iov = &aiov;
312 	auio.uio_iovcnt = 1;
313 	aiov.iov_base = base;
314 	aiov.iov_len = len;
315 	auio.uio_resid = len;
316 	auio.uio_offset = offset;
317 	auio.uio_segflg = segflg;
318 	auio.uio_rw = rw;
319 	auio.uio_procp = p;
320 	if (rw == UIO_READ) {
321 		error = VOP_READ(vp, &auio, ioflg, cred);
322 	} else {
323 		error = VOP_WRITE(vp, &auio, ioflg, cred);
324 	}
325 	if (aresid)
326 		*aresid = auio.uio_resid;
327 	else
328 		if (auio.uio_resid && error == 0)
329 			error = EIO;
330 	if ((ioflg & IO_NODELOCKED) == 0)
331 		VOP_UNLOCK(vp, 0, p);
332 	return (error);
333 }
334 
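/*
 * Illustrative sketch (not part of this file): how a caller holding a
 * referenced vnode might use vn_rdwr() to read the first bytes of a
 * file into a kernel buffer.  The names below are hypothetical.
 */
#if 0
static int
example_read_header(struct vnode *vp, struct ucred *cred, struct proc *p)
{
	char buf[64];
	int resid, error;

	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf), (off_t)0,
	    UIO_SYSSPACE, 0, cred, &resid, p);
	if (error == 0 && resid != 0)
		error = EIO;		/* short read */
	return (error);
}
#endif
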
335 /*
336  * File table vnode read routine.
337  */
338 static int
339 vn_read(fp, uio, cred, flags, p)
340 	struct file *fp;
341 	struct uio *uio;
342 	struct ucred *cred;
343 	struct proc *p;
344 	int flags;
345 {
346 	struct vnode *vp;
347 	int error, ioflag;
348 
349 	KASSERT(uio->uio_procp == p, ("uio_procp %p is not p %p",
350 	    uio->uio_procp, p));
351 	vp = (struct vnode *)fp->f_data;
352 	ioflag = 0;
353 	if (fp->f_flag & FNONBLOCK)
354 		ioflag |= IO_NDELAY;
355 	VOP_LEASE(vp, p, cred, LEASE_READ);
356 	vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, p);
357 	if ((flags & FOF_OFFSET) == 0)
358 		uio->uio_offset = fp->f_offset;
359 
360 	ioflag |= sequential_heuristic(uio, fp);
361 
362 	error = VOP_READ(vp, uio, ioflag, cred);
363 	if ((flags & FOF_OFFSET) == 0)
364 		fp->f_offset = uio->uio_offset;
365 	fp->f_nextoff = uio->uio_offset;
366 	VOP_UNLOCK(vp, 0, p);
367 	return (error);
368 }
369 
370 /*
371  * File table vnode write routine.
372  */
373 static int
374 vn_write(fp, uio, cred, flags, p)
375 	struct file *fp;
376 	struct uio *uio;
377 	struct ucred *cred;
378 	struct proc *p;
379 	int flags;
380 {
381 	struct vnode *vp;
382 	int error, ioflag;
383 
384 	KASSERT(uio->uio_procp == p, ("uio_procp %p is not p %p",
385 	    uio->uio_procp, p));
386 	vp = (struct vnode *)fp->f_data;
387 	if (vp->v_type == VREG)
388 		bwillwrite();
389 	vp = (struct vnode *)fp->f_data;	/* XXX needed? */
390 	ioflag = IO_UNIT;
391 	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
392 		ioflag |= IO_APPEND;
393 	if (fp->f_flag & FNONBLOCK)
394 		ioflag |= IO_NDELAY;
395 	if ((fp->f_flag & O_FSYNC) ||
396 	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
397 		ioflag |= IO_SYNC;
398 	VOP_LEASE(vp, p, cred, LEASE_WRITE);
399 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
400 	if ((flags & FOF_OFFSET) == 0)
401 		uio->uio_offset = fp->f_offset;
402 	ioflag |= sequential_heuristic(uio, fp);
403 	error = VOP_WRITE(vp, uio, ioflag, cred);
404 	if ((flags & FOF_OFFSET) == 0)
405 		fp->f_offset = uio->uio_offset;
406 	fp->f_nextoff = uio->uio_offset;
407 	VOP_UNLOCK(vp, 0, p);
408 	return (error);
409 }
410 
411 /*
412  * File table vnode stat routine.
413  */
414 static int
415 vn_statfile(fp, sb, p)
416 	struct file *fp;
417 	struct stat *sb;
418 	struct proc *p;
419 {
420 	struct vnode *vp = (struct vnode *)fp->f_data;
421 
422 	return vn_stat(vp, sb, p);
423 }
424 
425 int
426 vn_stat(vp, sb, p)
427 	struct vnode *vp;
428 	register struct stat *sb;
429 	struct proc *p;
430 {
431 	struct vattr vattr;
432 	register struct vattr *vap;
433 	int error;
434 	u_short mode;
435 
436 	vap = &vattr;
437 	error = VOP_GETATTR(vp, vap, p->p_ucred, p);
438 	if (error)
439 		return (error);
440 
441 	/*
442 	 * Zero the spare stat fields
443 	 */
444 	sb->st_lspare = 0;
445 	sb->st_qspare[0] = 0;
446 	sb->st_qspare[1] = 0;
447 
448 	/*
449 	 * Copy from vattr table
450 	 */
451 	if (vap->va_fsid != VNOVAL)
452 		sb->st_dev = vap->va_fsid;
453 	else
454 		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
455 	sb->st_ino = vap->va_fileid;
456 	mode = vap->va_mode;
457 	switch (vap->va_type) {
458 	case VREG:
459 		mode |= S_IFREG;
460 		break;
461 	case VDIR:
462 		mode |= S_IFDIR;
463 		break;
464 	case VBLK:
465 		mode |= S_IFBLK;
466 		break;
467 	case VCHR:
468 		mode |= S_IFCHR;
469 		break;
470 	case VLNK:
471 		mode |= S_IFLNK;
472 		/* This is a cosmetic change, symlinks do not have a mode. */
473 		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
474 			mode &= ~ACCESSPERMS;	/* 0000 */
475 		else
476 			mode |= ACCESSPERMS;	/* 0777 */
477 		break;
478 	case VSOCK:
479 		mode |= S_IFSOCK;
480 		break;
481 	case VFIFO:
482 		mode |= S_IFIFO;
483 		break;
484 	default:
485 		return (EBADF);
486 	}
487 	sb->st_mode = mode;
488 	sb->st_nlink = vap->va_nlink;
489 	sb->st_uid = vap->va_uid;
490 	sb->st_gid = vap->va_gid;
491 	sb->st_rdev = vap->va_rdev;
492 	sb->st_size = vap->va_size;
493 	sb->st_atimespec = vap->va_atime;
494 	sb->st_mtimespec = vap->va_mtime;
495 	sb->st_ctimespec = vap->va_ctime;
496 
497         /*
498 	 * According to www.opengroup.org, the meaning of st_blksize is
499 	 *   "a filesystem-specific preferred I/O block size for this
500 	 *    object.  In some filesystem types, this may vary from file
501 	 *    to file"
502 	 * Default to zero to catch bogus uses of this field.
503 	 */
504 
505 	if (vap->va_type == VREG) {
506 		sb->st_blksize = vap->va_blocksize;
507 	} else if (vn_isdisk(vp, NULL)) {
508 		sb->st_blksize = vp->v_rdev->si_bsize_best;
509 		if (sb->st_blksize < vp->v_rdev->si_bsize_phys)
510 			sb->st_blksize = vp->v_rdev->si_bsize_phys;
511 		if (sb->st_blksize < BLKDEV_IOSIZE)
512 			sb->st_blksize = BLKDEV_IOSIZE;
513 	} else {
514 		sb->st_blksize = 0;
515 	}
516 
517 	sb->st_flags = vap->va_flags;
518 	if (suser_xxx(p->p_ucred, 0, 0))
519 		sb->st_gen = 0;
520 	else
521 		sb->st_gen = vap->va_gen;
522 
523 #if (S_BLKSIZE == 512)
524 	/* Optimize this case */
525 	sb->st_blocks = vap->va_bytes >> 9;
526 #else
527 	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
528 #endif
529 	return (0);
530 }
531 
532 /*
533  * File table vnode ioctl routine.
534  */
535 static int
536 vn_ioctl(fp, com, data, p)
537 	struct file *fp;
538 	u_long com;
539 	caddr_t data;
540 	struct proc *p;
541 {
542 	register struct vnode *vp = ((struct vnode *)fp->f_data);
543 	struct vattr vattr;
544 	int error;
545 
546 	switch (vp->v_type) {
547 
548 	case VREG:
549 	case VDIR:
550 		if (com == FIONREAD) {
551 			error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
552 			if (error)
553 				return (error);
554 			*(int *)data = vattr.va_size - fp->f_offset;
555 			return (0);
556 		}
557 		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
558 			return (0);			/* XXX */
559 		/* fall into ... */
560 
561 	default:
562 #if 0
563 		return (ENOTTY);
564 #endif
565 	case VFIFO:
566 	case VCHR:
567 	case VBLK:
568 		if (com == FIODTYPE) {
569 			if (vp->v_type != VCHR && vp->v_type != VBLK)
570 				return (ENOTTY);
571 			*(int *)data = devsw(vp->v_rdev)->d_flags & D_TYPEMASK;
572 			return (0);
573 		}
574 		error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p);
575 		if (error == 0 && com == TIOCSCTTY) {
576 
577 			/* Do nothing if reassigning same control tty */
578 			if (p->p_session->s_ttyvp == vp)
579 				return (0);
580 
581 			/* Get rid of reference to old control tty */
582 			if (p->p_session->s_ttyvp)
583 				vrele(p->p_session->s_ttyvp);
584 
585 			p->p_session->s_ttyvp = vp;
586 			VREF(vp);
587 		}
588 		return (error);
589 	}
590 }
591 
592 /*
593  * File table vnode poll routine.
594  */
595 static int
596 vn_poll(fp, events, cred, p)
597 	struct file *fp;
598 	int events;
599 	struct ucred *cred;
600 	struct proc *p;
601 {
602 
603 	return (VOP_POLL(((struct vnode *)fp->f_data), events, cred, p));
604 }
605 
606 /*
607  * Check that the vnode is still valid, and if so
608  * acquire the requested lock.
609  */
610 int
611 #ifndef	DEBUG_LOCKS
612 vn_lock(vp, flags, p)
613 #else
614 debug_vn_lock(vp, flags, p, filename, line)
615 #endif
616 	struct vnode *vp;
617 	int flags;
618 	struct proc *p;
619 #ifdef	DEBUG_LOCKS
620 	const char *filename;
621 	int line;
622 #endif
623 {
624 	int error;
625 
626 	do {
627 		if ((flags & LK_INTERLOCK) == 0)
628 			simple_lock(&vp->v_interlock);
629 		if (vp->v_flag & VXLOCK) {
630 			vp->v_flag |= VXWANT;
631 			simple_unlock(&vp->v_interlock);
632 			tsleep((caddr_t)vp, PINOD, "vn_lock", 0);
633 			error = ENOENT;
634 		} else {
635 #ifdef	DEBUG_LOCKS
636 			vp->filename = filename;
637 			vp->line = line;
638 #endif
639 			error = VOP_LOCK(vp,
640 				    flags | LK_NOPAUSE | LK_INTERLOCK, p);
641 			if (error == 0)
642 				return (error);
643 		}
644 		flags &= ~LK_INTERLOCK;
645 	} while (flags & LK_RETRY);
646 	return (error);
647 }
648 
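/*
 * Illustrative sketch (not part of this file): the common calling
 * pattern.  With LK_RETRY the lock attempt is repeated after sleeping
 * on a doomed (VXLOCK'ed) vnode; without it, ENOENT is returned so
 * the caller can notice that the vnode is being reclaimed.
 */
#if 0
	if (vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p) == 0) {
		/* ... operate on the exclusively locked vnode ... */
		VOP_UNLOCK(vp, 0, p);
	}
#endif
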
649 /*
650  * File table vnode close routine.
651  */
652 static int
653 vn_closefile(fp, p)
654 	struct file *fp;
655 	struct proc *p;
656 {
657 
658 	fp->f_ops = &badfileops;
659 	return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
660 		fp->f_cred, p));
661 }
662 
663 static int
664 filt_vnattach(struct knote *kn)
665 {
666 	struct vnode *vp;
667 
668 	if (kn->kn_fp->f_type != DTYPE_VNODE &&
669 	    kn->kn_fp->f_type != DTYPE_FIFO)
670 		return (EBADF);
671 
672 	vp = (struct vnode *)kn->kn_fp->f_data;
673 
674         simple_lock(&vp->v_pollinfo.vpi_lock);
675 	SLIST_INSERT_HEAD(&vp->v_pollinfo.vpi_selinfo.si_note, kn, kn_selnext);
676         simple_unlock(&vp->v_pollinfo.vpi_lock);
677 
678 	return (0);
679 }
680 
681 static void
682 filt_vndetach(struct knote *kn)
683 {
684 	struct vnode *vp = (struct vnode *)kn->kn_fp->f_data;
685 
686         simple_lock(&vp->v_pollinfo.vpi_lock);
687 	SLIST_REMOVE(&vp->v_pollinfo.vpi_selinfo.si_note,
688 	    kn, knote, kn_selnext);
689         simple_unlock(&vp->v_pollinfo.vpi_lock);
690 }
691 
692 static int
693 filt_vnode(struct knote *kn, long hint)
694 {
695 
696 	if (kn->kn_sfflags & hint)
697 		kn->kn_fflags |= hint;
698 	return (kn->kn_fflags != 0);
699 }
700 
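/*
 * Illustrative sketch (not part of this file): filt_vnode() services
 * userland EVFILT_VNODE registrations; kn_sfflags holds the NOTE_*
 * events the caller asked for and "hint" is the event posted by the
 * filesystem.  A userland program might register for such events
 * roughly as follows (hypothetical example).
 */
#if 0
	struct kevent ev;
	int kq = kqueue();
	int fd = open("/some/file", O_RDONLY);

	EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
	    NOTE_DELETE | NOTE_WRITE | NOTE_RENAME, 0, NULL);
	(void)kevent(kq, &ev, 1, NULL, 0, NULL);	/* register only */
#endif
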
701 static int
702 filt_nullattach(struct knote *kn)
703 {
704 	return (ENXIO);
705 }
706 
707 /*ARGSUSED*/
708 static int
709 filt_vnread(struct knote *kn, long hint)
710 {
711 	struct vnode *vp = (struct vnode *)kn->kn_fp->f_data;
712 	struct inode *ip = VTOI(vp);
713 
714 	kn->kn_data = ip->i_size - kn->kn_fp->f_offset;
715 	return (kn->kn_data != 0);
716 }
717