1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #ifndef _SYS_VNODE_H_
33 #define _SYS_VNODE_H_
34
35 #include <sys/bufobj.h>
36 #include <sys/queue.h>
37 #include <sys/lock.h>
38 #include <sys/lockmgr.h>
39 #include <sys/mutex.h>
40 #include <sys/rangelock.h>
41 #include <sys/selinfo.h>
42 #include <sys/uio.h>
43 #include <sys/acl.h>
44 #include <sys/ktr.h>
45 #include <sys/_seqc.h>
46
47 /*
48 * The vnode is the focus of all file activity in UNIX. There is a
49 * unique vnode allocated for each active file, each current directory,
50 * each mounted-on file, text file, and the root.
51 */
52
53 /*
54 * Vnode types. VNON means no type.
55 */
56 __enum_uint8_decl(vtype) {
57 VNON,
58 VREG,
59 VDIR,
60 VBLK,
61 VCHR,
62 VLNK,
63 VSOCK,
64 VFIFO,
65 VBAD,
66 VMARKER,
67 VLASTTYPE = VMARKER,
68 };
69
70 /*
71 * We frequently need to test if something is a device node.
72 */
73 #define VTYPE_ISDEV(vtype) ((vtype) == VCHR || (vtype) == VBLK)
74
75 __enum_uint8_decl(vstate) {
76 VSTATE_UNINITIALIZED,
77 VSTATE_CONSTRUCTED,
78 VSTATE_DESTROYING,
79 VSTATE_DEAD,
80 VLASTSTATE = VSTATE_DEAD,
81 };
82
83 enum vgetstate {
84 VGET_NONE,
85 VGET_HOLDCNT,
86 VGET_USECOUNT,
87 };
88
89 /*
90 * Each underlying filesystem allocates its own private area and hangs
91 * it from v_data. If non-null, this area is freed in getnewvnode().
92 */
93
94 struct cache_fpl;
95 struct inotify_watch;
96 struct namecache;
97
98 struct vpollinfo {
99 struct mtx vpi_lock; /* lock to protect below */
100 TAILQ_HEAD(, inotify_watch) vpi_inotify; /* list of inotify watchers */
101 struct selinfo vpi_selinfo; /* identity of poller(s) */
102 short vpi_events; /* what they are looking for */
103 short vpi_revents; /* what has happened */
104 };
105
106 /*
107 * Reading or writing any of these items requires holding the appropriate lock.
108 *
109 * Lock reference:
110 * c - namecache mutex
111 * i - interlock
112 * l - mp mnt_listmtx or freelist mutex
113 * I - updated with atomics, 0->1 and 1->0 transitions with interlock held
114 * m - mount point interlock
115 * p - pollinfo lock
116 * u - Only a reference to the vnode is needed to read.
117 * v - vnode lock
118 *
119 * Vnodes may be found on many lists. The general way to deal with operating
120 * on a vnode that is on a list is:
121 * 1) Lock the list and find the vnode.
122 * 2) Lock interlock so that the vnode does not go away.
123 * 3) Unlock the list to avoid lock order reversals.
124 * 4) vget with LK_INTERLOCK and check for ENOENT, or
125 * 5) Check for DOOMED if the vnode lock is not required.
126 * 6) Perform your operation, then vput().
127 */
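/*
 * An editorial sketch (not part of the original header) of the sequence
 * above.  The list head "examplefs_vnodes" and its mutex are hypothetical
 * names; real consumers also have to cope with the list changing while it
 * is unlocked.
 *
 *	struct vnode *vp;
 *
 *	mtx_lock(&examplefs_vnodes_mtx);		// 1) lock the list
 *	vp = TAILQ_FIRST(&examplefs_vnodes);		//    and find the vnode
 *	if (vp != NULL)
 *		VI_LOCK(vp);				// 2) interlock pins the vnode
 *	mtx_unlock(&examplefs_vnodes_mtx);		// 3) drop the list lock first
 *	if (vp != NULL &&
 *	    vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0) {  // 4) fails if doomed
 *		// 6) operate on the referenced, locked vnode
 *		vput(vp);
 *	}
 */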
128
129 #if defined(_KERNEL) || defined(_KVM_VNODE)
130
131 struct vnode {
132 /*
133 * Fields which define the identity of the vnode. These fields are
134 * owned by the filesystem (XXX: and vgone() ?)
135 */
136 __enum_uint8(vtype) v_type; /* u vnode type */
137 __enum_uint8(vstate) v_state; /* u vnode state */
138 short v_irflag; /* i frequently read flags */
139 seqc_t v_seqc; /* i modification count */
140 uint32_t v_nchash; /* u namecache hash */
141 u_int v_hash;
142 const struct vop_vector *v_op; /* u vnode operations vector */
143 void *v_data; /* u private data for fs */
144
145 /*
146 * Filesystem instance stuff
147 */
148 struct mount *v_mount; /* u ptr to vfs we are in */
149 TAILQ_ENTRY(vnode) v_nmntvnodes; /* m vnodes for mount point */
150
151 /*
152 * Type specific fields, only one applies to any given vnode.
153 */
154 union {
155 struct mount *v_mountedhere; /* v ptr to mountpoint (VDIR) */
156 struct unpcb *v_unpcb; /* v unix domain net (VSOCK) */
157 struct cdev *v_rdev; /* v device (VCHR, VBLK) */
158 struct fifoinfo *v_fifoinfo; /* v fifo (VFIFO) */
159 };
160
161 /*
162 * vfs_hash: (mount + inode) -> vnode hash. The hash value
163 * itself is grouped with other int fields, to avoid padding.
164 */
165 LIST_ENTRY(vnode) v_hashlist;
166
167 /*
168 * VFS_namecache stuff
169 */
170 LIST_HEAD(, namecache) v_cache_src; /* c Cache entries from us */
171 TAILQ_HEAD(, namecache) v_cache_dst; /* c Cache entries to us */
172 struct namecache *v_cache_dd; /* c Cache entry for .. vnode */
173
174 /*
175 * Locking
176 */
177 struct lock v_lock; /* u (if fs doesn't have one) */
178 struct mtx v_interlock; /* lock for "i" things */
179 struct lock *v_vnlock; /* u pointer to vnode lock */
180
181 /*
182 * The machinery of being a vnode
183 */
184 TAILQ_ENTRY(vnode) v_vnodelist; /* l vnode lists */
185 TAILQ_ENTRY(vnode) v_lazylist; /* l vnode lazy list */
186 struct bufobj v_bufobj; /* * Buffer cache object */
187
188 /*
189 * Hooks for various subsystems and features.
190 */
191 struct vpollinfo *v_pollinfo; /* i Poll events, p for *v_pi */
192 struct label *v_label; /* MAC label for vnode */
193 struct lockf *v_lockf; /* Byte-level advisory lock list */
194 struct rangelock v_rl; /* Byte-range lock */
195
196 u_int v_holdcnt; /* I prevents recycling. */
197 u_int v_usecount; /* I ref count of users */
198 u_short v_iflag; /* i vnode flags (see below) */
199 u_short v_vflag; /* v vnode flags */
200 u_short v_mflag; /* l mnt-specific vnode flags */
201 short v_dbatchcpu; /* i LRU requeue deferral batch */
202 int v_writecount; /* I ref count of writers or
203 (negative) text users */
204 int v_seqc_users; /* i modifications pending */
205 };
206
207 #define VN_ISDEV(vp) VTYPE_ISDEV((vp)->v_type)
208
209 #ifndef DEBUG_LOCKS
210 #ifdef _LP64
211 /*
212 * Staying at or below 448 bytes fits 9 vnodes per page. If you have to add
213 * fields to the structure and there is nothing that can be done to prevent
214 * growth, then so be it. But don't grow it without a good reason.
215 */
216 _Static_assert(sizeof(struct vnode) <= 448, "vnode size crosses 448 bytes");
217 #endif
218 #endif
219
220 #endif /* defined(_KERNEL) || defined(_KVM_VNODE) */
221
222 #define bo2vnode(bo) __containerof((bo), struct vnode, v_bufobj)
223
224 /* XXX: These are temporary to avoid a source sweep at this time */
225 #define v_object v_bufobj.bo_object
226
227 /* We don't need to lock the knlist */
228 #define VN_KNLIST_EMPTY(vp) ((vp)->v_pollinfo == NULL || \
229 KNLIST_EMPTY(&(vp)->v_pollinfo->vpi_selinfo.si_note))
230
231 #define VN_KNOTE(vp, b, a) \
232 do { \
233 if (!VN_KNLIST_EMPTY(vp)) \
234 KNOTE(&vp->v_pollinfo->vpi_selinfo.si_note, (b), \
235 (a) | KNF_NOKQLOCK); \
236 } while (0)
237 #define VN_KNOTE_LOCKED(vp, b) VN_KNOTE(vp, b, KNF_LISTLOCKED)
238 #define VN_KNOTE_UNLOCKED(vp, b) VN_KNOTE(vp, b, 0)
239
240 /*
241 * Vnode flags.
242 * VI flags are protected by interlock and live in v_iflag
243 * VIRF flags are protected by interlock and live in v_irflag
244 * VV flags are protected by the vnode lock and live in v_vflag
245 *
246 * VIRF_DOOMED is doubly protected by the interlock and vnode lock. Both
247 * are required for writing but the status may be checked with either.
248 */
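/*
 * Editorial sketch of the locking rules above; it assumes the caller already
 * holds the vnode lock exclusively for the VV_* update.
 *
 *	VI_LOCK(vp);
 *	vp->v_iflag |= VI_OWEINACT;	// VI_* flags change under the interlock
 *	VI_UNLOCK(vp);
 *
 *	ASSERT_VOP_ELOCKED(vp, __func__);
 *	vp->v_vflag |= VV_NOSYNC;	// VV_* flags change under the vnode lock
 *
 *	if (VN_IS_DOOMED(vp))		// VIRF_DOOMED may be checked under either
 *		return (ENOENT);
 */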
249 #define VHOLD_NO_SMR (1<<29) /* Disable vhold_smr */
250 #define VHOLD_ALL_FLAGS (VHOLD_NO_SMR)
251
252 #define VIRF_DOOMED 0x0001 /* This vnode is being recycled */
253 #define VIRF_PGREAD 0x0002 /* Direct reads from the page cache are permitted,
254 never cleared once set */
255 #define VIRF_MOUNTPOINT 0x0004 /* This vnode is mounted on */
256 #define VIRF_TEXT_REF 0x0008 /* Executable mappings ref the vnode */
257 #define VIRF_CROSSMP 0x0010 /* Cross-mp vnode, no locking */
258 #define VIRF_NAMEDDIR 0x0020 /* Named attribute directory */
259 #define VIRF_NAMEDATTR 0x0040 /* Named attribute */
260 #define VIRF_INOTIFY 0x0080 /* This vnode is being watched */
261 #define VIRF_INOTIFY_PARENT 0x0100 /* A parent of this vnode may be being
262 watched */
263
264 #define VI_UNUSED0 0x0001 /* unused */
265 #define VI_MOUNT 0x0002 /* Mount in progress */
266 #define VI_DOINGINACT 0x0004 /* VOP_INACTIVE is in progress */
267 #define VI_OWEINACT 0x0008 /* Need to call inactive */
268 #define VI_DEFINACT 0x0010 /* deferred inactive */
269 #define VI_FOPENING 0x0020 /* In open, with opening process having the
270 first right to advlock file */
271
272 #define VV_ROOT 0x0001 /* root of its filesystem */
273 #define VV_ISTTY 0x0002 /* vnode represents a tty */
274 #define VV_NOSYNC 0x0004 /* unlinked, stop syncing */
275 #define VV_ETERNALDEV 0x0008 /* device that is never destroyed */
276 #define VV_CACHEDLABEL 0x0010 /* Vnode has valid cached MAC label */
277 #define VV_VMSIZEVNLOCK 0x0020 /* object size check requires vnode lock */
278 #define VV_COPYONWRITE 0x0040 /* vnode is doing copy-on-write */
279 #define VV_SYSTEM 0x0080 /* vnode being used by kernel */
280 #define VV_PROCDEP 0x0100 /* vnode is process dependent */
281 #define VV_UNLINKED 0x0200 /* unlinked but still open directory */
282 #define VV_DELETED 0x0400 /* should be removed */
283 #define VV_MD 0x0800 /* vnode backs the md device */
284 #define VV_FORCEINSMQ 0x1000 /* force the insmntque to succeed */
285 #define VV_READLINK 0x2000 /* fdescfs linux vnode */
286 #define VV_UNREF 0x4000 /* vunref, do not drop lock in inactive() */
287 #define VV_CROSSLOCK 0x8000 /* vnode lock is shared w/ root mounted here */
288
289 #define VMP_LAZYLIST 0x0001 /* Vnode is on mnt's lazy list */
290
291 /*
292 * Vnode attributes. A field value of VNOVAL represents a field whose value
293 * is unavailable (getattr) or which is not to be changed (setattr).
294 */
295 struct vattr {
296 __enum_uint8(vtype) va_type; /* vnode type (for create) */
297 u_short va_mode; /* file's access mode and type */
298 uint16_t va_bsdflags; /* same as st_bsdflags from stat(2) */
299 uid_t va_uid; /* owner user id */
300 gid_t va_gid; /* owner group id */
301 nlink_t va_nlink; /* number of references to file */
302 dev_t va_fsid; /* filesystem id */
303 ino_t va_fileid; /* file id */
304 u_quad_t va_size; /* file size in bytes */
305 long va_blocksize; /* blocksize preferred for i/o */
306 struct timespec va_atime; /* time of last access */
307 struct timespec va_mtime; /* time of last modification */
308 struct timespec va_ctime; /* time file changed */
309 struct timespec va_birthtime; /* time file created */
310 u_long va_gen; /* generation number of file */
311 u_long va_flags; /* flags defined for file */
312 dev_t va_rdev; /* device the special file represents */
313 u_quad_t va_bytes; /* bytes of disk space held by file */
314 u_quad_t va_filerev; /* file modification number */
315 u_int va_vaflags; /* operations flags, see below */
316 long va_spare; /* remain quad aligned */
317 };
318
319 #define VATTR_ISDEV(vap) VTYPE_ISDEV((vap)->va_type)
320
321 /*
322 * Flags for va_vaflags.
323 */
324 #define VA_UTIMES_NULL 0x01 /* utimes argument was NULL */
325 #define VA_EXCLUSIVE 0x02 /* exclusive create request */
326 #define VA_SYNC 0x04 /* O_SYNC truncation */
327
328 /*
329 * Flags for ioflag. (high 16 bits used to ask for read-ahead and
330 * help with write clustering)
331 * NB: IO_NDELAY and IO_DIRECT are linked to fcntl.h
332 */
333 #define IO_UNIT 0x0001 /* do I/O as atomic unit */
334 #define IO_APPEND 0x0002 /* append write to end */
335 #define IO_NDELAY 0x0004 /* FNDELAY flag set in file table */
336 #define IO_NODELOCKED 0x0008 /* underlying node already locked */
337 #define IO_ASYNC 0x0010 /* bawrite rather than bdwrite */
338 #define IO_VMIO 0x0020 /* data already in VMIO space */
339 #define IO_INVAL 0x0040 /* invalidate after I/O */
340 #define IO_SYNC 0x0080 /* do I/O synchronously */
341 #define IO_DIRECT 0x0100 /* attempt to bypass buffer cache */
342 #define IO_NOREUSE 0x0200 /* VMIO data won't be reused */
343 #define IO_EXT 0x0400 /* operate on external attributes */
344 #define IO_NORMAL 0x0800 /* operate on regular data */
345 #define IO_NOMACCHECK 0x1000 /* MAC checks unnecessary */
346 #define IO_BUFLOCKED 0x2000 /* ffs flag; indir buf is locked */
347 #define IO_RANGELOCKED 0x4000 /* range locked */
348 #define IO_DATASYNC 0x8000 /* do only data I/O synchronously */
349
350 #define IO_SEQMAX 0x7F /* seq heuristic max value */
351 #define IO_SEQSHIFT 16 /* seq heuristic in upper 16 bits */
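/*
 * Editorial sketch: how a caller passes the sequential-access heuristic to
 * VOP_READ() through the upper bits of ioflag.  "seqcount" is a hypothetical
 * per-file counter maintained by the caller.
 *
 *	int ioflag;
 *
 *	ioflag = IO_UNIT;
 *	ioflag |= MIN(seqcount, IO_SEQMAX) << IO_SEQSHIFT;  // read-ahead hint
 *	error = VOP_READ(vp, uio, ioflag, cred);
 */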
352
353 /*
354 * Flags for accmode_t.
355 */
356 #define VEXEC 000000000100 /* execute/search permission */
357 #define VWRITE 000000000200 /* write permission */
358 #define VREAD 000000000400 /* read permission */
359 #define VADMIN 000000010000 /* being the file owner */
360 #define VAPPEND 000000040000 /* permission to write/append */
361 /*
362 * VEXPLICIT_DENY makes VOP_ACCESSX(9) return EPERM or EACCES only
363 * if permission was denied explicitly, by a "deny" rule in NFSv4 ACL,
364 * and 0 otherwise. This never happens with ordinary unix access rights
365 * or POSIX.1e ACLs. Obviously, VEXPLICIT_DENY must be OR-ed with
366 * some other V* constant.
367 */
368 #define VEXPLICIT_DENY 000000100000
369 #define VREAD_NAMED_ATTRS 000000200000 /* not used */
370 #define VWRITE_NAMED_ATTRS 000000400000 /* not used */
371 #define VDELETE_CHILD 000001000000
372 #define VREAD_ATTRIBUTES 000002000000 /* permission to stat(2) */
373 #define VWRITE_ATTRIBUTES 000004000000 /* change {m,c,a}time */
374 #define VDELETE 000010000000
375 #define VREAD_ACL 000020000000 /* read ACL and file mode */
376 #define VWRITE_ACL 000040000000 /* change ACL and/or file mode */
377 #define VWRITE_OWNER 000100000000 /* change file owner */
378 #define VSYNCHRONIZE 000200000000 /* not used */
379 #define VCREAT 000400000000 /* creating new file */
380 #define VVERIFY 001000000000 /* verification required */
381
382 /*
383 * Permissions that were traditionally granted only to the file owner.
384 */
385 #define VADMIN_PERMS (VADMIN | VWRITE_ATTRIBUTES | VWRITE_ACL | \
386 VWRITE_OWNER)
387
388 /*
389 * Permissions that were traditionally granted to everyone.
390 */
391 #define VSTAT_PERMS (VREAD_ATTRIBUTES | VREAD_ACL)
392
393 /*
394 * Permissions that allow changing the state of the file in any way.
395 */
396 #define VMODIFY_PERMS (VWRITE | VAPPEND | VADMIN_PERMS | VDELETE_CHILD | \
397 VDELETE)
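/*
 * Editorial sketch: requesting an NFSv4-style permission check where only an
 * explicit "deny" ACE should cause failure, per the VEXPLICIT_DENY comment
 * above.
 *
 *	accmode_t accmode;
 *
 *	accmode = VWRITE_ACL | VEXPLICIT_DENY;
 *	error = VOP_ACCESSX(vp, accmode, cred, td);
 */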
398
399 /*
400 * Token indicating no attribute value yet assigned.
401 */
402 #define VNOVAL (-1)
403
404 /*
405 * LK_TIMELOCK timeout for vnode locks (used mainly by the pageout daemon)
406 */
407 #define VLKTIMEOUT (hz / 20 + 1)
408
409 #ifdef _KERNEL
410
411 #ifdef MALLOC_DECLARE
412 MALLOC_DECLARE(M_VNODE);
413 #endif
414
415 extern u_int ncsizefactor;
416 extern const u_int io_hold_cnt;
417
418 /*
419 * Convert between vnode types and inode formats (since POSIX.1
420 * defines mode word of stat structure in terms of inode formats).
421 */
422 extern __enum_uint8(vtype) iftovt_tab[];
423 extern int vttoif_tab[];
424 #define IFTOVT(mode) (iftovt_tab[((mode) & S_IFMT) >> 12])
425 #define VTTOIF(indx) (vttoif_tab[(int)(indx)])
426 #define MAKEIMODE(indx, mode) (int)(VTTOIF(indx) | (mode))
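/*
 * Editorial sketch: a filesystem converting its on-disk mode word to a vnode
 * type and back.  "ip" stands in for a hypothetical per-fs inode.
 *
 *	vp->v_type = IFTOVT(ip->i_mode);		// e.g. S_IFDIR -> VDIR
 *	...
 *	imode = MAKEIMODE(vap->va_type, vap->va_mode);	// back to inode format
 */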
427
428 /*
429 * Flags to various vnode functions.
430 */
431 #define SKIPSYSTEM 0x0001 /* vflush: skip vnodes marked VSYSTEM */
432 #define FORCECLOSE 0x0002 /* vflush: force file closure */
433 #define WRITECLOSE 0x0004 /* vflush: only close writable files */
434 #define EARLYFLUSH 0x0008 /* vflush: early call for ffs_flushfiles */
435 #define V_SAVE 0x0001 /* vinvalbuf: sync file first */
436 #define V_ALT 0x0002 /* vinvalbuf: invalidate only alternate bufs */
437 #define V_NORMAL 0x0004 /* vinvalbuf: invalidate only regular bufs */
438 #define V_CLEANONLY 0x0008 /* vinvalbuf: invalidate only clean bufs */
439 #define V_VMIO 0x0010 /* vinvalbuf: called during pageout */
440 #define V_ALLOWCLEAN 0x0020 /* vinvalbuf: allow clean buffers after flush */
441 #define REVOKEALL 0x0001 /* vop_revoke: revoke all aliases */
442 #define V_WAIT 0x0001 /* vn_start_write: sleep for suspend */
443 #define V_NOWAIT 0x0002 /* vn_start_write: don't sleep for suspend */
444 #define V_XSLEEP 0x0004 /* vn_start_write: just return after sleep */
445 #define V_PCATCH 0x0008 /* vn_start_write: make the sleep interruptible */
446 #define V_VALID_FLAGS (V_WAIT | V_NOWAIT | V_XSLEEP | V_PCATCH)
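/*
 * Editorial sketch of the usual suspend-aware bracket around a modifying
 * operation using the vn_start_write() flags above.
 *
 *	struct mount *mp;
 *
 *	error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH);
 *	if (error != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = VOP_SETATTR(vp, &va, cred);
 *	VOP_UNLOCK(vp);
 *	vn_finished_write(mp);
 */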
447
448 #define VR_START_WRITE 0x0001 /* vfs_write_resume: start write atomically */
449 #define VR_NO_SUSPCLR 0x0002 /* vfs_write_resume: do not clear suspension */
450
451 #define VS_SKIP_UNMOUNT 0x0001 /* vfs_write_suspend: fail if the
452 filesystem is being unmounted */
453
454 #ifdef DIAGNOSTIC
455 #define VATTR_NULL(vap) vattr_null(vap)
456 #else
457 #define VATTR_NULL(vap) (*(vap) = va_null) /* initialize a vattr */
458 #endif /* DIAGNOSTIC */
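/*
 * Editorial sketch: truncating a file via setattr.  VATTR_NULL() sets every
 * field to VNOVAL, so only the fields assigned afterwards are acted upon.
 *
 *	struct vattr va;
 *
 *	VATTR_NULL(&va);
 *	va.va_size = 0;				// the only field to change
 *	error = VOP_SETATTR(vp, &va, cred);
 */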
459
460 /*
461 * Global vnode data.
462 */
463 extern struct vnode *rootvnode; /* root (i.e. "/") vnode */
464 extern struct mount *rootdevmp; /* "/dev" mount */
465 extern u_long desiredvnodes; /* number of vnodes desired */
466 extern struct uma_zone *namei_zone;
467 extern struct vattr va_null; /* predefined null vattr structure */
468
469 extern u_int vn_lock_pair_pause_max;
470
471 #define VI_LOCK(vp) mtx_lock(&(vp)->v_interlock)
472 #define VI_LOCK_FLAGS(vp, flags) mtx_lock_flags(&(vp)->v_interlock, (flags))
473 #define VI_TRYLOCK(vp) mtx_trylock(&(vp)->v_interlock)
474 #define VI_UNLOCK(vp) mtx_unlock(&(vp)->v_interlock)
475 #define VI_MTX(vp) (&(vp)->v_interlock)
476
477 #define VN_LOCK_AREC(vp) lockallowrecurse((vp)->v_vnlock)
478 #define VN_LOCK_ASHARE(vp) lockallowshare((vp)->v_vnlock)
479 #define VN_LOCK_DSHARE(vp) lockdisableshare((vp)->v_vnlock)
480
481 #endif /* _KERNEL */
482
483 /*
484 * Mods for extensibility.
485 */
486
487 /*
488 * Flags for vdesc_flags:
489 */
490 #define VDESC_MAX_VPS 16
491 /* Low order 16 flag bits are reserved for willrele flags for vp arguments. */
492 #define VDESC_VP0_WILLRELE 0x0001
493 #define VDESC_VP1_WILLRELE 0x0002
494 #define VDESC_VP2_WILLRELE 0x0004
495 #define VDESC_VP3_WILLRELE 0x0008
496
497 /*
498 * A generic structure.
499 * This can be used by bypass routines to identify generic arguments.
500 */
501 struct vop_generic_args {
502 struct vnodeop_desc *a_desc;
503 /* other random data follows, presumably */
504 };
505
506 typedef int vop_bypass_t(struct vop_generic_args *);
507
508 /*
509 * VDESC_NO_OFFSET is used to identify the end of the offset list
510 * and in places where no such field exists.
511 */
512 #define VDESC_NO_OFFSET -1
513
514 /*
515 * This structure describes the vnode operation taking place.
516 */
517 struct vnodeop_desc {
518 char *vdesc_name; /* a readable name for debugging */
519 int vdesc_flags; /* VDESC_* flags */
520 int vdesc_vop_offset;
521 vop_bypass_t *vdesc_call; /* Function to call */
522
523 /*
524 * These ops are used by bypass routines to map and locate arguments.
525 * Creds and procs are not needed in bypass routines, but sometimes
526 * they are useful to (for example) transport layers.
527 * Nameidata is useful because it has a cred in it.
528 */
529 int *vdesc_vp_offsets; /* list ended by VDESC_NO_OFFSET */
530 int vdesc_vpp_offset; /* return vpp location */
531 int vdesc_cred_offset; /* cred location, if any */
532 int vdesc_thread_offset; /* thread location, if any */
533 int vdesc_componentname_offset; /* if any */
534 };
535
536 #ifdef _KERNEL
537 /*
538 * A list of all the operation descs.
539 */
540 extern struct vnodeop_desc *vnodeop_descs[];
541
542 #define VOPARG_OFFSETOF(s_type, field) __offsetof(s_type, field)
543 #define VOPARG_OFFSETTO(s_type, s_offset, struct_p) \
544 ((s_type)(((char*)(struct_p)) + (s_offset)))
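/*
 * Editorial sketch, in the spirit of a stacking filesystem's bypass routine:
 * vdesc_vp_offsets lets generic code locate every vnode argument of an
 * arbitrary VOP without knowing its argument structure.
 *
 *	struct vnodeop_desc *descp = ap->a_desc;
 *	struct vnode **vpp;
 *	int i;
 *
 *	for (i = 0; descp->vdesc_vp_offsets[i] != VDESC_NO_OFFSET; i++) {
 *		vpp = VOPARG_OFFSETTO(struct vnode **,
 *		    descp->vdesc_vp_offsets[i], ap);
 *		// *vpp is the i-th vnode argument; translate it here
 *	}
 */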
545
546 #ifdef INVARIANTS
547 /*
548 * Support code to aid in debugging VFS locking problems. Not totally
549 * reliable since if the thread sleeps between changing the lock
550 * state and checking it with the assert, some other thread could
551 * change the state. They are good enough for debugging a single
552 * filesystem using a single-threaded test. Note that the unreliability is
553 * limited to false negatives; efforts were made to ensure that false
554 * positives cannot occur.
555 */
556 void assert_vi_locked(struct vnode *vp, const char *str);
557 void assert_vi_unlocked(struct vnode *vp, const char *str);
558 void assert_vop_elocked(struct vnode *vp, const char *str);
559 void assert_vop_locked(struct vnode *vp, const char *str);
560 void assert_vop_unlocked(struct vnode *vp, const char *str);
561
562 #define ASSERT_VI_LOCKED(vp, str) assert_vi_locked((vp), (str))
563 #define ASSERT_VI_UNLOCKED(vp, str) assert_vi_unlocked((vp), (str))
564 #define ASSERT_VOP_ELOCKED(vp, str) assert_vop_elocked((vp), (str))
565 #define ASSERT_VOP_LOCKED(vp, str) assert_vop_locked((vp), (str))
566 #define ASSERT_VOP_UNLOCKED(vp, str) assert_vop_unlocked((vp), (str))
567
568 #define ASSERT_VOP_IN_SEQC(vp) do { \
569 struct vnode *_vp = (vp); \
570 \
571 VNPASS(seqc_in_modify(_vp->v_seqc), _vp); \
572 } while (0)
573
574 #define ASSERT_VOP_NOT_IN_SEQC(vp) do { \
575 struct vnode *_vp = (vp); \
576 \
577 VNPASS(!seqc_in_modify(_vp->v_seqc), _vp); \
578 } while (0)
579
580 #else /* !INVARIANTS */
581
582 #define ASSERT_VI_LOCKED(vp, str) ((void)0)
583 #define ASSERT_VI_UNLOCKED(vp, str) ((void)0)
584 #define ASSERT_VOP_ELOCKED(vp, str) ((void)0)
585 #define ASSERT_VOP_LOCKED(vp, str) ((void)0)
586 #define ASSERT_VOP_UNLOCKED(vp, str) ((void)0)
587
588 #define ASSERT_VOP_IN_SEQC(vp) ((void)0)
589 #define ASSERT_VOP_NOT_IN_SEQC(vp) ((void)0)
590
591 #endif /* INVARIANTS */
592
593 #define DOINGASYNC(vp) \
594 (((vp)->v_mount->mnt_kern_flag & MNTK_ASYNC) != 0 && \
595 ((curthread->td_pflags & TDP_SYNCIO) == 0))
596
597 /*
598 * VMIO support inline
599 */
600
601 extern int vmiodirenable;
602
603 static __inline int
604 vn_canvmio(struct vnode *vp)
605 {
606 if (vp && (vp->v_type == VREG || (vmiodirenable && vp->v_type == VDIR)))
607 return(TRUE);
608 return(FALSE);
609 }
610
611 /*
612 * Finally, include the default set of vnode operations.
613 */
614 typedef void vop_getpages_iodone_t(void *, vm_page_t *, int, int);
615 #include "vnode_if.h"
616
617 /* vn_open_flags */
618 #define VN_OPEN_NOAUDIT 0x00000001
619 #define VN_OPEN_NOCAPCHECK 0x00000002
620 #define VN_OPEN_NAMECACHE 0x00000004
621 #define VN_OPEN_INVFS 0x00000008
622 #define VN_OPEN_WANTIOCTLCAPS 0x00000010
623
624 /* copy_file_range kernel flags */
625 #define COPY_FILE_RANGE_KFLAGS 0xff000000
626 #define COPY_FILE_RANGE_TIMEO1SEC 0x01000000 /* Return after 1sec. */
627
628 /*
629 * Public vnode manipulation functions.
630 */
631 struct componentname;
632 struct file;
633 struct mount;
634 struct nameidata;
635 struct ostat;
636 struct freebsd11_stat;
637 struct thread;
638 struct proc;
639 struct stat;
640 struct nstat;
641 struct ucred;
642 struct uio;
643 struct vattr;
644 struct vfsops;
645 struct vnode;
646
647 typedef int (*vn_get_ino_t)(struct mount *, void *, int, struct vnode **);
648
649 int bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn,
650 daddr_t endn);
651 /* cache_* may belong in namei.h. */
652 void cache_changesize(u_long newhashsize);
653
654 #define VFS_CACHE_DROPOLD 0x1
655
656 void cache_enter_time_flags(struct vnode *dvp, struct vnode *vp,
657 struct componentname *cnp, struct timespec *tsp,
658 struct timespec *dtsp, int flags);
659 #define cache_enter(dvp, vp, cnp) \
660 cache_enter_time(dvp, vp, cnp, NULL, NULL)
661 void cache_enter_time(struct vnode *dvp, struct vnode *vp,
662 struct componentname *cnp, struct timespec *tsp,
663 struct timespec *dtsp);
664 int cache_lookup(struct vnode *dvp, struct vnode **vpp,
665 struct componentname *cnp, struct timespec *tsp, int *ticksp);
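/*
 * Editorial sketch of a lookup routine consulting the namecache first;
 * "examplefs_dirlookup" is a hypothetical directory scan.  cache_lookup()
 * returns -1 for a positive hit, ENOENT for a cached negative entry and 0
 * for a miss.
 *
 *	error = cache_lookup(dvp, vpp, cnp, NULL, NULL);
 *	if (error == -1)			// positive hit, *vpp is set up
 *		return (0);
 *	if (error == ENOENT)			// cached "no such entry"
 *		return (ENOENT);
 *	error = examplefs_dirlookup(dvp, vpp, cnp);
 *	if (error == 0 && (cnp->cn_flags & MAKEENTRY) != 0)
 *		cache_enter(dvp, *vpp, cnp);
 */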
666 void cache_vnode_init(struct vnode *vp);
667 void cache_purge(struct vnode *vp);
668 void cache_purge_vgone(struct vnode *vp);
669 void cache_purge_negative(struct vnode *vp);
670 void cache_purgevfs(struct mount *mp);
671 char *cache_symlink_alloc(size_t size, int flags);
672 void cache_symlink_free(char *string, size_t size);
673 int cache_symlink_resolve(struct cache_fpl *fpl, const char *string,
674 size_t len);
675 void cache_vop_inotify(struct vnode *vp, int event, uint32_t cookie);
676 void cache_vop_rename(struct vnode *fdvp, struct vnode *fvp, struct vnode *tdvp,
677 struct vnode *tvp, struct componentname *fcnp, struct componentname *tcnp);
678 void cache_vop_rmdir(struct vnode *dvp, struct vnode *vp);
679 void cache_vop_vector_register(struct vop_vector *);
680 #ifdef INVARIANTS
681 void cache_validate(struct vnode *dvp, struct vnode *vp,
682 struct componentname *cnp);
683 void cache_validate_vop_vector(struct mount *mp, struct vop_vector *vops);
684 void cache_assert_no_entries(struct vnode *vp);
685 #else
686 static inline void
687 cache_validate(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
688 {
689 }
690
691 static inline void
692 cache_validate_vop_vector(struct mount *mp, struct vop_vector *vops)
693 {
694 }
695
696 static inline void
697 cache_assert_no_entries(struct vnode *vp)
698 {
699 }
700 #endif
701 void cache_fast_lookup_enabled_recalc(void);
702 int change_dir(struct vnode *vp, struct thread *td);
703 void cvtstat(struct stat *st, struct ostat *ost);
704 int freebsd11_cvtnstat(struct stat *sb, struct nstat *nsb);
705 int freebsd11_cvtstat(struct stat *st, struct freebsd11_stat *ost);
706 int getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
707 struct vnode **vpp);
708 void getnewvnode_reserve(void);
709 void getnewvnode_drop_reserve(void);
710 int insmntque(struct vnode *vp, struct mount *mp);
711 int insmntque1(struct vnode *vp, struct mount *mp);
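/*
 * Editorial sketch of the common allocation sequence in a filesystem's
 * VFS_VGET()-like path; "ip" and "examplefs_vnodeops" are hypothetical.
 * insmntque() destroys the vnode itself if it fails.
 *
 *	struct vnode *vp;
 *
 *	error = getnewvnode("examplefs", mp, &examplefs_vnodeops, &vp);
 *	if (error != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	vp->v_data = ip;
 *	vp->v_type = IFTOVT(ip->i_mode);
 *	error = insmntque(vp, mp);
 *	if (error != 0)
 *		return (error);
 *	error = vfs_hash_insert(vp, ino, LK_EXCLUSIVE, td, vpp, NULL, NULL);
 */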
712 u_quad_t init_va_filerev(void);
713 int speedup_syncer(void);
714 int vn_vptocnp(struct vnode **vp, char *buf, size_t *buflen);
715 int vn_getcwd(char *buf, char **retbuf, size_t *buflen);
716 int vn_fullpath(struct vnode *vp, char **retbuf, char **freebuf);
717 int vn_fullpath_jail(struct vnode *vp, char **retbuf, char **freebuf);
718 int vn_fullpath_global(struct vnode *vp, char **retbuf, char **freebuf);
719 int vn_fullpath_hardlink(struct vnode *vp, struct vnode *dvp,
720 const char *hdrl_name, size_t hrdl_name_length, char **retbuf,
721 char **freebuf, size_t *buflen);
722 struct vnode *
723 vn_dir_dd_ino(struct vnode *vp);
724 int vn_commname(struct vnode *vn, char *buf, u_int buflen);
725 int vn_path_to_global_path(struct thread *td, struct vnode *vp,
726 char *path, u_int pathlen);
727 int vn_path_to_global_path_hardlink(struct thread *td, struct vnode *vp,
728 struct vnode *dvp, char *path, u_int pathlen, const char *leaf_name,
729 size_t leaf_length);
730 int vaccess(__enum_uint8(vtype) type, mode_t file_mode, uid_t file_uid,
731 gid_t file_gid, accmode_t accmode, struct ucred *cred);
732 int vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid,
733 struct ucred *cred);
734 int vaccess_acl_nfs4(__enum_uint8(vtype) type, uid_t file_uid, gid_t file_gid,
735 struct acl *aclp, accmode_t accmode, struct ucred *cred);
736 int vaccess_acl_posix1e(__enum_uint8(vtype) type, uid_t file_uid,
737 gid_t file_gid, struct acl *acl, accmode_t accmode,
738 struct ucred *cred);
739 void vattr_null(struct vattr *vap);
740 void vlazy(struct vnode *);
741 void vdrop(struct vnode *);
742 void vdropl(struct vnode *);
743 int vflush(struct mount *mp, int rootrefs, int flags, struct thread *td);
744 int vget(struct vnode *vp, int flags);
745 enum vgetstate vget_prep_smr(struct vnode *vp);
746 enum vgetstate vget_prep(struct vnode *vp);
747 int vget_finish(struct vnode *vp, int flags, enum vgetstate vs);
748 void vget_finish_ref(struct vnode *vp, enum vgetstate vs);
749 void vget_abort(struct vnode *vp, enum vgetstate vs);
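/*
 * Editorial sketch of the two-step vget: grab a stable reference while some
 * other lock still protects the vnode, then lock it after that lock has been
 * dropped.  On failure the reference taken in vget_prep() is released.
 *
 *	enum vgetstate vs;
 *
 *	VI_LOCK(vp);
 *	vs = vget_prep(vp);
 *	VI_UNLOCK(vp);
 *	error = vget_finish(vp, LK_SHARED, vs);
 *	if (error != 0)
 *		return (error);
 *	...
 *	vput(vp);
 */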
750 void vgone(struct vnode *vp);
751 void vhold(struct vnode *);
752 void vholdnz(struct vnode *);
753 bool vhold_smr(struct vnode *);
754 int vinactive(struct vnode *vp);
755 int vinvalbuf(struct vnode *vp, int save, int slpflag, int slptimeo);
756 int vtruncbuf(struct vnode *vp, off_t length, int blksize);
757 void v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn,
758 int blksize);
759 void vunref(struct vnode *);
760 void vn_printf(struct vnode *vp, const char *fmt, ...) __printflike(2,3);
761 int vrecycle(struct vnode *vp);
762 int vrecyclel(struct vnode *vp);
763 int vn_bmap_seekhole_locked(struct vnode *vp, u_long cmd, off_t *off,
764 struct ucred *cred);
765 int vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off,
766 struct ucred *cred);
767 int vn_close(struct vnode *vp,
768 int flags, struct ucred *file_cred, struct thread *td);
769 int vn_copy_file_range(struct vnode *invp, off_t *inoffp,
770 struct vnode *outvp, off_t *outoffp, size_t *lenp,
771 unsigned int flags, struct ucred *incred, struct ucred *outcred,
772 struct thread *fsize_td);
773 int vn_deallocate(struct vnode *vp, off_t *offset, off_t *length, int flags,
774 int ioflg, struct ucred *active_cred, struct ucred *file_cred);
775 void vn_finished_write(struct mount *mp);
776 void vn_finished_secondary_write(struct mount *mp);
777 int vn_fsync_buf(struct vnode *vp, int waitfor);
778 int vn_generic_copy_file_range(struct vnode *invp, off_t *inoffp,
779 struct vnode *outvp, off_t *outoffp, size_t *lenp,
780 unsigned int flags, struct ucred *incred, struct ucred *outcred,
781 struct thread *fsize_td);
782 int vn_need_pageq_flush(struct vnode *vp);
783 bool vn_isdisk_error(struct vnode *vp, int *errp);
784 bool vn_isdisk(struct vnode *vp);
785 int _vn_lock(struct vnode *vp, int flags, const char *file, int line);
786 #define vn_lock(vp, flags) _vn_lock(vp, flags, __FILE__, __LINE__)
787 void vn_lock_pair(struct vnode *vp1, bool vp1_locked, int lkflags1,
788 struct vnode *vp2, bool vp2_locked, int lkflags2);
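/*
 * Editorial sketch: LK_RETRY makes vn_lock() always return with the lock
 * held, so the caller must check for a doomed vnode afterwards.
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	if (VN_IS_DOOMED(vp)) {
 *		VOP_UNLOCK(vp);
 *		return (ENOENT);
 *	}
 */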
789 int vn_open(struct nameidata *ndp, int *flagp, int cmode, struct file *fp);
790 int vn_open_cred(struct nameidata *ndp, int *flagp, int cmode,
791 u_int vn_open_flags, struct ucred *cred, struct file *fp);
792 int vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred,
793 struct thread *td, struct file *fp);
794 void vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end);
795 void vn_pages_remove_valid(struct vnode *vp, vm_pindex_t start,
796 vm_pindex_t end);
797 int vn_pollrecord(struct vnode *vp, struct thread *p, int events);
798 int vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base,
799 int len, off_t offset, enum uio_seg segflg, int ioflg,
800 struct ucred *active_cred, struct ucred *file_cred, ssize_t *aresid,
801 struct thread *td);
802 int vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, void *base,
803 size_t len, off_t offset, enum uio_seg segflg, int ioflg,
804 struct ucred *active_cred, struct ucred *file_cred, size_t *aresid,
805 struct thread *td);
806 int vn_read_from_obj(struct vnode *vp, struct uio *uio);
807 int vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio,
808 struct thread *td);
809 int vn_rlimit_fsizex(const struct vnode *vp, struct uio *uio,
810 off_t maxfsz, ssize_t *resid_adj, struct thread *td);
811 void vn_rlimit_fsizex_res(struct uio *uio, ssize_t resid_adj);
812 int vn_rlimit_trunc(u_quad_t size, struct thread *td);
813 int vn_start_write(struct vnode *vp, struct mount **mpp, int flags);
814 int vn_start_secondary_write(struct vnode *vp, struct mount **mpp,
815 int flags);
816 int vn_truncate_locked(struct vnode *vp, off_t length, bool sync,
817 struct ucred *cred);
818 int vn_writechk(struct vnode *vp);
819 int vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
820 const char *attrname, int *buflen, char *buf, struct thread *td);
821 int vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
822 const char *attrname, int buflen, char *buf, struct thread *td);
823 int vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
824 const char *attrname, struct thread *td);
825 int vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags,
826 struct vnode **rvp);
827 int vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc,
828 void *alloc_arg, int lkflags, struct vnode **rvp);
829 int vn_utimes_perm(struct vnode *vp, struct vattr *vap,
830 struct ucred *cred, struct thread *td);
831 int vn_cmp(struct file *, struct file *, struct thread *td);
832
833 int vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio);
834 int vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
835 struct uio *uio);
836
837 void vn_seqc_write_begin_locked(struct vnode *vp);
838 void vn_seqc_write_begin(struct vnode *vp);
839 void vn_seqc_write_end_locked(struct vnode *vp);
840 void vn_seqc_write_end(struct vnode *vp);
841 #define vn_seqc_read_any(vp) seqc_read_any(&(vp)->v_seqc)
842 #define vn_seqc_read_notmodify(vp) seqc_read_notmodify(&(vp)->v_seqc)
843 #define vn_seqc_consistent(vp, seq) seqc_consistent(&(vp)->v_seqc, seq)
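/*
 * Editorial sketch: bracketing an update that lockless (fast path) lookup
 * relies on, so that concurrent vn_seqc_read_*() users notice the change.
 *
 *	vn_seqc_write_begin(vp);
 *	// ... update fields inspected by the lockless lookup code ...
 *	vn_seqc_write_end(vp);
 */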
844
845 #define vn_rangelock_unlock(vp, cookie) \
846 rangelock_unlock(&(vp)->v_rl, (cookie))
847 #define vn_rangelock_rlock(vp, start, end) \
848 rangelock_rlock(&(vp)->v_rl, (start), (end))
849 #define vn_rangelock_tryrlock(vp, start, end) \
850 rangelock_tryrlock(&(vp)->v_rl, (start), (end))
851 #define vn_rangelock_wlock(vp, start, end) \
852 rangelock_wlock(&(vp)->v_rl, (start), (end))
853 #define vn_rangelock_trywlock(vp, start, end) \
854 rangelock_trywlock(&(vp)->v_rl, (start), (end))
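/*
 * Editorial sketch: serializing I/O against a byte range of the file;
 * "examplefs_write_range" is a hypothetical helper.
 *
 *	void *cookie;
 *
 *	cookie = vn_rangelock_wlock(vp, offset, offset + len);
 *	error = examplefs_write_range(vp, offset, len);
 *	vn_rangelock_unlock(vp, cookie);
 */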
855
856 #define vn_irflag_read(vp) atomic_load_short(&(vp)->v_irflag)
857 void vn_irflag_set_locked(struct vnode *vp, short toset);
858 void vn_irflag_set(struct vnode *vp, short toset);
859 void vn_irflag_set_cond_locked(struct vnode *vp, short toset);
860 void vn_irflag_set_cond(struct vnode *vp, short toset);
861 void vn_irflag_unset_locked(struct vnode *vp, short tounset);
862 void vn_irflag_unset(struct vnode *vp, short tounset);
863
864 int vfs_cache_lookup(struct vop_lookup_args *ap);
865 int vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp);
866 void vfs_timestamp(struct timespec *);
867 void vfs_write_resume(struct mount *mp, int flags);
868 int vfs_write_suspend(struct mount *mp, int flags);
869 int vfs_write_suspend_umnt(struct mount *mp);
870 struct vnode *vnlru_alloc_marker(void);
871 void vnlru_free_marker(struct vnode *);
872 void vnlru_free_vfsops(int, struct vfsops *, struct vnode *);
873 int vop_stdbmap(struct vop_bmap_args *);
874 int vop_stdfdatasync_buf(struct vop_fdatasync_args *);
875 int vop_stdfsync(struct vop_fsync_args *);
876 int vop_stdgetwritemount(struct vop_getwritemount_args *);
877 int vop_stdgetpages(struct vop_getpages_args *);
878 int vop_stdinactive(struct vop_inactive_args *);
879 int vop_stdneed_inactive(struct vop_need_inactive_args *);
880 int vop_stdinotify(struct vop_inotify_args *);
881 int vop_stdinotify_add_watch(struct vop_inotify_add_watch_args *);
882 int vop_stdioctl(struct vop_ioctl_args *);
883 int vop_stdkqfilter(struct vop_kqfilter_args *);
884 int vop_stdlock(struct vop_lock1_args *);
885 int vop_stdunlock(struct vop_unlock_args *);
886 int vop_stdislocked(struct vop_islocked_args *);
887 int vop_lock(struct vop_lock1_args *);
888 int vop_unlock(struct vop_unlock_args *);
889 int vop_islocked(struct vop_islocked_args *);
890 int vop_stdputpages(struct vop_putpages_args *);
891 int vop_nopoll(struct vop_poll_args *);
892 int vop_stdaccess(struct vop_access_args *ap);
893 int vop_stdaccessx(struct vop_accessx_args *ap);
894 int vop_stdadvise(struct vop_advise_args *ap);
895 int vop_stdadvlock(struct vop_advlock_args *ap);
896 int vop_stdadvlockasync(struct vop_advlockasync_args *ap);
897 int vop_stdadvlockpurge(struct vop_advlockpurge_args *ap);
898 int vop_stdallocate(struct vop_allocate_args *ap);
899 int vop_stddeallocate(struct vop_deallocate_args *ap);
900 int vop_stdset_text(struct vop_set_text_args *ap);
901 int vop_stdpathconf(struct vop_pathconf_args *);
902 int vop_stdpoll(struct vop_poll_args *);
903 int vop_stdvptocnp(struct vop_vptocnp_args *ap);
904 int vop_stdvptofh(struct vop_vptofh_args *ap);
905 int vop_stdunp_bind(struct vop_unp_bind_args *ap);
906 int vop_stdunp_connect(struct vop_unp_connect_args *ap);
907 int vop_stdunp_detach(struct vop_unp_detach_args *ap);
908 int vop_stdadd_writecount_nomsync(struct vop_add_writecount_args *ap);
909 int vop_stdcopy_file_range(struct vop_copy_file_range_args *ap);
910 int vop_eopnotsupp(struct vop_generic_args *ap);
911 int vop_ebadf(struct vop_generic_args *ap);
912 int vop_einval(struct vop_generic_args *ap);
913 int vop_enoent(struct vop_generic_args *ap);
914 int vop_enotty(struct vop_generic_args *ap);
915 int vop_eagain(struct vop_generic_args *ap);
916 int vop_null(struct vop_generic_args *ap);
917 int vop_panic(struct vop_generic_args *ap);
918 int dead_poll(struct vop_poll_args *ap);
919 int dead_read(struct vop_read_args *ap);
920 int dead_write(struct vop_write_args *ap);
921
922 /* These are called from within the actual VOPS. */
923 void vop_allocate_post(void *a, int rc);
924 void vop_copy_file_range_post(void *ap, int rc);
925 void vop_close_post(void *a, int rc);
926 void vop_create_pre(void *a);
927 void vop_create_post(void *a, int rc);
928 void vop_deallocate_post(void *a, int rc);
929 void vop_whiteout_pre(void *a);
930 void vop_whiteout_post(void *a, int rc);
931 void vop_deleteextattr_pre(void *a);
932 void vop_deleteextattr_post(void *a, int rc);
933 void vop_link_pre(void *a);
934 void vop_link_post(void *a, int rc);
935 void vop_lookup_post(void *a, int rc);
936 void vop_lookup_pre(void *a);
937 void vop_mkdir_pre(void *a);
938 void vop_mkdir_post(void *a, int rc);
939 void vop_mknod_pre(void *a);
940 void vop_mknod_post(void *a, int rc);
941 void vop_open_post(void *a, int rc);
942 void vop_read_post(void *a, int rc);
943 void vop_read_pgcache_post(void *ap, int rc);
944 void vop_reclaim_post(void *a, int rc);
945 void vop_remove_pre(void *a);
946 void vop_remove_post(void *a, int rc);
947 void vop_rename_post(void *a, int rc);
948 void vop_rename_pre(void *a);
949 void vop_rmdir_pre(void *a);
950 void vop_rmdir_post(void *a, int rc);
951 void vop_setattr_pre(void *a);
952 void vop_setattr_post(void *a, int rc);
953 void vop_setacl_pre(void *a);
954 void vop_setacl_post(void *a, int rc);
955 void vop_setextattr_pre(void *a);
956 void vop_setextattr_post(void *a, int rc);
957 void vop_symlink_pre(void *a);
958 void vop_symlink_post(void *a, int rc);
959 int vop_sigdefer(struct vop_vector *vop, struct vop_generic_args *a);
960
961 #ifdef INVARIANTS
962 void vop_fdatasync_debugpre(void *a);
963 void vop_fdatasync_debugpost(void *a, int rc);
964 void vop_fplookup_vexec_debugpre(void *a);
965 void vop_fplookup_vexec_debugpost(void *a, int rc);
966 void vop_fplookup_symlink_debugpre(void *a);
967 void vop_fplookup_symlink_debugpost(void *a, int rc);
968 void vop_fsync_debugpre(void *a);
969 void vop_fsync_debugpost(void *a, int rc);
970 void vop_strategy_debugpre(void *a);
971 void vop_lock_debugpre(void *a);
972 void vop_lock_debugpost(void *a, int rc);
973 void vop_unlock_debugpre(void *a);
974 void vop_need_inactive_debugpre(void *a);
975 void vop_need_inactive_debugpost(void *a, int rc);
976 void vop_mkdir_debugpost(void *a, int rc);
977 #else
978 #define vop_fdatasync_debugpre(x) do { } while (0)
979 #define vop_fdatasync_debugpost(x, y) do { } while (0)
980 #define vop_fplookup_vexec_debugpre(x) do { } while (0)
981 #define vop_fplookup_vexec_debugpost(x, y) do { } while (0)
982 #define vop_fplookup_symlink_debugpre(x) do { } while (0)
983 #define vop_fplookup_symlink_debugpost(x, y) do { } while (0)
984 #define vop_fsync_debugpre(x) do { } while (0)
985 #define vop_fsync_debugpost(x, y) do { } while (0)
986 #define vop_strategy_debugpre(x) do { } while (0)
987 #define vop_lock_debugpre(x) do { } while (0)
988 #define vop_lock_debugpost(x, y) do { } while (0)
989 #define vop_unlock_debugpre(x) do { } while (0)
990 #define vop_need_inactive_debugpre(x) do { } while (0)
991 #define vop_need_inactive_debugpost(x, y) do { } while (0)
992 #define vop_mkdir_debugpost(x, y) do { } while (0)
993 #endif
994
995 void vop_rename_fail(struct vop_rename_args *ap);
996
997 #define vop_stat_helper_pre(ap) ({ \
998 struct vop_stat_args *_ap = (ap); \
999 int _error; \
1000 AUDIT_ARG_VNODE1(ap->a_vp); \
1001 _error = mac_vnode_check_stat(_ap->a_active_cred, _ap->a_file_cred, _ap->a_vp);\
1002 if (__predict_true(_error == 0)) { \
1003 ap->a_sb->st_padding1 = 0; \
1004 bzero(_ap->a_sb->st_spare, sizeof(_ap->a_sb->st_spare)); \
1005 ap->a_sb->st_filerev = 0; \
1006 ap->a_sb->st_bsdflags = 0; \
1007 } \
1008 _error; \
1009 })
1010
1011 #define vop_stat_helper_post(ap, error) ({ \
1012 struct vop_stat_args *_ap = (ap); \
1013 int _error = (error); \
1014 if (priv_check_cred_vfs_generation(_ap->a_active_cred)) \
1015 _ap->a_sb->st_gen = 0; \
1016 _error; \
1017 })
1018
1019 #ifdef INVARIANTS
1020 #define vop_readdir_pre_assert(ap) \
1021 ssize_t nresid, oresid; \
1022 \
1023 oresid = (ap)->a_uio->uio_resid;
1024
1025 #define vop_readdir_post_assert(ap, ret) \
1026 nresid = (ap)->a_uio->uio_resid; \
1027 if ((ret) == 0 && (ap)->a_eofflag != NULL) { \
1028 VNASSERT(oresid == 0 || nresid != oresid || \
1029 *(ap)->a_eofflag == 1, \
1030 (ap)->a_vp, ("VOP_READDIR: eofflag not set")); \
1031 }
1032 #else
1033 #define vop_readdir_pre_assert(ap)
1034 #define vop_readdir_post_assert(ap, ret)
1035 #endif
1036
1037 #define vop_readdir_pre(ap) do { \
1038 vop_readdir_pre_assert(ap)
1039
1040 #define vop_readdir_post(ap, ret) \
1041 vop_readdir_post_assert(ap, ret); \
1042 if ((ret) == 0) { \
1043 VFS_KNOTE_LOCKED((ap)->a_vp, NOTE_READ); \
1044 INOTIFY((ap)->a_vp, IN_ACCESS); \
1045 } \
1046 } while (0)
1047
1048 #define vop_write_pre(ap) \
1049 struct vattr va; \
1050 int error; \
1051 off_t osize, ooffset, noffset; \
1052 \
1053 osize = ooffset = noffset = 0; \
1054 if (!VN_KNLIST_EMPTY((ap)->a_vp)) { \
1055 error = VOP_GETATTR((ap)->a_vp, &va, (ap)->a_cred); \
1056 if (error) \
1057 return (error); \
1058 ooffset = (ap)->a_uio->uio_offset; \
1059 osize = (off_t)va.va_size; \
1060 }
1061
1062 #define vop_write_post(ap, ret) \
1063 noffset = (ap)->a_uio->uio_offset; \
1064 if (noffset > ooffset) { \
1065 if (!VN_KNLIST_EMPTY((ap)->a_vp)) { \
1066 VFS_KNOTE_LOCKED((ap)->a_vp, NOTE_WRITE | \
1067 (noffset > osize ? NOTE_EXTEND : 0)); \
1068 } \
1069 INOTIFY((ap)->a_vp, IN_MODIFY); \
1070 }
1071
1072 #define VOP_LOCK(vp, flags) VOP_LOCK1(vp, flags, __FILE__, __LINE__)
1073
1074 #ifdef INVARIANTS
1075 #define VOP_ADD_WRITECOUNT_CHECKED(vp, cnt) \
1076 do { \
1077 int error_; \
1078 \
1079 error_ = VOP_ADD_WRITECOUNT((vp), (cnt)); \
1080 VNASSERT(error_ == 0, (vp), ("VOP_ADD_WRITECOUNT returned %d", \
1081 error_)); \
1082 } while (0)
1083 #define VOP_SET_TEXT_CHECKED(vp) \
1084 do { \
1085 int error_; \
1086 \
1087 error_ = VOP_SET_TEXT((vp)); \
1088 VNASSERT(error_ == 0, (vp), ("VOP_SET_TEXT returned %d", \
1089 error_)); \
1090 } while (0)
1091 #define VOP_UNSET_TEXT_CHECKED(vp) \
1092 do { \
1093 int error_; \
1094 \
1095 error_ = VOP_UNSET_TEXT((vp)); \
1096 VNASSERT(error_ == 0, (vp), ("VOP_UNSET_TEXT returned %d", \
1097 error_)); \
1098 } while (0)
1099 #else
1100 #define VOP_ADD_WRITECOUNT_CHECKED(vp, cnt) VOP_ADD_WRITECOUNT((vp), (cnt))
1101 #define VOP_SET_TEXT_CHECKED(vp) VOP_SET_TEXT((vp))
1102 #define VOP_UNSET_TEXT_CHECKED(vp) VOP_UNSET_TEXT((vp))
1103 #endif
1104
1105 #define VN_IS_DOOMED(vp) __predict_false((vn_irflag_read(vp) & VIRF_DOOMED) != 0)
1106
1107 void vput(struct vnode *vp);
1108 void vrele(struct vnode *vp);
1109 void vref(struct vnode *vp);
1110 void vrefact(struct vnode *vp);
1111 void v_addpollinfo(struct vnode *vp);
1112 static __inline int
1113 vrefcnt(struct vnode *vp)
1114 {
1115
1116 return (vp->v_usecount);
1117 }
1118
1119 #define vholdl(vp) do { \
1120 ASSERT_VI_LOCKED(vp, __func__); \
1121 vhold(vp); \
1122 } while (0)
1123
1124 #define vrefl(vp) do { \
1125 ASSERT_VI_LOCKED(vp, __func__); \
1126 vref(vp); \
1127 } while (0)
1128
1129 /*
1130 * The caller doesn't know the file size and vnode_create_vobject() should
1131 * determine the size on its own.
1132 */
1133 #define VNODE_NO_SIZE ((off_t)-1)
1134
1135 int vnode_create_vobject(struct vnode *vp, off_t size, struct thread *td);
1136 int vnode_create_disk_vobject(struct vnode *vp, off_t size, struct thread *td);
1137 void vnode_destroy_vobject(struct vnode *vp);
1138
1139 extern struct vop_vector fifo_specops;
1140 extern struct vop_vector dead_vnodeops;
1141 extern struct vop_vector default_vnodeops;
1142
1143 #define VOP_PANIC ((void*)(uintptr_t)vop_panic)
1144 #define VOP_NULL ((void*)(uintptr_t)vop_null)
1145 #define VOP_EBADF ((void*)(uintptr_t)vop_ebadf)
1146 #define VOP_ENOTTY ((void*)(uintptr_t)vop_enotty)
1147 #define VOP_EINVAL ((void*)(uintptr_t)vop_einval)
1148 #define VOP_ENOENT ((void*)(uintptr_t)vop_enoent)
1149 #define VOP_EOPNOTSUPP ((void*)(uintptr_t)vop_eopnotsupp)
1150 #define VOP_EAGAIN ((void*)(uintptr_t)vop_eagain)
1151
1152 /* fifo_vnops.c */
1153 int fifo_printinfo(struct vnode *);
1154
1155 /* vfs_hash.c */
1156 typedef int vfs_hash_cmp_t(struct vnode *vp, void *arg);
1157
1158 void vfs_hash_changesize(u_long newhashsize);
1159 int vfs_hash_get(const struct mount *mp, u_int hash, int flags,
1160 struct thread *td, struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);
1161 u_int vfs_hash_index(struct vnode *vp);
1162 int vfs_hash_insert(struct vnode *vp, u_int hash, int flags, struct thread *td,
1163 struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);
1164 void vfs_hash_ref(const struct mount *mp, u_int hash, struct thread *td,
1165 struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);
1166 void vfs_hash_rehash(struct vnode *vp, u_int hash);
1167 void vfs_hash_remove(struct vnode *vp);
1168
1169 int vfs_kqfilter(struct vop_kqfilter_args *);
1170 struct dirent;
1171 int vn_dir_next_dirent(struct vnode *vp, struct thread *td,
1172 char *dirbuf, size_t dirbuflen,
1173 struct dirent **dpp, size_t *len, off_t *off, int *eofflag);
1174 int vn_dir_check_empty(struct vnode *vp);
1175 int vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off);
1176
1177 int vfs_unixify_accmode(accmode_t *accmode);
1178
1179 void vfs_unp_reclaim(struct vnode *vp);
1180
1181 int setfmode(struct thread *td, struct ucred *cred, struct vnode *vp, int mode);
1182 int setfown(struct thread *td, struct ucred *cred, struct vnode *vp, uid_t uid,
1183 gid_t gid);
1184 int vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1185 struct thread *td);
1186 int vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1187 struct thread *td);
1188 int vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *active_cred);
1189 int vn_getsize(struct vnode *vp, off_t *size, struct ucred *active_cred);
1190
1191 void vn_fsid(struct vnode *vp, struct vattr *va);
1192
1193 int vn_dir_check_exec(struct vnode *vp, struct componentname *cnp);
1194 int vn_lktype_write(struct mount *mp, struct vnode *vp);
1195
1196 #ifdef INVARIANTS
1197 void vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state);
1198 #endif
1199
1200 static inline void
1201 vn_set_state(struct vnode *vp, __enum_uint8(vstate) state)
1202 {
1203 #ifdef INVARIANTS
1204 vn_set_state_validate(vp, state);
1205 #endif
1206 vp->v_state = state;
1207 }
1208
1209 static inline __enum_uint8(vstate)
1210 vn_get_state(struct vnode *vp)
1211 {
1212 return (vp->v_state);
1213 }
1214
1215 #define VOP_UNLOCK_FLAGS(vp, flags) ({ \
1216 struct vnode *_vp = (vp); \
1217 int _flags = (flags); \
1218 int _error; \
1219 \
1220 if ((_flags & ~(LK_INTERLOCK | LK_RELEASE)) != 0) \
1221 panic("%s: unsupported flags %x\n", __func__, flags); \
1222 _error = VOP_UNLOCK(_vp); \
1223 if (_flags & LK_INTERLOCK) \
1224 VI_UNLOCK(_vp); \
1225 _error; \
1226 })
1227
1228 #include <sys/kernel.h>
1229
1230 #define VFS_VOP_VECTOR_REGISTER(vnodeops) \
1231 SYSINIT(vfs_vector_##vnodeops##_f, SI_SUB_VFS, SI_ORDER_ANY, \
1232 vfs_vector_op_register, &vnodeops)
1233
1234 #define VFS_SMR_DECLARE \
1235 extern smr_t vfs_smr
1236
1237 #define VFS_SMR() vfs_smr
1238 #define vfs_smr_enter() smr_enter(VFS_SMR())
1239 #define vfs_smr_exit() smr_exit(VFS_SMR())
1240 #define vfs_smr_synchronize() smr_synchronize(VFS_SMR())
1241 #define vfs_smr_entered_load(ptr) smr_entered_load((ptr), VFS_SMR())
1242 #define VFS_SMR_ENTERED() SMR_ENTERED(VFS_SMR())
1243 #define VFS_SMR_ASSERT_ENTERED() SMR_ASSERT_ENTERED(VFS_SMR())
1244 #define VFS_SMR_ASSERT_NOT_ENTERED() SMR_ASSERT_NOT_ENTERED(VFS_SMR())
1245 #define VFS_SMR_ZONE_SET(zone) uma_zone_set_smr((zone), VFS_SMR())
1246
1247 #define vn_load_v_data_smr(vp) ({ \
1248 struct vnode *_vp = (vp); \
1249 \
1250 VFS_SMR_ASSERT_ENTERED(); \
1251 atomic_load_consume_ptr(&(_vp)->v_data);\
1252 })
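/*
 * Editorial sketch, in the spirit of the lockless lookup code: inspect a
 * vnode under SMR protection and only then try to acquire a real reference.
 *
 *	enum vgetstate vs;
 *	void *data;
 *
 *	vfs_smr_enter();
 *	data = vn_load_v_data_smr(vp);
 *	vs = vget_prep_smr(vp);		// VGET_NONE if the vnode is going away
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		return (EAGAIN);
 *	error = vget_finish(vp, LK_SHARED, vs);
 */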
1253
1254 #endif /* _KERNEL */
1255
1256 #endif /* !_SYS_VNODE_H_ */
1257