1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #ifndef _SYS_VNODE_H_
33 #define _SYS_VNODE_H_
34
35 #include <sys/bufobj.h>
36 #include <sys/queue.h>
37 #include <sys/lock.h>
38 #include <sys/lockmgr.h>
39 #include <sys/mutex.h>
40 #include <sys/rangelock.h>
41 #include <sys/selinfo.h>
42 #include <sys/uio.h>
43 #include <sys/acl.h>
44 #include <sys/ktr.h>
45 #include <sys/_seqc.h>
46
47 /*
48 * The vnode is the focus of all file activity in UNIX. There is a
49 * unique vnode allocated for each active file, each current directory,
50 * each mounted-on file, text file, and the root.
51 */
52
53 /*
54 * Vnode types. VNON means no type.
55 */
__enum_uint8_decl(vtype)56 __enum_uint8_decl(vtype) {
57 VNON,
58 VREG,
59 VDIR,
60 VBLK,
61 VCHR,
62 VLNK,
63 VSOCK,
64 VFIFO,
65 VBAD,
66 VMARKER,
67 VLASTTYPE = VMARKER,
68 };
69
/*
 * We frequently need to test if something is a device node.
 */
73 #define VTYPE_ISDEV(vtype) ((vtype) == VCHR || (vtype) == VBLK)
74
__enum_uint8_decl(vstate)75 __enum_uint8_decl(vstate) {
76 VSTATE_UNINITIALIZED,
77 VSTATE_CONSTRUCTED,
78 VSTATE_DESTROYING,
79 VSTATE_DEAD,
80 VLASTSTATE = VSTATE_DEAD,
81 };
82
/*
 * Result of vget_prep()/vget_prep_smr(): records which kind of reference
 * (if any) was taken so vget_finish()/vget_abort() know what to release.
 */
enum vgetstate {
	VGET_NONE,	/* no reference obtained */
	VGET_HOLDCNT,	/* hold-count (v_holdcnt) reference */
	VGET_USECOUNT,	/* use-count (v_usecount) reference */
};
88
89 /*
90 * Each underlying filesystem allocates its own private area and hangs
91 * it from v_data. If non-null, this area is freed in getnewvnode().
92 */
93
94 struct cache_fpl;
95 struct inotify_watch;
96 struct namecache;
97
/*
 * Per-vnode poll/select and inotify bookkeeping, hung off v_pollinfo
 * and allocated on demand.  Fields below vpi_lock are protected by it.
 */
struct vpollinfo {
	struct mtx vpi_lock;		/* lock to protect below */
	TAILQ_HEAD(, inotify_watch) vpi_inotify; /* list of inotify watchers */
	struct selinfo vpi_selinfo;	/* identity of poller(s) */
	short	vpi_events;		/* what they are looking for */
	short	vpi_revents;		/* what has happened */
};
105
106 /*
107 * Reading or writing any of these items requires holding the appropriate lock.
108 *
109 * Lock reference:
110 * c - namecache mutex
111 * i - interlock
112 * l - mp mnt_listmtx or freelist mutex
113 * I - updated with atomics, 0->1 and 1->0 transitions with interlock held
114 * m - mount point interlock
115 * p - pollinfo lock
116 * u - Only a reference to the vnode is needed to read.
117 * v - vnode lock
118 *
119 * Vnodes may be found on many lists. The general way to deal with operating
120 * on a vnode that is on a list is:
121 * 1) Lock the list and find the vnode.
122 * 2) Lock interlock so that the vnode does not go away.
123 * 3) Unlock the list to avoid lock order reversals.
124 * 4) vget with LK_INTERLOCK and check for ENOENT, or
125 * 5) Check for DOOMED if the vnode lock is not required.
126 * 6) Perform your operation, then vput().
127 */
128
129 #if defined(_KERNEL) || defined(_KVM_VNODE)
130
/*
 * The vnode itself.  Single-letter codes in the field comments refer to
 * the "Lock reference" legend above.  Field order is chosen to limit
 * padding; see the size assertion below before rearranging.
 */
struct vnode {
	/*
	 * Fields which define the identity of the vnode.  These fields are
	 * owned by the filesystem (XXX: and vgone() ?)
	 */
	__enum_uint8(vtype) v_type;		/* u vnode type */
	__enum_uint8(vstate) v_state;		/* u vnode state */
	short v_irflag;				/* i frequently read flags */
	seqc_t v_seqc;				/* i modification count */
	uint32_t v_nchash;			/* u namecache hash */
	u_int v_hash;				/* vfs_hash value; see v_hashlist */
	const struct vop_vector *v_op;		/* u vnode operations vector */
	void *v_data;				/* u private data for fs */

	/*
	 * Filesystem instance stuff
	 */
	struct mount *v_mount;			/* u ptr to vfs we are in */
	TAILQ_ENTRY(vnode) v_nmntvnodes;	/* m vnodes for mount point */

	/*
	 * Type specific fields, only one applies to any given vnode.
	 */
	union {
		struct mount *v_mountedhere;	/* v ptr to mountpoint (VDIR) */
		struct unpcb *v_unpcb;		/* v unix domain net (VSOCK) */
		struct cdev *v_rdev;		/* v device (VCHR, VBLK) */
		struct fifoinfo *v_fifoinfo;	/* v fifo (VFIFO) */
	};

	/*
	 * vfs_hash: (mount + inode) -> vnode hash.  The hash value
	 * itself is grouped with other int fields, to avoid padding.
	 */
	LIST_ENTRY(vnode)	v_hashlist;

	/*
	 * VFS_namecache stuff
	 */
	LIST_HEAD(, namecache) v_cache_src;	/* c Cache entries from us */
	TAILQ_HEAD(, namecache) v_cache_dst;	/* c Cache entries to us */
	struct namecache *v_cache_dd;		/* c Cache entry for .. vnode */

	/*
	 * Locking
	 */
	struct lock v_lock;			/* u (if fs don't have one) */
	struct mtx v_interlock;			/* lock for "i" things */
	struct lock *v_vnlock;			/* u pointer to vnode lock */

	/*
	 * The machinery of being a vnode
	 */
	TAILQ_ENTRY(vnode) v_vnodelist;		/* l vnode lists */
	TAILQ_ENTRY(vnode) v_lazylist;		/* l vnode lazy list */
	struct bufobj v_bufobj;			/* * Buffer cache object */

	/*
	 * Hooks for various subsystems and features.
	 */
	struct vpollinfo *v_pollinfo;		/* i Poll events, p for *v_pi */
	struct label *v_label;			/* MAC label for vnode */
	struct lockf *v_lockf;		/* Byte-level advisory lock list */
	struct rangelock v_rl;			/* Byte-range lock */

	u_int v_holdcnt;			/* I prevents recycling. */
	u_int v_usecount;			/* I ref count of users */
	u_short v_iflag;			/* i vnode flags (see below) */
	u_short v_vflag;			/* v vnode flags */
	u_short v_mflag;			/* l mnt-specific vnode flags */
	short v_dbatchcpu;			/* i LRU requeue deferral batch */
	int v_writecount;			/* I ref count of writers or
						   (negative) text users */
	int v_seqc_users;			/* i modifications pending */
};
206
207 #define VN_ISDEV(vp) VTYPE_ISDEV((vp)->v_type)
208
209 #ifndef DEBUG_LOCKS
210 #ifdef _LP64
211 /*
212 * Not crossing 448 bytes fits 9 vnodes per page. If you have to add fields
213 * to the structure and there is nothing which can be done to prevent growth
214 * then so be it. But don't grow it without a good reason.
215 */
216 _Static_assert(sizeof(struct vnode) <= 448, "vnode size crosses 448 bytes");
217 #endif
218 #endif
219
220 #endif /* defined(_KERNEL) || defined(_KVM_VNODE) */
221
222 #define bo2vnode(bo) __containerof((bo), struct vnode, v_bufobj)
223
224 #define v_object v_bufobj.bo_object
225
/*
 * Deliver a kevent hint (b) for the vnode, but only if a knote was ever
 * attached (VIRF_KNOTE set); otherwise v_pollinfo may not be allocated.
 * KNF_NOKQLOCK is always ORed into the flags.
 */
#define	VN_KNOTE(vp, b, a) do {						\
	if ((vn_irflag_read(vp) & VIRF_KNOTE) != 0) {			\
		KNOTE(&vp->v_pollinfo->vpi_selinfo.si_note, (b),	\
		    (a) | KNF_NOKQLOCK);				\
	}								\
} while (0)
/* Caller already holds the knlist lock. */
#define	VN_KNOTE_LOCKED(vp, b)		VN_KNOTE(vp, b, KNF_LISTLOCKED)
/* Caller does not hold the knlist lock. */
#define	VN_KNOTE_UNLOCKED(vp, b)	VN_KNOTE(vp, b, 0)
234
235 /*
236 * Vnode flags.
237 * VI flags are protected by interlock and live in v_iflag
238 * VIRF flags are protected by interlock and live in v_irflag
239 * VV flags are protected by the vnode lock and live in v_vflag
240 *
241 * VIRF_DOOMED is doubly protected by the interlock and vnode lock. Both
242 * are required for writing but the status may be checked with either.
243 */
244 #define VHOLD_NO_SMR (1<<29) /* Disable vhold_smr */
245 #define VHOLD_ALL_FLAGS (VHOLD_NO_SMR)
246
247 #define VIRF_DOOMED 0x0001 /* This vnode is being recycled */
248 #define VIRF_PGREAD 0x0002 /* Direct reads from the page cache are permitted,
249 never cleared once set */
250 #define VIRF_MOUNTPOINT 0x0004 /* This vnode is mounted on */
251 #define VIRF_TEXT_REF 0x0008 /* Executable mappings ref the vnode */
252 #define VIRF_CROSSMP 0x0010 /* Cross-mp vnode, no locking */
253 #define VIRF_NAMEDDIR 0x0020 /* Named attribute directory */
254 #define VIRF_NAMEDATTR 0x0040 /* Named attribute */
255 #define VIRF_INOTIFY 0x0080 /* This vnode is being watched */
256 #define VIRF_INOTIFY_PARENT 0x0100 /* A parent of this vnode may be being
257 watched */
258 #define VIRF_KNOTE 0x0200 /* Has knlist */
259
260 #define VI_UNUSED0 0x0001 /* unused */
261 #define VI_MOUNT 0x0002 /* Mount in progress */
262 #define VI_DOINGINACT 0x0004 /* VOP_INACTIVE is in progress */
263 #define VI_OWEINACT 0x0008 /* Need to call inactive */
264 #define VI_DEFINACT 0x0010 /* deferred inactive */
265 #define VI_FOPENING 0x0020 /* In open, with opening process having the
266 first right to advlock file */
267 #define VI_DELAYED_SETSIZE 0x0040 /* Delayed setsize */
268
269 #define VV_ROOT 0x0001 /* root of its filesystem */
270 #define VV_ISTTY 0x0002 /* vnode represents a tty */
271 #define VV_NOSYNC 0x0004 /* unlinked, stop syncing */
272 #define VV_ETERNALDEV 0x0008 /* device that is never destroyed */
273 #define VV_CACHEDLABEL 0x0010 /* Vnode has valid cached MAC label */
274 #define VV_VMSIZEVNLOCK 0x0020 /* object size check requires vnode lock */
275 #define VV_COPYONWRITE 0x0040 /* vnode is doing copy-on-write */
276 #define VV_SYSTEM 0x0080 /* vnode being used by kernel */
277 #define VV_PROCDEP 0x0100 /* vnode is process dependent */
#define	VV_UNLINKED	0x0200	/* unlinked but still open directory */
279 #define VV_DELETED 0x0400 /* should be removed */
280 #define VV_MD 0x0800 /* vnode backs the md device */
281 #define VV_FORCEINSMQ 0x1000 /* force the insmntque to succeed */
282 #define VV_READLINK 0x2000 /* fdescfs linux vnode */
283 #define VV_UNREF 0x4000 /* vunref, do not drop lock in inactive() */
284 #define VV_CROSSLOCK 0x8000 /* vnode lock is shared w/ root mounted here */
285
286 #define VMP_LAZYLIST 0x0001 /* Vnode is on mnt's lazy list */
287
288 /*
289 * Vnode attributes. A field value of VNOVAL represents a field whose value
290 * is unavailable (getattr) or which is not to be changed (setattr).
291 */
/*
 * Any field may hold VNOVAL, meaning "unavailable" (getattr) or
 * "do not change" (setattr); see the comment above.
 */
struct vattr {
	__enum_uint8(vtype) va_type;	/* vnode type (for create) */
	u_short		va_mode;	/* files access mode and type */
	uint16_t	va_bsdflags;	/* same as st_bsdflags from stat(2) */
	uid_t		va_uid;		/* owner user id */
	gid_t		va_gid;		/* owner group id */
	nlink_t		va_nlink;	/* number of references to file */
	dev_t		va_fsid;	/* filesystem id */
	ino_t		va_fileid;	/* file id */
	u_quad_t	va_size;	/* file size in bytes */
	long		va_blocksize;	/* blocksize preferred for i/o */
	struct timespec	va_atime;	/* time of last access */
	struct timespec	va_mtime;	/* time of last modification */
	struct timespec	va_ctime;	/* time file changed */
	struct timespec	va_birthtime;	/* time file created */
	u_long		va_gen;		/* generation number of file */
	u_long		va_flags;	/* flags defined for file */
	dev_t		va_rdev;	/* device the special file represents */
	u_quad_t	va_bytes;	/* bytes of disk space held by file */
	u_quad_t	va_filerev;	/* file modification number */
	u_int		va_vaflags;	/* operations flags (VA_* below) */
	long		va_spare;	/* remain quad aligned */
};
315
316 #define VATTR_ISDEV(vap) VTYPE_ISDEV((vap)->va_type)
317
318 /*
319 * Flags for va_vaflags.
320 */
321 #define VA_UTIMES_NULL 0x01 /* utimes argument was NULL */
322 #define VA_EXCLUSIVE 0x02 /* exclusive create request */
323 #define VA_SYNC 0x04 /* O_SYNC truncation */
324
325 /*
326 * Flags for ioflag. (high 16 bits used to ask for read-ahead and
327 * help with write clustering)
328 * NB: IO_NDELAY and IO_DIRECT are linked to fcntl.h
329 */
330 #define IO_UNIT 0x0001 /* do I/O as atomic unit */
331 #define IO_APPEND 0x0002 /* append write to end */
332 #define IO_NDELAY 0x0004 /* FNDELAY flag set in file table */
333 #define IO_NODELOCKED 0x0008 /* underlying node already locked */
#define	IO_ASYNC	0x0010		/* bawrite rather than bdwrite */
335 #define IO_VMIO 0x0020 /* data already in VMIO space */
336 #define IO_INVAL 0x0040 /* invalidate after I/O */
337 #define IO_SYNC 0x0080 /* do I/O synchronously */
338 #define IO_DIRECT 0x0100 /* attempt to bypass buffer cache */
339 #define IO_NOREUSE 0x0200 /* VMIO data won't be reused */
340 #define IO_EXT 0x0400 /* operate on external attributes */
341 #define IO_NORMAL 0x0800 /* operate on regular data */
342 #define IO_NOMACCHECK 0x1000 /* MAC checks unnecessary */
343 #define IO_BUFLOCKED 0x2000 /* ffs flag; indir buf is locked */
344 #define IO_RANGELOCKED 0x4000 /* range locked */
345 #define IO_DATASYNC 0x8000 /* do only data I/O synchronously */
346
347 #define IO_SEQMAX 0x7F /* seq heuristic max value */
348 #define IO_SEQSHIFT 16 /* seq heuristic in upper 16 bits */
349
350 /*
351 * Flags for accmode_t.
352 */
353 #define VEXEC 000000000100 /* execute/search permission */
354 #define VWRITE 000000000200 /* write permission */
355 #define VREAD 000000000400 /* read permission */
356 #define VADMIN 000000010000 /* being the file owner */
357 #define VAPPEND 000000040000 /* permission to write/append */
358 /*
359 * VEXPLICIT_DENY makes VOP_ACCESSX(9) return EPERM or EACCES only
360 * if permission was denied explicitly, by a "deny" rule in NFSv4 ACL,
361 * and 0 otherwise. This never happens with ordinary unix access rights
362 * or POSIX.1e ACLs. Obviously, VEXPLICIT_DENY must be OR-ed with
363 * some other V* constant.
364 */
365 #define VEXPLICIT_DENY 000000100000
366 #define VREAD_NAMED_ATTRS 000000200000 /* not used */
367 #define VWRITE_NAMED_ATTRS 000000400000 /* not used */
368 #define VDELETE_CHILD 000001000000
369 #define VREAD_ATTRIBUTES 000002000000 /* permission to stat(2) */
370 #define VWRITE_ATTRIBUTES 000004000000 /* change {m,c,a}time */
371 #define VDELETE 000010000000
372 #define VREAD_ACL 000020000000 /* read ACL and file mode */
373 #define VWRITE_ACL 000040000000 /* change ACL and/or file mode */
374 #define VWRITE_OWNER 000100000000 /* change file owner */
375 #define VSYNCHRONIZE 000200000000 /* not used */
376 #define VCREAT 000400000000 /* creating new file */
377 #define VVERIFY 001000000000 /* verification required */
378
379 /*
380 * Permissions that were traditionally granted only to the file owner.
381 */
382 #define VADMIN_PERMS (VADMIN | VWRITE_ATTRIBUTES | VWRITE_ACL | \
383 VWRITE_OWNER)
384
385 /*
386 * Permissions that were traditionally granted to everyone.
387 */
388 #define VSTAT_PERMS (VREAD_ATTRIBUTES | VREAD_ACL)
389
390 /*
391 * Permissions that allow to change the state of the file in any way.
392 */
393 #define VMODIFY_PERMS (VWRITE | VAPPEND | VADMIN_PERMS | VDELETE_CHILD | \
394 VDELETE)
395
396 /*
397 * Token indicating no attribute value yet assigned.
398 */
399 #define VNOVAL (-1)
400
401 /*
402 * LK_TIMELOCK timeout for vnode locks (used mainly by the pageout daemon)
403 */
404 #define VLKTIMEOUT (hz / 20 + 1)
405
406 #ifdef _KERNEL
407
408 #ifdef MALLOC_DECLARE
409 MALLOC_DECLARE(M_VNODE);
410 #endif
411
412 extern u_int ncsizefactor;
413 extern const u_int io_hold_cnt;
414
415 /*
416 * Convert between vnode types and inode formats (since POSIX.1
417 * defines mode word of stat structure in terms of inode formats).
418 */
419 extern __enum_uint8(vtype) iftovt_tab[];
420 extern int vttoif_tab[];
421 #define IFTOVT(mode) (iftovt_tab[((mode) & S_IFMT) >> 12])
422 #define VTTOIF(indx) (vttoif_tab[(int)(indx)])
423 #define MAKEIMODE(indx, mode) (int)(VTTOIF(indx) | (mode))
424
425 /*
426 * Flags to various vnode functions.
427 */
428 #define SKIPSYSTEM 0x0001 /* vflush: skip vnodes marked VSYSTEM */
429 #define FORCECLOSE 0x0002 /* vflush: force file closure */
430 #define WRITECLOSE 0x0004 /* vflush: only close writable files */
431 #define EARLYFLUSH 0x0008 /* vflush: early call for ffs_flushfiles */
432 #define V_SAVE 0x0001 /* vinvalbuf: sync file first */
433 #define V_ALT 0x0002 /* vinvalbuf: invalidate only alternate bufs */
434 #define V_NORMAL 0x0004 /* vinvalbuf: invalidate only regular bufs */
435 #define V_CLEANONLY 0x0008 /* vinvalbuf: invalidate only clean bufs */
436 #define V_VMIO 0x0010 /* vinvalbuf: called during pageout */
437 #define V_ALLOWCLEAN 0x0020 /* vinvalbuf: allow clean buffers after flush */
438 #define REVOKEALL 0x0001 /* vop_revoke: revoke all aliases */
439 #define V_WAIT 0x0001 /* vn_start_write: sleep for suspend */
440 #define V_NOWAIT 0x0002 /* vn_start_write: don't sleep for suspend */
441 #define V_XSLEEP 0x0004 /* vn_start_write: just return after sleep */
442 #define V_PCATCH 0x0008 /* vn_start_write: make the sleep interruptible */
443 #define V_VALID_FLAGS (V_WAIT | V_NOWAIT | V_XSLEEP | V_PCATCH)
444
445 #define VR_START_WRITE 0x0001 /* vfs_write_resume: start write atomically */
446 #define VR_NO_SUSPCLR 0x0002 /* vfs_write_resume: do not clear suspension */
447
448 #define VS_SKIP_UNMOUNT 0x0001 /* vfs_write_suspend: fail if the
449 filesystem is being unmounted */
450
451 #ifdef DIAGNOSTIC
452 #define VATTR_NULL(vap) vattr_null(vap)
453 #else
454 #define VATTR_NULL(vap) (*(vap) = va_null) /* initialize a vattr */
455 #endif /* DIAGNOSTIC */
456
457 /*
458 * Global vnode data.
459 */
460 extern struct vnode *rootvnode; /* root (i.e. "/") vnode */
461 extern struct mount *rootdevmp; /* "/dev" mount */
462 extern u_long desiredvnodes; /* number of vnodes desired */
463 extern struct uma_zone *namei_zone;
464 extern struct vattr va_null; /* predefined null vattr structure */
465
466 extern u_int vn_lock_pair_pause_max;
467
468 #define VI_LOCK(vp) mtx_lock(&(vp)->v_interlock)
469 #define VI_LOCK_FLAGS(vp, flags) mtx_lock_flags(&(vp)->v_interlock, (flags))
470 #define VI_TRYLOCK(vp) mtx_trylock(&(vp)->v_interlock)
471 #define VI_UNLOCK(vp) mtx_unlock(&(vp)->v_interlock)
472 #define VI_MTX(vp) (&(vp)->v_interlock)
473
474 #define VN_LOCK_AREC(vp) lockallowrecurse((vp)->v_vnlock)
475 #define VN_LOCK_ASHARE(vp) lockallowshare((vp)->v_vnlock)
476 #define VN_LOCK_DSHARE(vp) lockdisableshare((vp)->v_vnlock)
477
478 #endif /* _KERNEL */
479
480 /*
481 * Mods for extensibility.
482 */
483
484 /*
485 * Flags for vdesc_flags:
486 */
487 #define VDESC_MAX_VPS 16
488 /* Low order 16 flag bits are reserved for willrele flags for vp arguments. */
489 #define VDESC_VP0_WILLRELE 0x0001
490 #define VDESC_VP1_WILLRELE 0x0002
491 #define VDESC_VP2_WILLRELE 0x0004
492 #define VDESC_VP3_WILLRELE 0x0008
493
494 /*
495 * A generic structure.
496 * This can be used by bypass routines to identify generic arguments.
497 */
struct vop_generic_args {
	struct vnodeop_desc *a_desc;	/* describes the operation in flight */
	/* other random data follows, presumably */
};

/* Signature of a bypass handler; receives the generic argument frame. */
typedef int vop_bypass_t(struct vop_generic_args *);
504
505 /*
506 * VDESC_NO_OFFSET is used to identify the end of the offset list
507 * and in places where no such field exists.
508 */
509 #define VDESC_NO_OFFSET -1
510
511 /*
512 * This structure describes the vnode operation taking place.
513 */
/*
 * This structure describes the vnode operation taking place.
 * The vdesc_*_offset fields give byte offsets into the op's argument
 * structure, terminated/absent entries marked with VDESC_NO_OFFSET.
 */
struct vnodeop_desc {
	char	*vdesc_name;		/* a readable name for debugging */
	int	 vdesc_flags;		/* VDESC_* flags */
	int	 vdesc_vop_offset;	/* op's slot offset (presumably within
					   the vop vector) -- confirm */
	vop_bypass_t *vdesc_call;	/* Function to call */

	/*
	 * These ops are used by bypass routines to map and locate arguments.
	 * Creds and procs are not needed in bypass routines, but sometimes
	 * they are useful to (for example) transport layers.
	 * Nameidata is useful because it has a cred in it.
	 */
	int	*vdesc_vp_offsets;	/* list ended by VDESC_NO_OFFSET */
	int	 vdesc_vpp_offset;	/* return vpp location */
	int	 vdesc_cred_offset;	/* cred location, if any */
	int	 vdesc_thread_offset;	/* thread location, if any */
	int	 vdesc_componentname_offset; /* if any */
};
532
533 #ifdef _KERNEL
534 /*
535 * A list of all the operation descs.
536 */
537 extern struct vnodeop_desc *vnodeop_descs[];
538
539 #define VOPARG_OFFSETOF(s_type, field) __offsetof(s_type, field)
540 #define VOPARG_OFFSETTO(s_type, s_offset, struct_p) \
541 ((s_type)(((char*)(struct_p)) + (s_offset)))
542
543 #ifdef INVARIANTS
544 /*
545 * Support code to aid in debugging VFS locking problems. Not totally
546 * reliable since if the thread sleeps between changing the lock
547 * state and checking it with the assert, some other thread could
548 * change the state. They are good enough for debugging a single
549 * filesystem using a single-threaded test. Note that the unreliability is
550 * limited to false negatives; efforts were made to ensure that false
551 * positives cannot occur.
552 */
553 void assert_vi_locked(struct vnode *vp, const char *str);
554 void assert_vi_unlocked(struct vnode *vp, const char *str);
555 void assert_vop_elocked(struct vnode *vp, const char *str);
556 void assert_vop_locked(struct vnode *vp, const char *str);
557 void assert_vop_unlocked(struct vnode *vp, const char *str);
558
559 #define ASSERT_VI_LOCKED(vp, str) assert_vi_locked((vp), (str))
560 #define ASSERT_VI_UNLOCKED(vp, str) assert_vi_unlocked((vp), (str))
561 #define ASSERT_VOP_ELOCKED(vp, str) assert_vop_elocked((vp), (str))
562 #define ASSERT_VOP_LOCKED(vp, str) assert_vop_locked((vp), (str))
563 #define ASSERT_VOP_UNLOCKED(vp, str) assert_vop_unlocked((vp), (str))
564
565 #define ASSERT_VOP_IN_SEQC(vp) do { \
566 struct vnode *_vp = (vp); \
567 \
568 VNPASS(seqc_in_modify(_vp->v_seqc), _vp); \
569 } while (0)
570
571 #define ASSERT_VOP_NOT_IN_SEQC(vp) do { \
572 struct vnode *_vp = (vp); \
573 \
574 VNPASS(!seqc_in_modify(_vp->v_seqc), _vp); \
575 } while (0)
576
577 #else /* !INVARIANTS */
578
579 #define ASSERT_VI_LOCKED(vp, str) ((void)0)
580 #define ASSERT_VI_UNLOCKED(vp, str) ((void)0)
581 #define ASSERT_VOP_ELOCKED(vp, str) ((void)0)
582 #define ASSERT_VOP_LOCKED(vp, str) ((void)0)
583 #define ASSERT_VOP_UNLOCKED(vp, str) ((void)0)
584
585 #define ASSERT_VOP_IN_SEQC(vp) ((void)0)
586 #define ASSERT_VOP_NOT_IN_SEQC(vp) ((void)0)
587
588 #endif /* INVARIANTS */
589
590 #define DOINGASYNC(vp) \
591 (((vp)->v_mount->mnt_kern_flag & MNTK_ASYNC) != 0 && \
592 ((curthread->td_pflags & TDP_SYNCIO) == 0))
593
594 /*
595 * VMIO support inline
596 */
597
598 extern int vmiodirenable;
599
600 static __inline int
vn_canvmio(struct vnode * vp)601 vn_canvmio(struct vnode *vp)
602 {
603 if (vp && (vp->v_type == VREG || (vmiodirenable && vp->v_type == VDIR)))
604 return(TRUE);
605 return(FALSE);
606 }
607
608 /*
609 * Finally, include the default set of vnode operations.
610 */
611 typedef void vop_getpages_iodone_t(void *, vm_page_t *, int, int);
612 #include "vnode_if.h"
613
614 /* vn_open_flags */
615 #define VN_OPEN_NOAUDIT 0x00000001
616 #define VN_OPEN_NOCAPCHECK 0x00000002
617 #define VN_OPEN_NAMECACHE 0x00000004
618 #define VN_OPEN_INVFS 0x00000008
619 #define VN_OPEN_WANTIOCTLCAPS 0x00000010
620
621 /* copy_file_range kernel flags */
622 #define COPY_FILE_RANGE_KFLAGS 0xff000000
623 #define COPY_FILE_RANGE_TIMEO1SEC 0x01000000 /* Return after 1sec. */
624
625 /*
626 * Public vnode manipulation functions.
627 */
628 struct componentname;
629 struct file;
630 struct mount;
631 struct nameidata;
632 struct ostat;
633 struct freebsd11_stat;
634 struct thread;
635 struct proc;
636 struct stat;
637 struct nstat;
638 struct ucred;
639 struct uio;
640 struct vattr;
641 struct vfsops;
642 struct vnode;
643
644 typedef int (*vn_get_ino_t)(struct mount *, void *, int, struct vnode **);
645
646 int bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn,
647 daddr_t endn);
648 /* cache_* may belong in namei.h. */
649 void cache_changesize(u_long newhashsize);
650
651 #define VFS_CACHE_DROPOLD 0x1
652
653 void cache_enter_time_flags(struct vnode *dvp, struct vnode *vp,
654 struct componentname *cnp, struct timespec *tsp,
655 struct timespec *dtsp, int flags);
656 #define cache_enter(dvp, vp, cnp) \
657 cache_enter_time(dvp, vp, cnp, NULL, NULL)
658 void cache_enter_time(struct vnode *dvp, struct vnode *vp,
659 struct componentname *cnp, struct timespec *tsp,
660 struct timespec *dtsp);
661 int cache_lookup(struct vnode *dvp, struct vnode **vpp,
662 struct componentname *cnp, struct timespec *tsp, int *ticksp);
663 void cache_vnode_init(struct vnode *vp);
664 void cache_purge(struct vnode *vp);
665 void cache_purge_vgone(struct vnode *vp);
666 void cache_purge_negative(struct vnode *vp);
667 void cache_purgevfs(struct mount *mp);
668 char *cache_symlink_alloc(size_t size, int flags);
669 void cache_symlink_free(char *string, size_t size);
670 int cache_symlink_resolve(struct cache_fpl *fpl, const char *string,
671 size_t len);
672 void cache_vop_inotify(struct vnode *vp, int event, uint32_t cookie);
673 void cache_vop_rename(struct vnode *fdvp, struct vnode *fvp, struct vnode *tdvp,
674 struct vnode *tvp, struct componentname *fcnp, struct componentname *tcnp);
675 void cache_vop_rmdir(struct vnode *dvp, struct vnode *vp);
676 void cache_vop_vector_register(struct vop_vector *);
677 #ifdef INVARIANTS
678 void cache_validate(struct vnode *dvp, struct vnode *vp,
679 struct componentname *cnp);
680 void cache_validate_vop_vector(struct mount *mp, struct vop_vector *vops);
681 void cache_assert_no_entries(struct vnode *vp);
682 #else
/* No-op stubs for kernels built without INVARIANTS. */
static inline void
cache_validate(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
}

static inline void
cache_validate_vop_vector(struct mount *mp, struct vop_vector *vops)
{
}

static inline void
cache_assert_no_entries(struct vnode *vp)
{
}
697 #endif
698 void cache_fast_lookup_enabled_recalc(void);
699 int change_dir(struct vnode *vp, struct thread *td);
700 void cvtstat(struct stat *st, struct ostat *ost);
701 int freebsd11_cvtnstat(struct stat *sb, struct nstat *nsb);
702 int freebsd11_cvtstat(struct stat *st, struct freebsd11_stat *ost);
703 int getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
704 struct vnode **vpp);
705 void getnewvnode_reserve(void);
706 void getnewvnode_drop_reserve(void);
707 int insmntque(struct vnode *vp, struct mount *mp);
708 int insmntque1(struct vnode *vp, struct mount *mp);
709 u_quad_t init_va_filerev(void);
710 int speedup_syncer(void);
711 int vn_vptocnp(struct vnode **vp, char *buf, size_t *buflen);
712 int vn_getcwd(char *buf, char **retbuf, size_t *buflen);
713 int vn_fullpath(struct vnode *vp, char **retbuf, char **freebuf);
714 int vn_fullpath_jail(struct vnode *vp, char **retbuf, char **freebuf);
715 int vn_fullpath_global(struct vnode *vp, char **retbuf, char **freebuf);
716 int vn_fullpath_hardlink(struct vnode *vp, struct vnode *dvp,
717 const char *hdrl_name, size_t hrdl_name_length, char **retbuf,
718 char **freebuf, size_t *buflen);
719 struct vnode *
720 vn_dir_dd_ino(struct vnode *vp);
721 int vn_commname(struct vnode *vn, char *buf, u_int buflen);
722 int vn_path_to_global_path(struct thread *td, struct vnode *vp,
723 char *path, u_int pathlen);
724 int vn_path_to_global_path_hardlink(struct thread *td, struct vnode *vp,
725 struct vnode *dvp, char *path, u_int pathlen, const char *leaf_name,
726 size_t leaf_length);
727 int vaccess(__enum_uint8(vtype) type, mode_t file_mode, uid_t file_uid,
728 gid_t file_gid, accmode_t accmode, struct ucred *cred);
729 int vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid,
730 struct ucred *cred);
731 int vaccess_acl_nfs4(__enum_uint8(vtype) type, uid_t file_uid, gid_t file_gid,
732 struct acl *aclp, accmode_t accmode, struct ucred *cred);
733 int vaccess_acl_posix1e(__enum_uint8(vtype) type, uid_t file_uid,
734 gid_t file_gid, struct acl *acl, accmode_t accmode,
735 struct ucred *cred);
736 void vattr_null(struct vattr *vap);
737 void vlazy(struct vnode *);
738 void vdrop(struct vnode *);
739 void vdropl(struct vnode *);
740 int vflush(struct mount *mp, int rootrefs, int flags, struct thread *td);
741 int vget(struct vnode *vp, int flags);
742 enum vgetstate vget_prep_smr(struct vnode *vp);
743 enum vgetstate vget_prep(struct vnode *vp);
744 int vget_finish(struct vnode *vp, int flags, enum vgetstate vs);
745 void vget_finish_ref(struct vnode *vp, enum vgetstate vs);
746 void vget_abort(struct vnode *vp, enum vgetstate vs);
747 void vgone(struct vnode *vp);
748 void vhold(struct vnode *);
749 void vholdnz(struct vnode *);
750 bool vhold_smr(struct vnode *);
751 int vinactive(struct vnode *vp);
752 int vinvalbuf(struct vnode *vp, int save, int slpflag, int slptimeo);
753 int vtruncbuf(struct vnode *vp, off_t length, int blksize);
754 void v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn,
755 int blksize);
756 void vunref(struct vnode *);
757 void vn_printf(struct vnode *vp, const char *fmt, ...) __printflike(2,3);
758 int vrecycle(struct vnode *vp);
759 int vrecyclel(struct vnode *vp);
760 int vn_bmap_seekhole_locked(struct vnode *vp, u_long cmd, off_t *off,
761 struct ucred *cred);
762 int vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off,
763 struct ucred *cred);
764 int vn_close(struct vnode *vp,
765 int flags, struct ucred *file_cred, struct thread *td);
766 int vn_copy_file_range(struct vnode *invp, off_t *inoffp,
767 struct vnode *outvp, off_t *outoffp, size_t *lenp,
768 unsigned int flags, struct ucred *incred, struct ucred *outcred,
769 struct thread *fsize_td);
770 int vn_deallocate(struct vnode *vp, off_t *offset, off_t *length, int flags,
771 int ioflg, struct ucred *active_cred, struct ucred *file_cred);
772 void vn_finished_write(struct mount *mp);
773 void vn_finished_secondary_write(struct mount *mp);
774 int vn_fsync_buf(struct vnode *vp, int waitfor);
775 int vn_generic_copy_file_range(struct vnode *invp, off_t *inoffp,
776 struct vnode *outvp, off_t *outoffp, size_t *lenp,
777 unsigned int flags, struct ucred *incred, struct ucred *outcred,
778 struct thread *fsize_td);
779 int vn_need_pageq_flush(struct vnode *vp);
780 bool vn_isdisk_error(struct vnode *vp, int *errp);
781 bool vn_isdisk(struct vnode *vp);
782 int _vn_lock(struct vnode *vp, int flags, const char *file, int line);
783 #define vn_lock(vp, flags) _vn_lock(vp, flags, __FILE__, __LINE__)
784 void vn_lock_pair(struct vnode *vp1, bool vp1_locked, int lkflags1,
785 struct vnode *vp2, bool vp2_locked, int lkflags2);
786 int vn_open(struct nameidata *ndp, int *flagp, int cmode, struct file *fp);
787 int vn_open_cred(struct nameidata *ndp, int *flagp, int cmode,
788 u_int vn_open_flags, struct ucred *cred, struct file *fp);
789 int vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred,
790 struct thread *td, struct file *fp);
791 void vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end);
792 void vn_pages_remove_valid(struct vnode *vp, vm_pindex_t start,
793 vm_pindex_t end);
794 int vn_pollrecord(struct vnode *vp, struct thread *p, int events);
795 int vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base,
796 int len, off_t offset, enum uio_seg segflg, int ioflg,
797 struct ucred *active_cred, struct ucred *file_cred, ssize_t *aresid,
798 struct thread *td);
799 int vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, void *base,
800 size_t len, off_t offset, enum uio_seg segflg, int ioflg,
801 struct ucred *active_cred, struct ucred *file_cred, size_t *aresid,
802 struct thread *td);
803 int vn_read_from_obj(struct vnode *vp, struct uio *uio);
804 int vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio,
805 struct thread *td);
806 int vn_rlimit_fsizex(const struct vnode *vp, struct uio *uio,
807 off_t maxfsz, ssize_t *resid_adj, struct thread *td);
808 void vn_rlimit_fsizex_res(struct uio *uio, ssize_t resid_adj);
809 int vn_rlimit_trunc(u_quad_t size, struct thread *td);
810 int vn_start_write(struct vnode *vp, struct mount **mpp, int flags);
811 int vn_start_secondary_write(struct vnode *vp, struct mount **mpp,
812 int flags);
813 int vn_truncate_locked(struct vnode *vp, off_t length, bool sync,
814 struct ucred *cred);
815 int vn_writechk(struct vnode *vp);
816 int vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
817 const char *attrname, int *buflen, char *buf, struct thread *td);
818 int vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
819 const char *attrname, int buflen, char *buf, struct thread *td);
820 int vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
821 const char *attrname, struct thread *td);
822 int vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags,
823 struct vnode **rvp);
824 int vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc,
825 void *alloc_arg, int lkflags, struct vnode **rvp);
826 int vn_utimes_perm(struct vnode *vp, struct vattr *vap,
827 struct ucred *cred, struct thread *td);
828 int vn_cmp(struct file *, struct file *, struct thread *td);
829
830 int vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio);
831 int vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
832 struct uio *uio);
833
834 void vn_seqc_write_begin_locked(struct vnode *vp);
835 void vn_seqc_write_begin(struct vnode *vp);
836 void vn_seqc_write_end_locked(struct vnode *vp);
837 void vn_seqc_write_end(struct vnode *vp);
838 #define vn_seqc_read_any(vp) seqc_read_any(&(vp)->v_seqc)
839 #define vn_seqc_read_notmodify(vp) seqc_read_notmodify(&(vp)->v_seqc)
840 #define vn_seqc_consistent(vp, seq) seqc_consistent(&(vp)->v_seqc, seq)
841
/*
 * Byte-range locking over a vnode, backed by the rangelock embedded in
 * the vnode (v_rl).  Lock acquisitions return a cookie that must later
 * be passed to vn_rangelock_unlock(); the try* variants presumably fail
 * instead of sleeping when the range is contended (see rangelock(9) --
 * semantics live in the underlying rangelock implementation).
 */
#define vn_rangelock_unlock(vp, cookie)					\
	rangelock_unlock(&(vp)->v_rl, (cookie))
#define vn_rangelock_rlock(vp, start, end)				\
	rangelock_rlock(&(vp)->v_rl, (start), (end))
#define vn_rangelock_tryrlock(vp, start, end)				\
	rangelock_tryrlock(&(vp)->v_rl, (start), (end))
#define vn_rangelock_wlock(vp, start, end)				\
	rangelock_wlock(&(vp)->v_rl, (start), (end))
#define vn_rangelock_trywlock(vp, start, end)				\
	rangelock_trywlock(&(vp)->v_rl, (start), (end))
852
853 #define vn_irflag_read(vp) atomic_load_short(&(vp)->v_irflag)
854 void vn_irflag_set_locked(struct vnode *vp, short toset);
855 void vn_irflag_set(struct vnode *vp, short toset);
856 void vn_irflag_set_cond_locked(struct vnode *vp, short toset);
857 void vn_irflag_set_cond(struct vnode *vp, short toset);
858 void vn_irflag_unset_locked(struct vnode *vp, short tounset);
859 void vn_irflag_unset(struct vnode *vp, short tounset);
860
861 int vfs_cache_lookup(struct vop_lookup_args *ap);
862 int vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp);
863 void vfs_timestamp(struct timespec *);
864 void vfs_write_resume(struct mount *mp, int flags);
865 int vfs_write_suspend(struct mount *mp, int flags);
866 int vfs_write_suspend_umnt(struct mount *mp);
867 struct vnode *vnlru_alloc_marker(void);
868 void vnlru_free_marker(struct vnode *);
869 void vnlru_free_vfsops(int, struct vfsops *, struct vnode *);
870 int vop_stdbmap(struct vop_bmap_args *);
871 int vop_stdfdatasync_buf(struct vop_fdatasync_args *);
872 int vop_stdfsync(struct vop_fsync_args *);
873 int vop_stdgetwritemount(struct vop_getwritemount_args *);
874 int vop_stdgetpages(struct vop_getpages_args *);
875 int vop_stdinactive(struct vop_inactive_args *);
876 int vop_stdneed_inactive(struct vop_need_inactive_args *);
877 int vop_stdinotify(struct vop_inotify_args *);
878 int vop_stdinotify_add_watch(struct vop_inotify_add_watch_args *);
879 int vop_stdioctl(struct vop_ioctl_args *);
880 int vop_stdkqfilter(struct vop_kqfilter_args *);
881 int vop_stdlock(struct vop_lock1_args *);
882 int vop_stdunlock(struct vop_unlock_args *);
883 int vop_stdislocked(struct vop_islocked_args *);
884 int vop_lock(struct vop_lock1_args *);
885 int vop_unlock(struct vop_unlock_args *);
886 int vop_islocked(struct vop_islocked_args *);
887 int vop_stdputpages(struct vop_putpages_args *);
888 int vop_nopoll(struct vop_poll_args *);
889 int vop_stdaccess(struct vop_access_args *ap);
890 int vop_stdaccessx(struct vop_accessx_args *ap);
891 int vop_stdadvise(struct vop_advise_args *ap);
892 int vop_stdadvlock(struct vop_advlock_args *ap);
893 int vop_stdadvlockasync(struct vop_advlockasync_args *ap);
894 int vop_stdadvlockpurge(struct vop_advlockpurge_args *ap);
895 int vop_stdallocate(struct vop_allocate_args *ap);
896 int vop_stddeallocate(struct vop_deallocate_args *ap);
897 int vop_stdset_text(struct vop_set_text_args *ap);
898 int vop_stdpathconf(struct vop_pathconf_args *);
899 int vop_stdpoll(struct vop_poll_args *);
900 int vop_stdvptocnp(struct vop_vptocnp_args *ap);
901 int vop_stdvptofh(struct vop_vptofh_args *ap);
902 int vop_stdunp_bind(struct vop_unp_bind_args *ap);
903 int vop_stdunp_connect(struct vop_unp_connect_args *ap);
904 int vop_stdunp_detach(struct vop_unp_detach_args *ap);
905 int vop_stdadd_writecount_nomsync(struct vop_add_writecount_args *ap);
906 int vop_stdcopy_file_range(struct vop_copy_file_range_args *ap);
907 int vop_eopnotsupp(struct vop_generic_args *ap);
908 int vop_ebadf(struct vop_generic_args *ap);
909 int vop_einval(struct vop_generic_args *ap);
910 int vop_enoent(struct vop_generic_args *ap);
911 int vop_enotty(struct vop_generic_args *ap);
912 int vop_eagain(struct vop_generic_args *ap);
913 int vop_null(struct vop_generic_args *ap);
914 int vop_panic(struct vop_generic_args *ap);
915 int dead_poll(struct vop_poll_args *ap);
916 int dead_read(struct vop_read_args *ap);
917 int dead_write(struct vop_write_args *ap);
918
919 /* These are called from within the actual VOPS. */
920 void vop_allocate_post(void *a, int rc);
921 void vop_copy_file_range_post(void *ap, int rc);
922 void vop_close_post(void *a, int rc);
923 void vop_create_pre(void *a);
924 void vop_create_post(void *a, int rc);
925 void vop_deallocate_post(void *a, int rc);
926 void vop_whiteout_pre(void *a);
927 void vop_whiteout_post(void *a, int rc);
928 void vop_deleteextattr_pre(void *a);
929 void vop_deleteextattr_post(void *a, int rc);
930 void vop_link_pre(void *a);
931 void vop_link_post(void *a, int rc);
932 void vop_lookup_post(void *a, int rc);
933 void vop_lookup_pre(void *a);
934 void vop_mkdir_pre(void *a);
935 void vop_mkdir_post(void *a, int rc);
936 void vop_mknod_pre(void *a);
937 void vop_mknod_post(void *a, int rc);
938 void vop_open_post(void *a, int rc);
939 void vop_read_post(void *a, int rc);
940 void vop_read_pgcache_post(void *ap, int rc);
941 void vop_reclaim_post(void *a, int rc);
942 void vop_remove_pre(void *a);
943 void vop_remove_post(void *a, int rc);
944 void vop_rename_post(void *a, int rc);
945 void vop_rename_pre(void *a);
946 void vop_rmdir_pre(void *a);
947 void vop_rmdir_post(void *a, int rc);
948 void vop_setattr_pre(void *a);
949 void vop_setattr_post(void *a, int rc);
950 void vop_setacl_pre(void *a);
951 void vop_setacl_post(void *a, int rc);
952 void vop_setextattr_pre(void *a);
953 void vop_setextattr_post(void *a, int rc);
954 void vop_symlink_pre(void *a);
955 void vop_symlink_post(void *a, int rc);
956 int vop_sigdefer(struct vop_vector *vop, struct vop_generic_args *a);
957
958 #ifdef INVARIANTS
959 void vop_fdatasync_debugpre(void *a);
960 void vop_fdatasync_debugpost(void *a, int rc);
961 void vop_fplookup_vexec_debugpre(void *a);
962 void vop_fplookup_vexec_debugpost(void *a, int rc);
963 void vop_fplookup_symlink_debugpre(void *a);
964 void vop_fplookup_symlink_debugpost(void *a, int rc);
965 void vop_fsync_debugpre(void *a);
966 void vop_fsync_debugpost(void *a, int rc);
967 void vop_strategy_debugpre(void *a);
968 void vop_lock_debugpre(void *a);
969 void vop_lock_debugpost(void *a, int rc);
970 void vop_unlock_debugpre(void *a);
971 void vop_need_inactive_debugpre(void *a);
972 void vop_need_inactive_debugpost(void *a, int rc);
973 void vop_mkdir_debugpost(void *a, int rc);
974 #else
975 #define vop_fdatasync_debugpre(x) do { } while (0)
976 #define vop_fdatasync_debugpost(x, y) do { } while (0)
977 #define vop_fplookup_vexec_debugpre(x) do { } while (0)
978 #define vop_fplookup_vexec_debugpost(x, y) do { } while (0)
979 #define vop_fplookup_symlink_debugpre(x) do { } while (0)
980 #define vop_fplookup_symlink_debugpost(x, y) do { } while (0)
981 #define vop_fsync_debugpre(x) do { } while (0)
982 #define vop_fsync_debugpost(x, y) do { } while (0)
983 #define vop_strategy_debugpre(x) do { } while (0)
984 #define vop_lock_debugpre(x) do { } while (0)
985 #define vop_lock_debugpost(x, y) do { } while (0)
986 #define vop_unlock_debugpre(x) do { } while (0)
987 #define vop_need_inactive_debugpre(x) do { } while (0)
988 #define vop_need_inactive_debugpost(x, y) do { } while (0)
989 #define vop_mkdir_debugpost(x, y) do { } while (0)
990 #endif
991
992 void vop_rename_fail(struct vop_rename_args *ap);
993
/*
 * Common prologue for VOP_STAT() implementations: audit the vnode,
 * run the MAC stat check and, on success, zero the struct stat fields
 * that individual filesystems are not expected to fill in.  Evaluates
 * to the MAC check's error so callers can bail out early.
 *
 * The macro argument is snapshotted into _ap exactly once; all later
 * references go through _ap so a side-effecting argument is not
 * evaluated multiple times.
 */
#define vop_stat_helper_pre(ap)	({					\
	struct vop_stat_args *_ap = (ap);				\
	int _error;							\
	AUDIT_ARG_VNODE1(_ap->a_vp);					\
	_error = mac_vnode_check_stat(_ap->a_active_cred, _ap->a_file_cred, _ap->a_vp);\
	if (__predict_true(_error == 0)) {				\
		_ap->a_sb->st_padding1 = 0;				\
		bzero(_ap->a_sb->st_spare, sizeof(_ap->a_sb->st_spare)); \
		_ap->a_sb->st_filerev = 0;				\
		_ap->a_sb->st_bsdflags = 0;				\
	}								\
	_error;								\
})
1007
/*
 * Common epilogue for VOP_STAT() implementations: scrub st_gen for
 * credentials lacking the privilege to observe filesystem generation
 * numbers.  Evaluates to the error passed in, unchanged.
 */
#define vop_stat_helper_post(ap, error)	({				\
	struct vop_stat_args *_ap = (ap);				\
	int _error = (error);						\
	if (priv_check_cred_vfs_generation(_ap->a_active_cred))	\
		_ap->a_sb->st_gen = 0;					\
	_error;								\
})
1015
#ifdef INVARIANTS
/*
 * Sanity checks expanded by vop_readdir_pre()/vop_readdir_post():
 * record the uio residual before the call and verify afterwards that a
 * successful VOP_READDIR() which consumed nothing (with a non-empty
 * buffer) reported EOF via *a_eofflag.  These expand to bare
 * declarations/statements and rely on the do/while block opened by
 * vop_readdir_pre() for scoping.
 */
#define vop_readdir_pre_assert(ap)					\
	ssize_t nresid, oresid;						\
									\
	oresid = (ap)->a_uio->uio_resid;

#define vop_readdir_post_assert(ap, ret)				\
	nresid = (ap)->a_uio->uio_resid;				\
	if ((ret) == 0 && (ap)->a_eofflag != NULL) {			\
		VNASSERT(oresid == 0 || nresid != oresid ||		\
		    *(ap)->a_eofflag == 1,				\
		    (ap)->a_vp, ("VOP_READDIR: eofflag not set"));	\
	}
#else
#define vop_readdir_pre_assert(ap)
#define vop_readdir_post_assert(ap, ret)
#endif
1033
/*
 * vop_readdir_pre()/vop_readdir_post() are deliberately unbalanced:
 * _pre opens a "do {" that _post closes with "} while (0)", so the two
 * must always be used as a matched pair.  On success, _post fires a
 * NOTE_READ knote and an IN_ACCESS inotify event on the directory
 * vnode.
 */
#define vop_readdir_pre(ap) do {					\
	vop_readdir_pre_assert(ap)

#define vop_readdir_post(ap, ret)					\
	vop_readdir_post_assert(ap, ret);				\
	if ((ret) == 0) {						\
		VFS_KNOTE_LOCKED((ap)->a_vp, NOTE_READ);		\
		INOTIFY((ap)->a_vp, IN_ACCESS);				\
	}								\
} while (0)
1044
/*
 * vop_write_pre()/vop_write_post() must be used as a matched pair in
 * VOP_WRITE() wrappers: _pre declares the locals (va, error, osize,
 * ooffset, noffset) that _post reads.  When the vnode has knote
 * listeners (VIRF_KNOTE), _pre snapshots the file size and uio offset.
 * NOTE: _pre returns from the *enclosing function* if VOP_GETATTR()
 * fails.
 */
#define vop_write_pre(ap)						\
	struct vattr va;						\
	int error;							\
	off_t osize, ooffset, noffset;					\
									\
	osize = ooffset = noffset = 0;					\
	if ((vn_irflag_read((ap)->a_vp) & VIRF_KNOTE) != 0) {		\
		error = VOP_GETATTR((ap)->a_vp, &va, (ap)->a_cred);	\
		if (error)						\
			return (error);					\
		ooffset = (ap)->a_uio->uio_offset;			\
		osize = (off_t)va.va_size;				\
	}

/*
 * Post-write notification: if the uio offset advanced, fire NOTE_WRITE
 * (plus NOTE_EXTEND when the write grew the file past its old size)
 * and an IN_MODIFY inotify event.
 */
#define vop_write_post(ap, ret)						\
	noffset = (ap)->a_uio->uio_offset;				\
	if (noffset > ooffset) {					\
		VFS_KNOTE_LOCKED((ap)->a_vp, NOTE_WRITE |		\
		    (noffset > osize ? NOTE_EXTEND : 0));		\
		INOTIFY((ap)->a_vp, IN_MODIFY);				\
	}
1066
1067 #define VOP_LOCK(vp, flags) VOP_LOCK1(vp, flags, __FILE__, __LINE__)
1068
/*
 * Checked variants of VOP_ADD_WRITECOUNT(), VOP_SET_TEXT() and
 * VOP_UNSET_TEXT() for call sites that require the operation to
 * succeed.  Under INVARIANTS a non-zero return triggers a VNASSERT()
 * panic; in non-INVARIANTS kernels the return value is silently
 * discarded.
 */
#ifdef INVARIANTS
#define	VOP_ADD_WRITECOUNT_CHECKED(vp, cnt)				\
do {									\
	int error_;							\
									\
	error_ = VOP_ADD_WRITECOUNT((vp), (cnt));			\
	VNASSERT(error_ == 0, (vp), ("VOP_ADD_WRITECOUNT returned %d",	\
	    error_));							\
} while (0)
#define	VOP_SET_TEXT_CHECKED(vp)					\
do {									\
	int error_;							\
									\
	error_ = VOP_SET_TEXT((vp));					\
	VNASSERT(error_ == 0, (vp), ("VOP_SET_TEXT returned %d",	\
	    error_));							\
} while (0)
#define	VOP_UNSET_TEXT_CHECKED(vp)					\
do {									\
	int error_;							\
									\
	error_ = VOP_UNSET_TEXT((vp));					\
	VNASSERT(error_ == 0, (vp), ("VOP_UNSET_TEXT returned %d",	\
	    error_));							\
} while (0)
#else
#define	VOP_ADD_WRITECOUNT_CHECKED(vp, cnt)	VOP_ADD_WRITECOUNT((vp), (cnt))
#define	VOP_SET_TEXT_CHECKED(vp)		VOP_SET_TEXT((vp))
#define	VOP_UNSET_TEXT_CHECKED(vp)		VOP_UNSET_TEXT((vp))
#endif
1099
1100 #define VN_IS_DOOMED(vp) __predict_false((vn_irflag_read(vp) & VIRF_DOOMED) != 0)
1101
1102 void vput(struct vnode *vp);
1103 void vrele(struct vnode *vp);
1104 void vref(struct vnode *vp);
1105 void vrefact(struct vnode *vp);
1106 void v_addpollinfo(struct vnode *vp);
/*
 * Return the vnode's current use (reference) count.  This is a plain,
 * unlocked read of v_usecount, so the value may be stale by the time
 * the caller inspects it.
 */
static __inline int
vrefcnt(struct vnode *vp)
{

	return (vp->v_usecount);
}
1113
/*
 * Variants of vhold()/vref() that assert the caller already holds the
 * vnode interlock before delegating to the plain functions.
 */
#define	vholdl(vp)	do {						\
	ASSERT_VI_LOCKED(vp, __func__);					\
	vhold(vp);							\
} while (0)

#define	vrefl(vp)	do {						\
	ASSERT_VI_LOCKED(vp, __func__);					\
	vref(vp);							\
} while (0)
1123
1124 /*
1125 * The caller doesn't know the file size and vnode_create_vobject() should
1126 * determine the size on its own.
1127 */
1128 #define VNODE_NO_SIZE ((off_t)-1)
1129
1130 int vnode_create_vobject(struct vnode *vp, off_t size, struct thread *td);
1131 int vnode_create_disk_vobject(struct vnode *vp, off_t size, struct thread *td);
1132 void vnode_destroy_vobject(struct vnode *vp);
1133
1134 extern struct vop_vector fifo_specops;
1135 extern struct vop_vector dead_vnodeops;
1136 extern struct vop_vector default_vnodeops;
1137
1138 #define VOP_PANIC ((void*)(uintptr_t)vop_panic)
1139 #define VOP_NULL ((void*)(uintptr_t)vop_null)
1140 #define VOP_EBADF ((void*)(uintptr_t)vop_ebadf)
1141 #define VOP_ENOTTY ((void*)(uintptr_t)vop_enotty)
1142 #define VOP_EINVAL ((void*)(uintptr_t)vop_einval)
1143 #define VOP_ENOENT ((void*)(uintptr_t)vop_enoent)
1144 #define VOP_EOPNOTSUPP ((void*)(uintptr_t)vop_eopnotsupp)
1145 #define VOP_EAGAIN ((void*)(uintptr_t)vop_eagain)
1146
1147 /* fifo_vnops.c */
1148 int fifo_printinfo(struct vnode *);
1149
1150 /* vfs_hash.c */
1151 typedef int vfs_hash_cmp_t(struct vnode *vp, void *arg);
1152
1153 void vfs_hash_changesize(u_long newhashsize);
1154 int vfs_hash_get(const struct mount *mp, u_int hash, int flags,
1155 struct thread *td, struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);
1156 u_int vfs_hash_index(struct vnode *vp);
1157 int vfs_hash_insert(struct vnode *vp, u_int hash, int flags, struct thread *td,
1158 struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);
1159 void vfs_hash_ref(const struct mount *mp, u_int hash, struct thread *td,
1160 struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);
1161 void vfs_hash_rehash(struct vnode *vp, u_int hash);
1162 void vfs_hash_remove(struct vnode *vp);
1163
1164 int vfs_kqfilter(struct vop_kqfilter_args *);
1165 struct dirent;
1166 int vn_dir_next_dirent(struct vnode *vp, struct thread *td,
1167 char *dirbuf, size_t dirbuflen,
1168 struct dirent **dpp, size_t *len, off_t *off, int *eofflag);
1169 int vn_dir_check_empty(struct vnode *vp);
1170 int vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off);
1171
1172 int vfs_unixify_accmode(accmode_t *accmode);
1173
1174 void vfs_unp_reclaim(struct vnode *vp);
1175
1176 int setfmode(struct thread *td, struct ucred *cred, struct vnode *vp, int mode);
1177 int setfown(struct thread *td, struct ucred *cred, struct vnode *vp, uid_t uid,
1178 gid_t gid);
1179 int vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1180 struct thread *td);
1181 int vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1182 struct thread *td);
1183 int vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *active_cred);
1184 int vn_getsize(struct vnode *vp, off_t *size, struct ucred *active_cred);
1185
1186 void vn_fsid(struct vnode *vp, struct vattr *va);
1187
1188 int vn_dir_check_exec(struct vnode *vp, struct componentname *cnp);
1189 int vn_lktype_write(struct mount *mp, struct vnode *vp);
1190
1191 #ifdef INVARIANTS
1192 void vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state);
1193 #endif
1194
/*
 * Set the vnode's lifecycle state.  Under INVARIANTS the transition is
 * first validated by vn_set_state_validate().
 */
static inline void
vn_set_state(struct vnode *vp, __enum_uint8(vstate) state)
{
#ifdef INVARIANTS
	vn_set_state_validate(vp, state);
#endif
	vp->v_state = state;
}
1203
/* Return the vnode's current lifecycle state. */
static inline __enum_uint8(vstate)
vn_get_state(struct vnode *vp)
{
	return (vp->v_state);
}
1209
/*
 * VOP_UNLOCK() variant accepting lockmgr flags.  Only LK_RELEASE and
 * LK_INTERLOCK are supported; anything else panics.  LK_INTERLOCK
 * additionally drops the vnode interlock after the unlock.  Evaluates
 * to the error from VOP_UNLOCK().
 *
 * Both arguments are snapshotted once (_vp, _flags); the panic message
 * reports the snapshot rather than re-evaluating the macro argument.
 */
#define VOP_UNLOCK_FLAGS(vp, flags)	({				\
	struct vnode *_vp = (vp);					\
	int _flags = (flags);						\
	int _error;							\
									\
	if ((_flags & ~(LK_INTERLOCK | LK_RELEASE)) != 0)		\
		panic("%s: unsupported flags %x\n", __func__, _flags);	\
	_error = VOP_UNLOCK(_vp);					\
	if (_flags & LK_INTERLOCK)					\
		VI_UNLOCK(_vp);						\
	_error;								\
})
1222
1223 #include <sys/kernel.h>
1224
1225 #define VFS_VOP_VECTOR_REGISTER(vnodeops) \
1226 SYSINIT(vfs_vector_##vnodeops##_f, SI_SUB_VFS, SI_ORDER_ANY, \
1227 vfs_vector_op_register, &vnodeops)
1228
1229 #define VFS_SMR_DECLARE \
1230 extern smr_t vfs_smr
1231
1232 #define VFS_SMR() vfs_smr
1233 #define vfs_smr_enter() smr_enter(VFS_SMR())
1234 #define vfs_smr_exit() smr_exit(VFS_SMR())
1235 #define vfs_smr_synchronize() smr_synchronize(VFS_SMR())
1236 #define vfs_smr_entered_load(ptr) smr_entered_load((ptr), VFS_SMR())
1237 #define VFS_SMR_ENTERED() SMR_ENTERED(VFS_SMR())
1238 #define VFS_SMR_ASSERT_ENTERED() SMR_ASSERT_ENTERED(VFS_SMR())
1239 #define VFS_SMR_ASSERT_NOT_ENTERED() SMR_ASSERT_NOT_ENTERED(VFS_SMR())
1240 #define VFS_SMR_ZONE_SET(zone) uma_zone_set_smr((zone), VFS_SMR())
1241
/*
 * Load the vnode's v_data pointer from within a vfs SMR section
 * (asserted).  Uses a consume-ordered atomic load; evaluates to the
 * loaded pointer.
 */
#define vn_load_v_data_smr(vp)	({					\
	struct vnode *_vp = (vp);					\
									\
	VFS_SMR_ASSERT_ENTERED();					\
	atomic_load_consume_ptr(&(_vp)->v_data);\
})
1248
/*
 * Mark the vnode as having a deferred object size update pending
 * (VI_DELAYED_SETSIZE).  The vnode interlock must be held.
 */
static inline void
vn_delayed_setsize_locked(struct vnode *vp)
{
	ASSERT_VI_LOCKED(vp, "delayed_setsize");
	vp->v_iflag |= VI_DELAYED_SETSIZE;
}
1255
/*
 * Lock-acquiring wrapper for vn_delayed_setsize_locked(): takes and
 * drops the vnode interlock around setting VI_DELAYED_SETSIZE.
 */
static inline void
vn_delayed_setsize(struct vnode *vp)
{
	VI_LOCK(vp);
	vn_delayed_setsize_locked(vp);
	VI_UNLOCK(vp);
}
1263
/*
 * Clear a pending deferred size update (VI_DELAYED_SETSIZE).  The
 * vnode interlock must be held.
 */
static inline void
vn_clear_delayed_setsize_locked(struct vnode *vp)
{
	ASSERT_VI_LOCKED(vp, "delayed_setsize");
	vp->v_iflag &= ~VI_DELAYED_SETSIZE;
}
1270
/*
 * Lock-acquiring wrapper for vn_clear_delayed_setsize_locked(): takes
 * and drops the vnode interlock around clearing VI_DELAYED_SETSIZE.
 */
static inline void
vn_clear_delayed_setsize(struct vnode *vp)
{
	VI_LOCK(vp);
	vn_clear_delayed_setsize_locked(vp);
	VI_UNLOCK(vp);
}
1278
1279 #endif /* _KERNEL */
1280
1281 #endif /* !_SYS_VNODE_H_ */
1282