/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_VNODE_H_
#define _SYS_VNODE_H_

#include <sys/bufobj.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/rangelock.h>
#include <sys/selinfo.h>
#include <sys/uio.h>
#include <sys/acl.h>
#include <sys/ktr.h>
#include <sys/_seqc.h>

/*
 * The vnode is the focus of all file activity in UNIX. There is a
 * unique vnode allocated for each active file, each current directory,
 * each mounted-on file, text file, and the root.
 */

/*
 * Vnode types. VNON means no type.
 */
__enum_uint8_decl(vtype) {
	VNON,
	VREG,
	VDIR,
	VBLK,
	VCHR,
	VLNK,
	VSOCK,
	VFIFO,
	VBAD,
	VMARKER,
	VLASTTYPE = VMARKER,
};

__enum_uint8_decl(vstate) {
	VSTATE_UNINITIALIZED,
	VSTATE_CONSTRUCTED,
	VSTATE_DESTROYING,
	VSTATE_DEAD,
	VLASTSTATE = VSTATE_DEAD,
};

enum vgetstate {
	VGET_NONE,
	VGET_HOLDCNT,
	VGET_USECOUNT,
};

/*
 * Each underlying filesystem allocates its own private area and hangs
 * it from v_data. If non-null, this area is freed in getnewvnode().
 */
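
/*
 * For illustration only: a filesystem's vget-style routine commonly looks
 * something like the sketch below, where "somefs" and "ip" (the filesystem's
 * private in-memory inode) are hypothetical names, not part of this header:
 *
 *	error = getnewvnode("somefs", mp, &somefs_vnodeops, &vp);
 *	if (error == 0) {
 *		vp->v_data = ip;
 *		ip->i_vnode = vp;
 *	}
 */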

struct namecache;
struct cache_fpl;

struct vpollinfo {
	struct mtx vpi_lock;		/* lock to protect below */
	struct selinfo vpi_selinfo;	/* identity of poller(s) */
	short vpi_events;		/* what they are looking for */
	short vpi_revents;		/* what has happened */
};

/*
 * Reading or writing any of these items requires holding the appropriate lock.
 *
 * Lock reference:
 *	c - namecache mutex
 *	i - interlock
 *	l - mp mnt_listmtx or freelist mutex
 *	I - updated with atomics, 0->1 and 1->0 transitions with interlock held
 *	m - mount point interlock
 *	p - pollinfo lock
 *	u - Only a reference to the vnode is needed to read.
 *	v - vnode lock
 *
 * Vnodes may be found on many lists. The general way to deal with operating
 * on a vnode that is on a list is:
 *	1) Lock the list and find the vnode.
 *	2) Lock interlock so that the vnode does not go away.
 *	3) Unlock the list to avoid lock order reversals.
 *	4) vget with LK_INTERLOCK and check for ENOENT, or
 *	5) Check for DOOMED if the vnode lock is not required.
 *	6) Perform your operation, then vput().
 */
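
/*
 * An illustrative sketch of the steps above; the list and its mutex are
 * hypothetical placeholders and are not declared in this header:
 *
 *	mtx_lock(&some_list_mtx);		// 1) lock the list ...
 *	vp = TAILQ_FIRST(&some_list);		//    ... and find the vnode
 *	VI_LOCK(vp);				// 2) interlock pins the vnode
 *	mtx_unlock(&some_list_mtx);		// 3) avoid lock order reversals
 *	if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0) {  // 4) fails if doomed
 *		// ... operate on vp ...	// 6)
 *		vput(vp);
 *	}
 *
 * When the vnode lock is not needed, step 5) amounts to holding the vnode
 * (vhold()) and checking VN_IS_DOOMED() instead of calling vget().
 */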

#if defined(_KERNEL) || defined(_KVM_VNODE)

struct vnode {
	/*
	 * Fields which define the identity of the vnode. These fields are
	 * owned by the filesystem (XXX: and vgone() ?)
	 */
	__enum_uint8(vtype) v_type;	/* u vnode type */
	__enum_uint8(vstate) v_state;	/* u vnode state */
	short v_irflag;			/* i frequently read flags */
	seqc_t v_seqc;			/* i modification count */
	uint32_t v_nchash;		/* u namecache hash */
	u_int v_hash;
	const struct vop_vector *v_op;	/* u vnode operations vector */
	void *v_data;			/* u private data for fs */

	/*
	 * Filesystem instance stuff
	 */
	struct mount *v_mount;		/* u ptr to vfs we are in */
	TAILQ_ENTRY(vnode) v_nmntvnodes; /* m vnodes for mount point */

	/*
	 * Type specific fields, only one applies to any given vnode.
	 */
	union {
		struct mount *v_mountedhere;	/* v ptr to mountpoint (VDIR) */
		struct unpcb *v_unpcb;		/* v unix domain net (VSOCK) */
		struct cdev *v_rdev;		/* v device (VCHR, VBLK) */
		struct fifoinfo *v_fifoinfo;	/* v fifo (VFIFO) */
	};

	/*
	 * vfs_hash: (mount + inode) -> vnode hash. The hash value
	 * itself is grouped with other int fields, to avoid padding.
	 */
	LIST_ENTRY(vnode) v_hashlist;

	/*
	 * VFS_namecache stuff
	 */
	LIST_HEAD(, namecache) v_cache_src;	/* c Cache entries from us */
	TAILQ_HEAD(, namecache) v_cache_dst;	/* c Cache entries to us */
	struct namecache *v_cache_dd;		/* c Cache entry for .. vnode */

	/*
	 * Locking
	 */
	struct lock v_lock;		/* u (if fs doesn't have one) */
	struct mtx v_interlock;		/* lock for "i" things */
	struct lock *v_vnlock;		/* u pointer to vnode lock */

	/*
	 * The machinery of being a vnode
	 */
	TAILQ_ENTRY(vnode) v_vnodelist;	/* l vnode lists */
	TAILQ_ENTRY(vnode) v_lazylist;	/* l vnode lazy list */
	struct bufobj v_bufobj;		/* * Buffer cache object */

	/*
	 * Hooks for various subsystems and features.
	 */
	struct vpollinfo *v_pollinfo;	/* i Poll events, p for *v_pi */
	struct label *v_label;		/* MAC label for vnode */
	struct lockf *v_lockf;		/* Byte-level advisory lock list */
	struct rangelock v_rl;		/* Byte-range lock */

	u_int v_holdcnt;		/* I prevents recycling. */
	u_int v_usecount;		/* I ref count of users */
	u_short v_iflag;		/* i vnode flags (see below) */
	u_short v_vflag;		/* v vnode flags */
	u_short v_mflag;		/* l mnt-specific vnode flags */
	short v_dbatchcpu;		/* i LRU requeue deferral batch */
	int v_writecount;		/* I ref count of writers or
					   (negative) text users */
	int v_seqc_users;		/* i modifications pending */
};

#ifndef DEBUG_LOCKS
#ifdef _LP64
/*
 * Not crossing 448 bytes fits 9 vnodes per page. If you have to add fields
 * to the structure and there is nothing which can be done to prevent growth
 * then so be it. But don't grow it without a good reason.
 */
_Static_assert(sizeof(struct vnode) <= 448, "vnode size crosses 448 bytes");
#endif
#endif

#endif /* defined(_KERNEL) || defined(_KVM_VNODE) */

#define bo2vnode(bo) __containerof((bo), struct vnode, v_bufobj)

/* XXX: These are temporary to avoid a source sweep at this time */
#define v_object v_bufobj.bo_object

/* We don't need to lock the knlist */
#define VN_KNLIST_EMPTY(vp) ((vp)->v_pollinfo == NULL || \
	KNLIST_EMPTY(&(vp)->v_pollinfo->vpi_selinfo.si_note))

#define VN_KNOTE(vp, b, a) \
	do { \
		if (!VN_KNLIST_EMPTY(vp)) \
			KNOTE(&vp->v_pollinfo->vpi_selinfo.si_note, (b), \
			    (a) | KNF_NOKQLOCK); \
	} while (0)
#define VN_KNOTE_LOCKED(vp, b) VN_KNOTE(vp, b, KNF_LISTLOCKED)
#define VN_KNOTE_UNLOCKED(vp, b) VN_KNOTE(vp, b, 0)

/*
 * Vnode flags.
 *	VI flags are protected by interlock and live in v_iflag
 *	VIRF flags are protected by interlock and live in v_irflag
 *	VV flags are protected by the vnode lock and live in v_vflag
 *
 *	VIRF_DOOMED is doubly protected by the interlock and vnode lock. Both
 *	are required for writing but the status may be checked with either.
 */
#define VHOLD_NO_SMR (1<<29) /* Disable vhold_smr */
#define VHOLD_ALL_FLAGS (VHOLD_NO_SMR)

#define VIRF_DOOMED 0x0001 /* This vnode is being recycled */
#define VIRF_PGREAD 0x0002 /* Direct reads from the page cache are permitted,
			      never cleared once set */
#define VIRF_MOUNTPOINT 0x0004 /* This vnode is mounted on */
#define VIRF_TEXT_REF 0x0008 /* Executable mappings ref the vnode */
#define VIRF_CROSSMP 0x0010 /* Cross-mp vnode, no locking */

#define VI_UNUSED0 0x0001 /* unused */
#define VI_MOUNT 0x0002 /* Mount in progress */
#define VI_DOINGINACT 0x0004 /* VOP_INACTIVE is in progress */
#define VI_OWEINACT 0x0008 /* Need to call inactive */
#define VI_DEFINACT 0x0010 /* deferred inactive */
#define VI_FOPENING 0x0020 /* In open, with opening process having the
			      first right to advlock file */

#define VV_ROOT 0x0001 /* root of its filesystem */
#define VV_ISTTY 0x0002 /* vnode represents a tty */
#define VV_NOSYNC 0x0004 /* unlinked, stop syncing */
#define VV_ETERNALDEV 0x0008 /* device that is never destroyed */
#define VV_CACHEDLABEL 0x0010 /* Vnode has valid cached MAC label */
#define VV_VMSIZEVNLOCK 0x0020 /* object size check requires vnode lock */
#define VV_COPYONWRITE 0x0040 /* vnode is doing copy-on-write */
#define VV_SYSTEM 0x0080 /* vnode being used by kernel */
#define VV_PROCDEP 0x0100 /* vnode is process dependent */
#define VV_UNLINKED 0x0200 /* unlinked but still open directory */
#define VV_DELETED 0x0400 /* should be removed */
#define VV_MD 0x0800 /* vnode backs the md device */
#define VV_FORCEINSMQ 0x1000 /* force the insmntque to succeed */
#define VV_READLINK 0x2000 /* fdescfs linux vnode */
#define VV_UNREF 0x4000 /* vunref, do not drop lock in inactive() */
#define VV_CROSSLOCK 0x8000 /* vnode lock is shared w/ root mounted here */

#define VMP_LAZYLIST 0x0001 /* Vnode is on mnt's lazy list */

/*
 * Vnode attributes. A field value of VNOVAL represents a field whose value
 * is unavailable (getattr) or which is not to be changed (setattr).
 */
struct vattr {
	__enum_uint8(vtype) va_type; /* vnode type (for create) */
	u_short va_mode; /* file's access mode and type */
	u_short va_padding0;
	uid_t va_uid; /* owner user id */
	gid_t va_gid; /* owner group id */
	nlink_t va_nlink; /* number of references to file */
	dev_t va_fsid; /* filesystem id */
	ino_t va_fileid; /* file id */
	u_quad_t va_size; /* file size in bytes */
	long va_blocksize; /* blocksize preferred for i/o */
	struct timespec va_atime; /* time of last access */
	struct timespec va_mtime; /* time of last modification */
	struct timespec va_ctime; /* time file changed */
	struct timespec va_birthtime; /* time file created */
	u_long va_gen; /* generation number of file */
	u_long va_flags; /* flags defined for file */
	dev_t va_rdev; /* device the special file represents */
	u_quad_t va_bytes; /* bytes of disk space held by file */
	u_quad_t va_filerev; /* file modification number */
	u_int va_vaflags; /* operations flags, see below */
	long va_spare; /* remain quad aligned */
};

/*
 * Flags for va_vaflags.
 */
#define VA_UTIMES_NULL 0x01 /* utimes argument was NULL */
#define VA_EXCLUSIVE 0x02 /* exclusive create request */
#define VA_SYNC 0x04 /* O_SYNC truncation */
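
/*
 * Illustrative use of struct vattr with VOP_SETATTR(9) (a sketch, not a
 * complete function): after VATTR_NULL() every field holds VNOVAL and only
 * the fields explicitly set below are changed.
 *
 *	struct vattr va;
 *
 *	VATTR_NULL(&va);
 *	va.va_size = 0;				// truncate the file ...
 *	va.va_mode = S_IRUSR | S_IWUSR;		// ... and chmod it in the same call
 *	error = VOP_SETATTR(vp, &va, cred);	// vp must be locked
 */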

/*
 * Flags for ioflag. (high 16 bits used to ask for read-ahead and
 * help with write clustering)
 * NB: IO_NDELAY and IO_DIRECT are linked to fcntl.h
 */
#define IO_UNIT 0x0001 /* do I/O as atomic unit */
#define IO_APPEND 0x0002 /* append write to end */
#define IO_NDELAY 0x0004 /* FNDELAY flag set in file table */
#define IO_NODELOCKED 0x0008 /* underlying node already locked */
#define IO_ASYNC 0x0010 /* bawrite rather than bdwrite */
#define IO_VMIO 0x0020 /* data already in VMIO space */
#define IO_INVAL 0x0040 /* invalidate after I/O */
#define IO_SYNC 0x0080 /* do I/O synchronously */
#define IO_DIRECT 0x0100 /* attempt to bypass buffer cache */
#define IO_NOREUSE 0x0200 /* VMIO data won't be reused */
#define IO_EXT 0x0400 /* operate on external attributes */
#define IO_NORMAL 0x0800 /* operate on regular data */
#define IO_NOMACCHECK 0x1000 /* MAC checks unnecessary */
#define IO_BUFLOCKED 0x2000 /* ffs flag; indir buf is locked */
#define IO_RANGELOCKED 0x4000 /* range locked */
#define IO_DATASYNC 0x8000 /* do only data I/O synchronously */

#define IO_SEQMAX 0x7F /* seq heuristic max value */
#define IO_SEQSHIFT 16 /* seq heuristic in upper 16 bits */

/*
 * Flags for accmode_t.
 */
#define VEXEC 000000000100 /* execute/search permission */
#define VWRITE 000000000200 /* write permission */
#define VREAD 000000000400 /* read permission */
#define VADMIN 000000010000 /* being the file owner */
#define VAPPEND 000000040000 /* permission to write/append */
/*
 * VEXPLICIT_DENY makes VOP_ACCESSX(9) return EPERM or EACCES only
 * if permission was denied explicitly, by a "deny" rule in NFSv4 ACL,
 * and 0 otherwise. This never happens with ordinary unix access rights
 * or POSIX.1e ACLs. Obviously, VEXPLICIT_DENY must be OR-ed with
 * some other V* constant.
 */
#define VEXPLICIT_DENY 000000100000
#define VREAD_NAMED_ATTRS 000000200000 /* not used */
#define VWRITE_NAMED_ATTRS 000000400000 /* not used */
#define VDELETE_CHILD 000001000000
#define VREAD_ATTRIBUTES 000002000000 /* permission to stat(2) */
#define VWRITE_ATTRIBUTES 000004000000 /* change {m,c,a}time */
#define VDELETE 000010000000
#define VREAD_ACL 000020000000 /* read ACL and file mode */
#define VWRITE_ACL 000040000000 /* change ACL and/or file mode */
#define VWRITE_OWNER 000100000000 /* change file owner */
#define VSYNCHRONIZE 000200000000 /* not used */
#define VCREAT 000400000000 /* creating new file */
#define VVERIFY 001000000000 /* verification required */
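
/*
 * An accmode_t is a bitwise OR of the bits above. As an illustrative example
 * (not taken from existing code), an access check that reports only explicit
 * NFSv4 "deny" entries might look like:
 *
 *	error = VOP_ACCESSX(vp, VWRITE | VEXPLICIT_DENY, cred, td);
 */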

/*
 * Permissions that were traditionally granted only to the file owner.
 */
#define VADMIN_PERMS (VADMIN | VWRITE_ATTRIBUTES | VWRITE_ACL | \
	VWRITE_OWNER)

/*
 * Permissions that were traditionally granted to everyone.
 */
#define VSTAT_PERMS (VREAD_ATTRIBUTES | VREAD_ACL)

/*
 * Permissions that allow changing the state of the file in any way.
 */
#define VMODIFY_PERMS (VWRITE | VAPPEND | VADMIN_PERMS | VDELETE_CHILD | \
	VDELETE)

/*
 * Token indicating no attribute value yet assigned.
 */
#define VNOVAL (-1)

/*
 * LK_TIMELOCK timeout for vnode locks (used mainly by the pageout daemon)
 */
#define VLKTIMEOUT (hz / 20 + 1)

#ifdef _KERNEL

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_VNODE);
#endif

extern u_int ncsizefactor;
extern const u_int io_hold_cnt;

/*
 * Convert between vnode types and inode formats (since POSIX.1
 * defines mode word of stat structure in terms of inode formats).
 */
extern __enum_uint8(vtype) iftovt_tab[];
extern int vttoif_tab[];
#define IFTOVT(mode) (iftovt_tab[((mode) & S_IFMT) >> 12])
#define VTTOIF(indx) (vttoif_tab[(int)(indx)])
#define MAKEIMODE(indx, mode) (int)(VTTOIF(indx) | (mode))

/*
 * Flags to various vnode functions.
 */
#define SKIPSYSTEM 0x0001 /* vflush: skip vnodes marked VSYSTEM */
#define FORCECLOSE 0x0002 /* vflush: force file closure */
#define WRITECLOSE 0x0004 /* vflush: only close writable files */
#define EARLYFLUSH 0x0008 /* vflush: early call for ffs_flushfiles */
#define V_SAVE 0x0001 /* vinvalbuf: sync file first */
#define V_ALT 0x0002 /* vinvalbuf: invalidate only alternate bufs */
#define V_NORMAL 0x0004 /* vinvalbuf: invalidate only regular bufs */
#define V_CLEANONLY 0x0008 /* vinvalbuf: invalidate only clean bufs */
#define V_VMIO 0x0010 /* vinvalbuf: called during pageout */
#define V_ALLOWCLEAN 0x0020 /* vinvalbuf: allow clean buffers after flush */
#define REVOKEALL 0x0001 /* vop_revoke: revoke all aliases */
#define V_WAIT 0x0001 /* vn_start_write: sleep for suspend */
#define V_NOWAIT 0x0002 /* vn_start_write: don't sleep for suspend */
#define V_XSLEEP 0x0004 /* vn_start_write: just return after sleep */
#define V_PCATCH 0x0008 /* vn_start_write: make the sleep interruptible */
#define V_VALID_FLAGS (V_WAIT | V_NOWAIT | V_XSLEEP | V_PCATCH)

#define VR_START_WRITE 0x0001 /* vfs_write_resume: start write atomically */
#define VR_NO_SUSPCLR 0x0002 /* vfs_write_resume: do not clear suspension */

#define VS_SKIP_UNMOUNT 0x0001 /* vfs_write_suspend: fail if the
				  filesystem is being unmounted */

#define VREF(vp) vref(vp)

#ifdef DIAGNOSTIC
#define VATTR_NULL(vap) vattr_null(vap)
#else
#define VATTR_NULL(vap) (*(vap) = va_null) /* initialize a vattr */
#endif /* DIAGNOSTIC */

#define NULLVP ((struct vnode *)NULL)

/*
 * Global vnode data.
 */
extern struct vnode *rootvnode; /* root (i.e. "/") vnode */
extern struct mount *rootdevmp; /* "/dev" mount */
extern u_long desiredvnodes; /* number of vnodes desired */
extern struct uma_zone *namei_zone;
extern struct vattr va_null; /* predefined null vattr structure */

extern u_int vn_lock_pair_pause_max;

#define VI_LOCK(vp) mtx_lock(&(vp)->v_interlock)
#define VI_LOCK_FLAGS(vp, flags) mtx_lock_flags(&(vp)->v_interlock, (flags))
#define VI_TRYLOCK(vp) mtx_trylock(&(vp)->v_interlock)
#define VI_UNLOCK(vp) mtx_unlock(&(vp)->v_interlock)
#define VI_MTX(vp) (&(vp)->v_interlock)

#define VN_LOCK_AREC(vp) lockallowrecurse((vp)->v_vnlock)
#define VN_LOCK_ASHARE(vp) lockallowshare((vp)->v_vnlock)
#define VN_LOCK_DSHARE(vp) lockdisableshare((vp)->v_vnlock)

#endif /* _KERNEL */

/*
 * Mods for extensibility.
 */

/*
 * Flags for vdesc_flags:
 */
#define VDESC_MAX_VPS 16
/* Low order 16 flag bits are reserved for willrele flags for vp arguments. */
#define VDESC_VP0_WILLRELE 0x0001
#define VDESC_VP1_WILLRELE 0x0002
#define VDESC_VP2_WILLRELE 0x0004
#define VDESC_VP3_WILLRELE 0x0008

/*
 * A generic structure.
 * This can be used by bypass routines to identify generic arguments.
 */
struct vop_generic_args {
	struct vnodeop_desc *a_desc;
	/* other random data follows, presumably */
};

typedef int vop_bypass_t(struct vop_generic_args *);

/*
 * VDESC_NO_OFFSET is used to identify the end of the offset list
 * and in places where no such field exists.
 */
#define VDESC_NO_OFFSET -1

/*
 * This structure describes the vnode operation taking place.
 */
struct vnodeop_desc {
	char *vdesc_name; /* a readable name for debugging */
	int vdesc_flags; /* VDESC_* flags */
	int vdesc_vop_offset;
	vop_bypass_t *vdesc_call; /* Function to call */

	/*
	 * These ops are used by bypass routines to map and locate arguments.
	 * Creds and procs are not needed in bypass routines, but sometimes
	 * they are useful to (for example) transport layers.
	 * Nameidata is useful because it has a cred in it.
	 */
	int *vdesc_vp_offsets; /* list ended by VDESC_NO_OFFSET */
	int vdesc_vpp_offset; /* return vpp location */
	int vdesc_cred_offset; /* cred location, if any */
	int vdesc_thread_offset; /* thread location, if any */
	int vdesc_componentname_offset; /* if any */
};

#ifdef _KERNEL
/*
 * A list of all the operation descs.
 */
extern struct vnodeop_desc *vnodeop_descs[];

#define VOPARG_OFFSETOF(s_type, field) __offsetof(s_type, field)
#define VOPARG_OFFSETTO(s_type, s_offset, struct_p) \
	((s_type)(((char*)(struct_p)) + (s_offset)))
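
/*
 * Illustrative sketch of how a bypass routine can locate the vnode arguments
 * of an arbitrary operation (in the style of nullfs-like bypass code):
 *
 *	struct vnodeop_desc *descp = ap->a_desc;
 *	struct vnode **vpp0;
 *
 *	if (descp->vdesc_vp_offsets != NULL &&
 *	    descp->vdesc_vp_offsets[0] != VDESC_NO_OFFSET)
 *		vpp0 = VOPARG_OFFSETTO(struct vnode **,
 *		    descp->vdesc_vp_offsets[0], ap);
 */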

#ifdef DEBUG_VFS_LOCKS
/*
 * Support code to aid in debugging VFS locking problems. Not totally
 * reliable since if the thread sleeps between changing the lock
 * state and checking it with the assert, some other thread could
 * change the state. They are good enough for debugging a single
 * filesystem using a single-threaded test. Note that the unreliability is
 * limited to false negatives; efforts were made to ensure that false
 * positives cannot occur.
 */
void assert_vi_locked(struct vnode *vp, const char *str);
void assert_vi_unlocked(struct vnode *vp, const char *str);
void assert_vop_elocked(struct vnode *vp, const char *str);
void assert_vop_locked(struct vnode *vp, const char *str);
void assert_vop_unlocked(struct vnode *vp, const char *str);

#define ASSERT_VI_LOCKED(vp, str) assert_vi_locked((vp), (str))
#define ASSERT_VI_UNLOCKED(vp, str) assert_vi_unlocked((vp), (str))
#define ASSERT_VOP_ELOCKED(vp, str) assert_vop_elocked((vp), (str))
#define ASSERT_VOP_LOCKED(vp, str) assert_vop_locked((vp), (str))
#define ASSERT_VOP_UNLOCKED(vp, str) assert_vop_unlocked((vp), (str))
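
/*
 * These assertions are typically placed at the start of VOP implementations;
 * an illustrative use in a hypothetical filesystem write routine:
 *
 *	ASSERT_VOP_ELOCKED(vp, "somefs_write");
 */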

#define ASSERT_VOP_IN_SEQC(vp) do { \
	struct vnode *_vp = (vp); \
	\
	VNPASS(seqc_in_modify(_vp->v_seqc), _vp); \
} while (0)

#define ASSERT_VOP_NOT_IN_SEQC(vp) do { \
	struct vnode *_vp = (vp); \
	\
	VNPASS(!seqc_in_modify(_vp->v_seqc), _vp); \
} while (0)

#else /* !DEBUG_VFS_LOCKS */

#define ASSERT_VI_LOCKED(vp, str) ((void)0)
#define ASSERT_VI_UNLOCKED(vp, str) ((void)0)
#define ASSERT_VOP_ELOCKED(vp, str) ((void)0)
#define ASSERT_VOP_LOCKED(vp, str) ((void)0)
#define ASSERT_VOP_UNLOCKED(vp, str) ((void)0)

#define ASSERT_VOP_IN_SEQC(vp) ((void)0)
#define ASSERT_VOP_NOT_IN_SEQC(vp) ((void)0)

#endif /* DEBUG_VFS_LOCKS */

/*
 * This call works for vnodes in the kernel.
 */
#define VCALL(c) ((c)->a_desc->vdesc_call(c))

#define DOINGASYNC(vp) \
	(((vp)->v_mount->mnt_kern_flag & MNTK_ASYNC) != 0 && \
	((curthread->td_pflags & TDP_SYNCIO) == 0))

/*
 * VMIO support inline
 */

extern int vmiodirenable;

static __inline int
vn_canvmio(struct vnode *vp)
{
	if (vp && (vp->v_type == VREG || (vmiodirenable && vp->v_type == VDIR)))
		return(TRUE);
	return(FALSE);
}

/*
 * Finally, include the default set of vnode operations.
 */
typedef void vop_getpages_iodone_t(void *, vm_page_t *, int, int);
#include "vnode_if.h"

/* vn_open_flags */
#define VN_OPEN_NOAUDIT 0x00000001
#define VN_OPEN_NOCAPCHECK 0x00000002
#define VN_OPEN_NAMECACHE 0x00000004
#define VN_OPEN_INVFS 0x00000008
#define VN_OPEN_WANTIOCTLCAPS 0x00000010

/* copy_file_range kernel flags */
#define COPY_FILE_RANGE_KFLAGS 0xff000000
#define COPY_FILE_RANGE_TIMEO1SEC 0x01000000 /* Return after 1sec. */

/*
 * Public vnode manipulation functions.
 */
struct componentname;
struct file;
struct mount;
struct nameidata;
struct ostat;
struct freebsd11_stat;
struct thread;
struct proc;
struct stat;
struct nstat;
struct ucred;
struct uio;
struct vattr;
struct vfsops;
struct vnode;

typedef int (*vn_get_ino_t)(struct mount *, void *, int, struct vnode **);

int bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn,
    daddr_t endn);
/* cache_* may belong in namei.h. */
void cache_changesize(u_long newhashsize);

#define VFS_CACHE_DROPOLD 0x1

void cache_enter_time_flags(struct vnode *dvp, struct vnode *vp,
    struct componentname *cnp, struct timespec *tsp,
    struct timespec *dtsp, int flags);
#define cache_enter(dvp, vp, cnp) \
	cache_enter_time(dvp, vp, cnp, NULL, NULL)
void cache_enter_time(struct vnode *dvp, struct vnode *vp,
    struct componentname *cnp, struct timespec *tsp,
    struct timespec *dtsp);
int cache_lookup(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp, struct timespec *tsp, int *ticksp);
void cache_vnode_init(struct vnode *vp);
void cache_purge(struct vnode *vp);
void cache_purge_vgone(struct vnode *vp);
void cache_purge_negative(struct vnode *vp);
void cache_purgevfs(struct mount *mp);
char *cache_symlink_alloc(size_t size, int flags);
void cache_symlink_free(char *string, size_t size);
int cache_symlink_resolve(struct cache_fpl *fpl, const char *string,
    size_t len);
void cache_vop_rename(struct vnode *fdvp, struct vnode *fvp, struct vnode *tdvp,
    struct vnode *tvp, struct componentname *fcnp, struct componentname *tcnp);
void cache_vop_rmdir(struct vnode *dvp, struct vnode *vp);
void cache_vop_vector_register(struct vop_vector *);
#ifdef INVARIANTS
void cache_validate(struct vnode *dvp, struct vnode *vp,
    struct componentname *cnp);
void cache_validate_vop_vector(struct mount *mp, struct vop_vector *vops);
void cache_assert_no_entries(struct vnode *vp);
#else
static inline void
cache_validate(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
}

static inline void
cache_validate_vop_vector(struct mount *mp, struct vop_vector *vops)
{
}

static inline void
cache_assert_no_entries(struct vnode *vp)
{
}
#endif
void cache_fast_lookup_enabled_recalc(void);
int change_dir(struct vnode *vp, struct thread *td);
void cvtstat(struct stat *st, struct ostat *ost);
int freebsd11_cvtnstat(struct stat *sb, struct nstat *nsb);
int freebsd11_cvtstat(struct stat *st, struct freebsd11_stat *ost);
int getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp);
void getnewvnode_reserve(void);
void getnewvnode_drop_reserve(void);
int insmntque(struct vnode *vp, struct mount *mp);
int insmntque1(struct vnode *vp, struct mount *mp);
u_quad_t init_va_filerev(void);
int speedup_syncer(void);
int vn_vptocnp(struct vnode **vp, char *buf, size_t *buflen);
int vn_getcwd(char *buf, char **retbuf, size_t *buflen);
int vn_fullpath(struct vnode *vp, char **retbuf, char **freebuf);
int vn_fullpath_global(struct vnode *vp, char **retbuf, char **freebuf);
int vn_fullpath_hardlink(struct vnode *vp, struct vnode *dvp,
    const char *hdrl_name, size_t hrdl_name_length, char **retbuf,
    char **freebuf, size_t *buflen);
struct vnode *
	vn_dir_dd_ino(struct vnode *vp);
int vn_commname(struct vnode *vn, char *buf, u_int buflen);
int vn_path_to_global_path(struct thread *td, struct vnode *vp,
    char *path, u_int pathlen);
int vn_path_to_global_path_hardlink(struct thread *td, struct vnode *vp,
    struct vnode *dvp, char *path, u_int pathlen, const char *leaf_name,
    size_t leaf_length);
int vaccess(__enum_uint8(vtype) type, mode_t file_mode, uid_t file_uid,
    gid_t file_gid, accmode_t accmode, struct ucred *cred);
int vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid,
    struct ucred *cred);
int vaccess_acl_nfs4(__enum_uint8(vtype) type, uid_t file_uid, gid_t file_gid,
    struct acl *aclp, accmode_t accmode, struct ucred *cred);
int vaccess_acl_posix1e(__enum_uint8(vtype) type, uid_t file_uid,
    gid_t file_gid, struct acl *acl, accmode_t accmode,
    struct ucred *cred);
void vattr_null(struct vattr *vap);
void vlazy(struct vnode *);
void vdrop(struct vnode *);
void vdropl(struct vnode *);
int vflush(struct mount *mp, int rootrefs, int flags, struct thread *td);
int vget(struct vnode *vp, int flags);
enum vgetstate vget_prep_smr(struct vnode *vp);
enum vgetstate vget_prep(struct vnode *vp);
int vget_finish(struct vnode *vp, int flags, enum vgetstate vs);
void vget_finish_ref(struct vnode *vp, enum vgetstate vs);
void vget_abort(struct vnode *vp, enum vgetstate vs);
void vgone(struct vnode *vp);
void vhold(struct vnode *);
void vholdnz(struct vnode *);
bool vhold_smr(struct vnode *);
int vinactive(struct vnode *vp);
int vinvalbuf(struct vnode *vp, int save, int slpflag, int slptimeo);
int vtruncbuf(struct vnode *vp, off_t length, int blksize);
void v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn,
    int blksize);
void vunref(struct vnode *);
void vn_printf(struct vnode *vp, const char *fmt, ...) __printflike(2,3);
int vrecycle(struct vnode *vp);
int vrecyclel(struct vnode *vp);
int vn_bmap_seekhole_locked(struct vnode *vp, u_long cmd, off_t *off,
    struct ucred *cred);
int vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off,
    struct ucred *cred);
int vn_close(struct vnode *vp,
    int flags, struct ucred *file_cred, struct thread *td);
int vn_copy_file_range(struct vnode *invp, off_t *inoffp,
    struct vnode *outvp, off_t *outoffp, size_t *lenp,
    unsigned int flags, struct ucred *incred, struct ucred *outcred,
    struct thread *fsize_td);
int vn_deallocate(struct vnode *vp, off_t *offset, off_t *length, int flags,
    int ioflg, struct ucred *active_cred, struct ucred *file_cred);
void vn_finished_write(struct mount *mp);
void vn_finished_secondary_write(struct mount *mp);
int vn_fsync_buf(struct vnode *vp, int waitfor);
int vn_generic_copy_file_range(struct vnode *invp, off_t *inoffp,
    struct vnode *outvp, off_t *outoffp, size_t *lenp,
    unsigned int flags, struct ucred *incred, struct ucred *outcred,
    struct thread *fsize_td);
int vn_need_pageq_flush(struct vnode *vp);
bool vn_isdisk_error(struct vnode *vp, int *errp);
bool vn_isdisk(struct vnode *vp);
int _vn_lock(struct vnode *vp, int flags, const char *file, int line);
#define vn_lock(vp, flags) _vn_lock(vp, flags, __FILE__, __LINE__)
void vn_lock_pair(struct vnode *vp1, bool vp1_locked, int lkflags1,
    struct vnode *vp2, bool vp2_locked, int lkflags2);
int vn_open(struct nameidata *ndp, int *flagp, int cmode, struct file *fp);
int vn_open_cred(struct nameidata *ndp, int *flagp, int cmode,
    u_int vn_open_flags, struct ucred *cred, struct file *fp);
int vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred,
    struct thread *td, struct file *fp);
void vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end);
void vn_pages_remove_valid(struct vnode *vp, vm_pindex_t start,
    vm_pindex_t end);
int vn_pollrecord(struct vnode *vp, struct thread *p, int events);
int vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base,
    int len, off_t offset, enum uio_seg segflg, int ioflg,
    struct ucred *active_cred, struct ucred *file_cred, ssize_t *aresid,
    struct thread *td);
int vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, void *base,
    size_t len, off_t offset, enum uio_seg segflg, int ioflg,
    struct ucred *active_cred, struct ucred *file_cred, size_t *aresid,
    struct thread *td);
int vn_read_from_obj(struct vnode *vp, struct uio *uio);
int vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio,
    struct thread *td);
int vn_rlimit_fsizex(const struct vnode *vp, struct uio *uio,
    off_t maxfsz, ssize_t *resid_adj, struct thread *td);
void vn_rlimit_fsizex_res(struct uio *uio, ssize_t resid_adj);
int vn_rlimit_trunc(u_quad_t size, struct thread *td);
int vn_start_write(struct vnode *vp, struct mount **mpp, int flags);
int vn_start_secondary_write(struct vnode *vp, struct mount **mpp,
    int flags);
int vn_truncate_locked(struct vnode *vp, off_t length, bool sync,
    struct ucred *cred);
int vn_writechk(struct vnode *vp);
int vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td);
int vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td);
int vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td);
int vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags,
    struct vnode **rvp);
int vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc,
    void *alloc_arg, int lkflags, struct vnode **rvp);
int vn_utimes_perm(struct vnode *vp, struct vattr *vap,
    struct ucred *cred, struct thread *td);
int vn_cmp(struct file *, struct file *, struct thread *td);

int vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio);
int vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
    struct uio *uio);

void vn_seqc_write_begin_locked(struct vnode *vp);
void vn_seqc_write_begin(struct vnode *vp);
void vn_seqc_write_end_locked(struct vnode *vp);
void vn_seqc_write_end(struct vnode *vp);
#define vn_seqc_read_any(vp) seqc_read_any(&(vp)->v_seqc)
#define vn_seqc_read_notmodify(vp) seqc_read_notmodify(&(vp)->v_seqc)
#define vn_seqc_consistent(vp, seq) seqc_consistent(&(vp)->v_seqc, seq)

#define vn_rangelock_unlock(vp, cookie) \
	rangelock_unlock(&(vp)->v_rl, (cookie))
#define vn_rangelock_rlock(vp, start, end) \
	rangelock_rlock(&(vp)->v_rl, (start), (end))
#define vn_rangelock_tryrlock(vp, start, end) \
	rangelock_tryrlock(&(vp)->v_rl, (start), (end))
#define vn_rangelock_wlock(vp, start, end) \
	rangelock_wlock(&(vp)->v_rl, (start), (end))
#define vn_rangelock_trywlock(vp, start, end) \
	rangelock_trywlock(&(vp)->v_rl, (start), (end))

#define vn_irflag_read(vp) atomic_load_short(&(vp)->v_irflag)
void vn_irflag_set_locked(struct vnode *vp, short toset);
void vn_irflag_set(struct vnode *vp, short toset);
void vn_irflag_set_cond_locked(struct vnode *vp, short toset);
void vn_irflag_set_cond(struct vnode *vp, short toset);
void vn_irflag_unset_locked(struct vnode *vp, short tounset);
void vn_irflag_unset(struct vnode *vp, short tounset);

int vfs_cache_lookup(struct vop_lookup_args *ap);
int vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp);
void vfs_timestamp(struct timespec *);
void vfs_write_resume(struct mount *mp, int flags);
int vfs_write_suspend(struct mount *mp, int flags);
int vfs_write_suspend_umnt(struct mount *mp);
struct vnode *vnlru_alloc_marker(void);
void vnlru_free_marker(struct vnode *);
void vnlru_free_vfsops(int, struct vfsops *, struct vnode *);
int vop_stdbmap(struct vop_bmap_args *);
int vop_stdfdatasync_buf(struct vop_fdatasync_args *);
int vop_stdfsync(struct vop_fsync_args *);
int vop_stdgetwritemount(struct vop_getwritemount_args *);
int vop_stdgetpages(struct vop_getpages_args *);
int vop_stdinactive(struct vop_inactive_args *);
int vop_stdioctl(struct vop_ioctl_args *);
int vop_stdneed_inactive(struct vop_need_inactive_args *);
int vop_stdkqfilter(struct vop_kqfilter_args *);
int vop_stdlock(struct vop_lock1_args *);
int vop_stdunlock(struct vop_unlock_args *);
int vop_stdislocked(struct vop_islocked_args *);
int vop_lock(struct vop_lock1_args *);
int vop_unlock(struct vop_unlock_args *);
int vop_islocked(struct vop_islocked_args *);
int vop_stdputpages(struct vop_putpages_args *);
int vop_nopoll(struct vop_poll_args *);
int vop_stdaccess(struct vop_access_args *ap);
int vop_stdaccessx(struct vop_accessx_args *ap);
int vop_stdadvise(struct vop_advise_args *ap);
int vop_stdadvlock(struct vop_advlock_args *ap);
int vop_stdadvlockasync(struct vop_advlockasync_args *ap);
int vop_stdadvlockpurge(struct vop_advlockpurge_args *ap);
int vop_stdallocate(struct vop_allocate_args *ap);
int vop_stddeallocate(struct vop_deallocate_args *ap);
int vop_stdset_text(struct vop_set_text_args *ap);
int vop_stdpathconf(struct vop_pathconf_args *);
int vop_stdpoll(struct vop_poll_args *);
int vop_stdvptocnp(struct vop_vptocnp_args *ap);
int vop_stdvptofh(struct vop_vptofh_args *ap);
int vop_stdunp_bind(struct vop_unp_bind_args *ap);
int vop_stdunp_connect(struct vop_unp_connect_args *ap);
int vop_stdunp_detach(struct vop_unp_detach_args *ap);
int vop_stdadd_writecount_nomsync(struct vop_add_writecount_args *ap);
int vop_eopnotsupp(struct vop_generic_args *ap);
int vop_ebadf(struct vop_generic_args *ap);
int vop_einval(struct vop_generic_args *ap);
int vop_enoent(struct vop_generic_args *ap);
int vop_enotty(struct vop_generic_args *ap);
int vop_eagain(struct vop_generic_args *ap);
int vop_null(struct vop_generic_args *ap);
int vop_panic(struct vop_generic_args *ap);
int dead_poll(struct vop_poll_args *ap);
int dead_read(struct vop_read_args *ap);
int dead_write(struct vop_write_args *ap);

/* These are called from within the actual VOPS. */
void vop_close_post(void *a, int rc);
void vop_create_pre(void *a);
void vop_create_post(void *a, int rc);
void vop_whiteout_pre(void *a);
void vop_whiteout_post(void *a, int rc);
void vop_deleteextattr_pre(void *a);
void vop_deleteextattr_post(void *a, int rc);
void vop_link_pre(void *a);
void vop_link_post(void *a, int rc);
void vop_lookup_post(void *a, int rc);
void vop_lookup_pre(void *a);
void vop_mkdir_pre(void *a);
void vop_mkdir_post(void *a, int rc);
void vop_mknod_pre(void *a);
void vop_mknod_post(void *a, int rc);
void vop_open_post(void *a, int rc);
void vop_read_post(void *a, int rc);
void vop_read_pgcache_post(void *ap, int rc);
void vop_readdir_post(void *a, int rc);
void vop_reclaim_post(void *a, int rc);
void vop_remove_pre(void *a);
void vop_remove_post(void *a, int rc);
void vop_rename_post(void *a, int rc);
void vop_rename_pre(void *a);
void vop_rmdir_pre(void *a);
void vop_rmdir_post(void *a, int rc);
void vop_setattr_pre(void *a);
void vop_setattr_post(void *a, int rc);
void vop_setacl_pre(void *a);
void vop_setacl_post(void *a, int rc);
void vop_setextattr_pre(void *a);
void vop_setextattr_post(void *a, int rc);
void vop_symlink_pre(void *a);
void vop_symlink_post(void *a, int rc);
int vop_sigdefer(struct vop_vector *vop, struct vop_generic_args *a);

#ifdef DEBUG_VFS_LOCKS
void vop_fdatasync_debugpre(void *a);
void vop_fdatasync_debugpost(void *a, int rc);
void vop_fplookup_vexec_debugpre(void *a);
void vop_fplookup_vexec_debugpost(void *a, int rc);
void vop_fplookup_symlink_debugpre(void *a);
void vop_fplookup_symlink_debugpost(void *a, int rc);
void vop_fsync_debugpre(void *a);
void vop_fsync_debugpost(void *a, int rc);
void vop_strategy_debugpre(void *a);
void vop_lock_debugpre(void *a);
void vop_lock_debugpost(void *a, int rc);
void vop_unlock_debugpre(void *a);
void vop_need_inactive_debugpre(void *a);
void vop_need_inactive_debugpost(void *a, int rc);
void vop_mkdir_debugpost(void *a, int rc);
#else
#define vop_fdatasync_debugpre(x) do { } while (0)
#define vop_fdatasync_debugpost(x, y) do { } while (0)
#define vop_fplookup_vexec_debugpre(x) do { } while (0)
#define vop_fplookup_vexec_debugpost(x, y) do { } while (0)
#define vop_fplookup_symlink_debugpre(x) do { } while (0)
#define vop_fplookup_symlink_debugpost(x, y) do { } while (0)
#define vop_fsync_debugpre(x) do { } while (0)
#define vop_fsync_debugpost(x, y) do { } while (0)
#define vop_strategy_debugpre(x) do { } while (0)
#define vop_lock_debugpre(x) do { } while (0)
#define vop_lock_debugpost(x, y) do { } while (0)
#define vop_unlock_debugpre(x) do { } while (0)
#define vop_need_inactive_debugpre(x) do { } while (0)
#define vop_need_inactive_debugpost(x, y) do { } while (0)
#define vop_mkdir_debugpost(x, y) do { } while (0)
#endif

void vop_rename_fail(struct vop_rename_args *ap);

#define vop_stat_helper_pre(ap) ({ \
	struct vop_stat_args *_ap = (ap); \
	int _error; \
	AUDIT_ARG_VNODE1(ap->a_vp); \
	_error = mac_vnode_check_stat(_ap->a_active_cred, _ap->a_file_cred, _ap->a_vp);\
	if (__predict_true(_error == 0)) { \
		ap->a_sb->st_padding0 = 0; \
		ap->a_sb->st_padding1 = 0; \
		bzero(_ap->a_sb->st_spare, sizeof(_ap->a_sb->st_spare)); \
	} \
	_error; \
})

#define vop_stat_helper_post(ap, error) ({ \
	struct vop_stat_args *_ap = (ap); \
	int _error = (error); \
	if (priv_check_cred_vfs_generation(_ap->a_active_cred)) \
		_ap->a_sb->st_gen = 0; \
	_error; \
})

#define VOP_WRITE_PRE(ap) \
	struct vattr va; \
	int error; \
	off_t osize, ooffset, noffset; \
	\
	osize = ooffset = noffset = 0; \
	if (!VN_KNLIST_EMPTY((ap)->a_vp)) { \
		error = VOP_GETATTR((ap)->a_vp, &va, (ap)->a_cred); \
		if (error) \
			return (error); \
		ooffset = (ap)->a_uio->uio_offset; \
		osize = (off_t)va.va_size; \
	}

#define VOP_WRITE_POST(ap, ret) \
	noffset = (ap)->a_uio->uio_offset; \
	if (noffset > ooffset && !VN_KNLIST_EMPTY((ap)->a_vp)) { \
		VFS_KNOTE_LOCKED((ap)->a_vp, NOTE_WRITE \
		    | (noffset > osize ? NOTE_EXTEND : 0)); \
	}

#define VOP_LOCK(vp, flags) VOP_LOCK1(vp, flags, __FILE__, __LINE__)

#ifdef INVARIANTS
#define VOP_ADD_WRITECOUNT_CHECKED(vp, cnt) \
	do { \
		int error_; \
		\
		error_ = VOP_ADD_WRITECOUNT((vp), (cnt)); \
		VNASSERT(error_ == 0, (vp), ("VOP_ADD_WRITECOUNT returned %d", \
		    error_)); \
	} while (0)
#define VOP_SET_TEXT_CHECKED(vp) \
	do { \
		int error_; \
		\
		error_ = VOP_SET_TEXT((vp)); \
		VNASSERT(error_ == 0, (vp), ("VOP_SET_TEXT returned %d", \
		    error_)); \
	} while (0)
#define VOP_UNSET_TEXT_CHECKED(vp) \
	do { \
		int error_; \
		\
		error_ = VOP_UNSET_TEXT((vp)); \
		VNASSERT(error_ == 0, (vp), ("VOP_UNSET_TEXT returned %d", \
		    error_)); \
	} while (0)
#else
#define VOP_ADD_WRITECOUNT_CHECKED(vp, cnt) VOP_ADD_WRITECOUNT((vp), (cnt))
#define VOP_SET_TEXT_CHECKED(vp) VOP_SET_TEXT((vp))
#define VOP_UNSET_TEXT_CHECKED(vp) VOP_UNSET_TEXT((vp))
#endif

#define VN_IS_DOOMED(vp) __predict_false((vn_irflag_read(vp) & VIRF_DOOMED) != 0)

void vput(struct vnode *vp);
void vrele(struct vnode *vp);
void vref(struct vnode *vp);
void vrefact(struct vnode *vp);
void v_addpollinfo(struct vnode *vp);
static __inline int
vrefcnt(struct vnode *vp)
{

	return (vp->v_usecount);
}

#define vholdl(vp) do { \
	ASSERT_VI_LOCKED(vp, __func__); \
	vhold(vp); \
} while (0)

#define vrefl(vp) do { \
	ASSERT_VI_LOCKED(vp, __func__); \
	vref(vp); \
} while (0)

/*
 * The caller doesn't know the file size and vnode_create_vobject() should
 * determine the size on its own.
 */
#define VNODE_NO_SIZE ((off_t)-1)

int vnode_create_vobject(struct vnode *vp, off_t size, struct thread *td);
int vnode_create_disk_vobject(struct vnode *vp, off_t size, struct thread *td);
void vnode_destroy_vobject(struct vnode *vp);

extern struct vop_vector fifo_specops;
extern struct vop_vector dead_vnodeops;
extern struct vop_vector default_vnodeops;

#define VOP_PANIC ((void*)(uintptr_t)vop_panic)
#define VOP_NULL ((void*)(uintptr_t)vop_null)
#define VOP_EBADF ((void*)(uintptr_t)vop_ebadf)
#define VOP_ENOTTY ((void*)(uintptr_t)vop_enotty)
#define VOP_EINVAL ((void*)(uintptr_t)vop_einval)
#define VOP_ENOENT ((void*)(uintptr_t)vop_enoent)
#define VOP_EOPNOTSUPP ((void*)(uintptr_t)vop_eopnotsupp)
#define VOP_EAGAIN ((void*)(uintptr_t)vop_eagain)

/* fifo_vnops.c */
int fifo_printinfo(struct vnode *);

/* vfs_hash.c */
typedef int vfs_hash_cmp_t(struct vnode *vp, void *arg);

void vfs_hash_changesize(u_long newhashsize);
int vfs_hash_get(const struct mount *mp, u_int hash, int flags,
    struct thread *td, struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);
u_int vfs_hash_index(struct vnode *vp);
int vfs_hash_insert(struct vnode *vp, u_int hash, int flags, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);
void vfs_hash_ref(const struct mount *mp, u_int hash, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);
void vfs_hash_rehash(struct vnode *vp, u_int hash);
void vfs_hash_remove(struct vnode *vp);

int vfs_kqfilter(struct vop_kqfilter_args *);
struct dirent;
int vn_dir_next_dirent(struct vnode *vp, struct thread *td,
    char *dirbuf, size_t dirbuflen,
    struct dirent **dpp, size_t *len, off_t *off, int *eofflag);
int vn_dir_check_empty(struct vnode *vp);
int vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off);

int vfs_unixify_accmode(accmode_t *accmode);

void vfs_unp_reclaim(struct vnode *vp);

int setfmode(struct thread *td, struct ucred *cred, struct vnode *vp, int mode);
int setfown(struct thread *td, struct ucred *cred, struct vnode *vp, uid_t uid,
    gid_t gid);
int vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td);
int vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td);
int vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *active_cred);
int vn_getsize(struct vnode *vp, off_t *size, struct ucred *active_cred);

void vn_fsid(struct vnode *vp, struct vattr *va);

int vn_dir_check_exec(struct vnode *vp, struct componentname *cnp);
int vn_lktype_write(struct mount *mp, struct vnode *vp);

#ifdef INVARIANTS
void vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state);
#endif

static inline void
vn_set_state(struct vnode *vp, __enum_uint8(vstate) state)
{
#ifdef INVARIANTS
	vn_set_state_validate(vp, state);
#endif
	vp->v_state = state;
}

static inline __enum_uint8(vstate)
vn_get_state(struct vnode *vp)
{
	return (vp->v_state);
}

#define VOP_UNLOCK_FLAGS(vp, flags) ({ \
	struct vnode *_vp = (vp); \
	int _flags = (flags); \
	int _error; \
	\
	if ((_flags & ~(LK_INTERLOCK | LK_RELEASE)) != 0) \
		panic("%s: unsupported flags %x\n", __func__, flags); \
	_error = VOP_UNLOCK(_vp); \
	if (_flags & LK_INTERLOCK) \
		VI_UNLOCK(_vp); \
	_error; \
})

#include <sys/kernel.h>

#define VFS_VOP_VECTOR_REGISTER(vnodeops) \
	SYSINIT(vfs_vector_##vnodeops##_f, SI_SUB_VFS, SI_ORDER_ANY, \
	    vfs_vector_op_register, &vnodeops)

#define VFS_SMR_DECLARE \
	extern smr_t vfs_smr

#define VFS_SMR() vfs_smr
#define vfs_smr_enter() smr_enter(VFS_SMR())
#define vfs_smr_exit() smr_exit(VFS_SMR())
#define vfs_smr_synchronize() smr_synchronize(VFS_SMR())
#define vfs_smr_entered_load(ptr) smr_entered_load((ptr), VFS_SMR())
#define VFS_SMR_ASSERT_ENTERED() SMR_ASSERT_ENTERED(VFS_SMR())
#define VFS_SMR_ASSERT_NOT_ENTERED() SMR_ASSERT_NOT_ENTERED(VFS_SMR())
#define VFS_SMR_ZONE_SET(zone) uma_zone_set_smr((zone), VFS_SMR())

#define vn_load_v_data_smr(vp) ({ \
	struct vnode *_vp = (vp); \
	\
	VFS_SMR_ASSERT_ENTERED(); \
	atomic_load_consume_ptr(&(_vp)->v_data); \
})

#endif /* _KERNEL */

#endif /* !_SYS_VNODE_H_ */