1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 */
36
37 /*
38 * External virtual filesystem routines
39 */
40
41 #include <sys/cdefs.h>
42 #include "opt_ddb.h"
43 #include "opt_watchdog.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/asan.h>
48 #include <sys/bio.h>
49 #include <sys/buf.h>
50 #include <sys/capsicum.h>
51 #include <sys/condvar.h>
52 #include <sys/conf.h>
53 #include <sys/counter.h>
54 #include <sys/dirent.h>
55 #include <sys/event.h>
56 #include <sys/eventhandler.h>
57 #include <sys/extattr.h>
58 #include <sys/file.h>
59 #include <sys/fcntl.h>
60 #include <sys/jail.h>
61 #include <sys/kdb.h>
62 #include <sys/kernel.h>
63 #include <sys/kthread.h>
64 #include <sys/ktr.h>
65 #include <sys/limits.h>
66 #include <sys/lockf.h>
67 #include <sys/malloc.h>
68 #include <sys/mount.h>
69 #include <sys/namei.h>
70 #include <sys/pctrie.h>
71 #include <sys/priv.h>
72 #include <sys/reboot.h>
73 #include <sys/refcount.h>
74 #include <sys/rwlock.h>
75 #include <sys/sched.h>
76 #include <sys/sleepqueue.h>
77 #include <sys/smr.h>
78 #include <sys/smp.h>
79 #include <sys/stat.h>
80 #include <sys/sysctl.h>
81 #include <sys/syslog.h>
82 #include <sys/vmmeter.h>
83 #include <sys/vnode.h>
84 #include <sys/watchdog.h>
85
86 #include <machine/stdarg.h>
87
88 #include <security/mac/mac_framework.h>
89
90 #include <vm/vm.h>
91 #include <vm/vm_object.h>
92 #include <vm/vm_extern.h>
93 #include <vm/pmap.h>
94 #include <vm/vm_map.h>
95 #include <vm/vm_page.h>
96 #include <vm/vm_kern.h>
97 #include <vm/vnode_pager.h>
98 #include <vm/uma.h>
99
100 #if defined(DEBUG_VFS_LOCKS) && (!defined(INVARIANTS) || !defined(WITNESS))
101 #error DEBUG_VFS_LOCKS requires INVARIANTS and WITNESS
102 #endif
103
104 #ifdef DDB
105 #include <ddb/ddb.h>
106 #endif
107
108 static void delmntque(struct vnode *vp);
109 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
110 int slpflag, int slptimeo);
111 static void syncer_shutdown(void *arg, int howto);
112 static int vtryrecycle(struct vnode *vp, bool isvnlru);
113 static void v_init_counters(struct vnode *);
114 static void vn_seqc_init(struct vnode *);
115 static void vn_seqc_write_end_free(struct vnode *vp);
116 static void vgonel(struct vnode *);
117 static bool vhold_recycle_free(struct vnode *);
118 static void vdropl_recycle(struct vnode *vp);
119 static void vdrop_recycle(struct vnode *vp);
120 static void vfs_knllock(void *arg);
121 static void vfs_knlunlock(void *arg);
122 static void vfs_knl_assert_lock(void *arg, int what);
123 static void destroy_vpollinfo(struct vpollinfo *vi);
124 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
125 daddr_t startlbn, daddr_t endlbn);
126 static void vnlru_recalc(void);
127
128 static SYSCTL_NODE(_vfs, OID_AUTO, vnode, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
129 "vnode configuration and statistics");
130 static SYSCTL_NODE(_vfs_vnode, OID_AUTO, param, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
131 "vnode configuration");
132 static SYSCTL_NODE(_vfs_vnode, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
133 "vnode statistics");
134 static SYSCTL_NODE(_vfs_vnode, OID_AUTO, vnlru, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
135 "vnode recycling");
136
137 /*
138 * Number of vnodes in existence. Increased whenever getnewvnode()
139 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode.
140 */
141 static u_long __exclusive_cache_line numvnodes;
142
143 SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
144 "Number of vnodes in existence (legacy)");
145 SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, count, CTLFLAG_RD, &numvnodes, 0,
146 "Number of vnodes in existence");
147
148 static counter_u64_t vnodes_created;
149 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
150 "Number of vnodes created by getnewvnode (legacy)");
151 SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, created, CTLFLAG_RD, &vnodes_created,
152 "Number of vnodes created by getnewvnode");
153
154 /*
155 * Conversion tables for conversion from vnode types to inode formats
156 * and back.
157 */
158 __enum_uint8(vtype) iftovt_tab[16] = {
159 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
160 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
161 };
162 int vttoif_tab[10] = {
163 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
164 S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
165 };
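/*
 * Example (sketch): these tables back the IFTOVT() and VTTOIF() macros in
 * sys/vnode.h, which filesystems use to translate between on-disk inode
 * modes and vnode types, e.g. IFTOVT(S_IFDIR) yields VDIR and VTTOIF(VREG)
 * yields S_IFREG.
 */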
166
167 /*
168 * List of allocated vnodes in the system.
169 */
170 static TAILQ_HEAD(freelst, vnode) vnode_list;
171 static struct vnode *vnode_list_free_marker;
172 static struct vnode *vnode_list_reclaim_marker;
173
174 /*
175 * "Free" vnode target. Free vnodes are rarely completely free, but are
176 * just ones that are cheap to recycle. Usually they are for files which
177 * have been stat'd but not read; these usually have inode and namecache
178 * data attached to them. This target is the preferred minimum size of a
179 * sub-cache consisting mostly of such files. The system balances the size
180 * of this sub-cache with its complement to try to prevent either from
181 * thrashing while the other is relatively inactive. The targets express
182 * a preference for the best balance.
183 *
184 * "Above" this target there are 2 further targets (watermarks) related
185 * to recycling of free vnodes. In the best-operating case, the cache is
186 * exactly full, the free list has size between vlowat and vhiwat above the
187 * free target, and recycling from it and normal use maintains this state.
188 * Sometimes the free list is below vlowat or even empty, but this state
189 * is even better for immediate use provided the cache is not full.
190 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
191 * ones) to reach one of these states. The watermarks are currently hard-
192 * coded as 4% and 9% of the available space higher. These and the default
193 * of 25% for wantfreevnodes are too large if the memory size is large.
194 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
195 * whenever vnlru_proc() becomes active.
196 */
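/*
 * Worked example (sketch): with the MAXVNODES_MAX default of 8388608 and
 * wantfreevnodes at 25% (2097152), vnlru_recalc() computes
 * gapvnodes = 8388608 - 2097152 = 6291456, vhiwat = gapvnodes / 11 = 571950
 * (~9%) and vlowat = vhiwat / 2 = 285975 (~4.5%), which is where the
 * "more than 566000 vnodes to reclaim" figure above comes from.
 */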
197 static long wantfreevnodes;
198 static long __exclusive_cache_line freevnodes;
199 static long freevnodes_old;
200
201 static u_long recycles_count;
202 SYSCTL_ULONG(_vfs, OID_AUTO, recycles, CTLFLAG_RD | CTLFLAG_STATS, &recycles_count, 0,
203 "Number of vnodes recycled to meet vnode cache targets (legacy)");
204 SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, recycles, CTLFLAG_RD | CTLFLAG_STATS,
205 &recycles_count, 0,
206 "Number of vnodes recycled to meet vnode cache targets");
207
208 static u_long recycles_free_count;
209 SYSCTL_ULONG(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD | CTLFLAG_STATS,
210 &recycles_free_count, 0,
211 "Number of free vnodes recycled to meet vnode cache targets (legacy)");
212 SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, recycles_free, CTLFLAG_RD | CTLFLAG_STATS,
213 &recycles_free_count, 0,
214 "Number of free vnodes recycled to meet vnode cache targets");
215
216 static counter_u64_t direct_recycles_free_count;
217 SYSCTL_COUNTER_U64(_vfs_vnode_vnlru, OID_AUTO, direct_recycles_free, CTLFLAG_RD,
218 &direct_recycles_free_count,
219 "Number of free vnodes recycled by vn_alloc callers to meet vnode cache targets");
220
221 static counter_u64_t vnode_skipped_requeues;
222 SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, skipped_requeues, CTLFLAG_RD, &vnode_skipped_requeues,
223 "Number of times LRU requeue was skipped due to lock contention");
224
225 static __read_mostly bool vnode_can_skip_requeue;
226 SYSCTL_BOOL(_vfs_vnode_param, OID_AUTO, can_skip_requeue, CTLFLAG_RW,
227 &vnode_can_skip_requeue, 0, "Is LRU requeue skippable");
228
229 static u_long deferred_inact;
230 SYSCTL_ULONG(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD,
231 &deferred_inact, 0, "Number of times inactive processing was deferred");
232
233 /* To keep more than one thread at a time from running vfs_getnewfsid */
234 static struct mtx mntid_mtx;
235
236 /*
237 * Lock for any access to the following:
238 * vnode_list
239 * numvnodes
240 * freevnodes
241 */
242 static struct mtx __exclusive_cache_line vnode_list_mtx;
243
244 /* Publicly exported FS */
245 struct nfs_public nfs_pub;
246
247 static uma_zone_t buf_trie_zone;
248 static smr_t buf_trie_smr;
249
250 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
251 static uma_zone_t vnode_zone;
252 MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll");
253
254 __read_frequently smr_t vfs_smr;
255
256 /*
257 * The workitem queue.
258 *
259 * It is useful to delay writes of file data and filesystem metadata
260 * for tens of seconds so that quickly created and deleted files need
261 * not waste disk bandwidth being created and removed. To realize this,
262 * we append vnodes to a "workitem" queue. When running with a soft
263 * updates implementation, most pending metadata dependencies should
264 * not wait for more than a few seconds. Thus, metadata on mounted block
265 * devices is delayed only about half the time that file data is delayed.
266 * Similarly, directory updates are more critical, so are only delayed
267 * about a third the time that file data is delayed. Thus, there are
268 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
269 * one each second (driven off the filesystem syncer process). The
270 * syncer_delayno variable indicates the next queue that is to be processed.
271 * Items that need to be processed soon are placed in this queue:
272 *
273 * syncer_workitem_pending[syncer_delayno]
274 *
275 * A delay of fifteen seconds is done by placing the request fifteen
276 * entries later in the queue:
277 *
278 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
279 *
280 */
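/*
 * For example (sketch), a regular file's bufobj synced with the default
 * filedelay of 30 seconds ends up in
 *
 *	syncer_workitem_pending[(syncer_delayno + 30) & syncer_mask]
 *
 * and is visited roughly 30 slots (seconds) from now as the syncer advances
 * one slot per second.
 */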
281 static int syncer_delayno;
282 static long syncer_mask;
283 LIST_HEAD(synclist, bufobj);
284 static struct synclist *syncer_workitem_pending;
285 /*
286 * The sync_mtx protects:
287 * bo->bo_synclist
288 * sync_vnode_count
289 * syncer_delayno
290 * syncer_state
291 * syncer_workitem_pending
292 * syncer_worklist_len
293 * rushjob
294 */
295 static struct mtx sync_mtx;
296 static struct cv sync_wakeup;
297
298 #define SYNCER_MAXDELAY 32
299 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */
300 static int syncdelay = 30; /* max time to delay syncing data */
301 static int filedelay = 30; /* time to delay syncing files */
302 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
303 "Time to delay syncing files (in seconds)");
304 static int dirdelay = 29; /* time to delay syncing directories */
305 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
306 "Time to delay syncing directories (in seconds)");
307 static int metadelay = 28; /* time to delay syncing metadata */
308 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
309 "Time to delay syncing metadata (in seconds)");
310 static int rushjob; /* number of slots to run ASAP */
311 static int stat_rush_requests; /* number of times I/O speeded up */
312 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
313 "Number of times I/O speeded up (rush requests)");
314
315 #define VDBATCH_SIZE 8
316 struct vdbatch {
317 u_int index;
318 struct mtx lock;
319 struct vnode *tab[VDBATCH_SIZE];
320 };
321 DPCPU_DEFINE_STATIC(struct vdbatch, vd);
322
323 static void vdbatch_dequeue(struct vnode *vp);
324
325 /*
326 * The syncer will require at least SYNCER_MAXDELAY iterations to shut down;
327 * we probably don't want to pause for the whole second each time.
328 */
329 #define SYNCER_SHUTDOWN_SPEEDUP 32
330 static int sync_vnode_count;
331 static int syncer_worklist_len;
332 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
333 syncer_state;
334
335 /* Target for maximum number of vnodes. */
336 u_long desiredvnodes;
337 static u_long gapvnodes; /* gap between wanted and desired */
338 static u_long vhiwat; /* enough extras after expansion */
339 static u_long vlowat; /* minimal extras before expansion */
340 static bool vstir; /* nonzero to stir non-free vnodes */
341 static volatile int vsmalltrigger = 8; /* pref to keep if > this many pages */
342
343 static u_long vnlru_read_freevnodes(void);
344
345 /*
346 * Note that no attempt is made to sanitize these parameters.
347 */
348 static int
349 sysctl_maxvnodes(SYSCTL_HANDLER_ARGS)
350 {
351 u_long val;
352 int error;
353
354 val = desiredvnodes;
355 error = sysctl_handle_long(oidp, &val, 0, req);
356 if (error != 0 || req->newptr == NULL)
357 return (error);
358
359 if (val == desiredvnodes)
360 return (0);
361 mtx_lock(&vnode_list_mtx);
362 desiredvnodes = val;
363 wantfreevnodes = desiredvnodes / 4;
364 vnlru_recalc();
365 mtx_unlock(&vnode_list_mtx);
366 /*
367 * XXX There is no protection against multiple threads changing
368 * desiredvnodes at the same time. Locking above only helps vnlru and
369 * getnewvnode.
370 */
371 vfs_hash_changesize(desiredvnodes);
372 cache_changesize(desiredvnodes);
373 return (0);
374 }
375
376 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
377 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
378 "LU", "Target for maximum number of vnodes (legacy)");
379 SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, limit,
380 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
381 "LU", "Target for maximum number of vnodes");
382
383 static int
384 sysctl_freevnodes(SYSCTL_HANDLER_ARGS)
385 {
386 u_long rfreevnodes;
387
388 rfreevnodes = vnlru_read_freevnodes();
389 return (sysctl_handle_long(oidp, &rfreevnodes, 0, req));
390 }
391
392 SYSCTL_PROC(_vfs, OID_AUTO, freevnodes,
393 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes,
394 "LU", "Number of \"free\" vnodes (legacy)");
395 SYSCTL_PROC(_vfs_vnode_stats, OID_AUTO, free,
396 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes,
397 "LU", "Number of \"free\" vnodes");
398
399 static int
400 sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS)
401 {
402 u_long val;
403 int error;
404
405 val = wantfreevnodes;
406 error = sysctl_handle_long(oidp, &val, 0, req);
407 if (error != 0 || req->newptr == NULL)
408 return (error);
409
410 if (val == wantfreevnodes)
411 return (0);
412 mtx_lock(&vnode_list_mtx);
413 wantfreevnodes = val;
414 vnlru_recalc();
415 mtx_unlock(&vnode_list_mtx);
416 return (0);
417 }
418
419 SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes,
420 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
421 "LU", "Target for minimum number of \"free\" vnodes (legacy)");
422 SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, wantfree,
423 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
424 "LU", "Target for minimum number of \"free\" vnodes");
425
426 static int vnlru_nowhere;
427 SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, failed_runs, CTLFLAG_RD | CTLFLAG_STATS,
428 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");
429
430 static int
431 sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
432 {
433 struct vnode *vp;
434 struct nameidata nd;
435 char *buf;
436 unsigned long ndflags;
437 int error;
438
439 if (req->newptr == NULL)
440 return (EINVAL);
441 if (req->newlen >= PATH_MAX)
442 return (E2BIG);
443
444 buf = malloc(PATH_MAX, M_TEMP, M_WAITOK);
445 error = SYSCTL_IN(req, buf, req->newlen);
446 if (error != 0)
447 goto out;
448
449 buf[req->newlen] = '\0';
450
451 ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1;
452 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf);
453 if ((error = namei(&nd)) != 0)
454 goto out;
455 vp = nd.ni_vp;
456
457 if (VN_IS_DOOMED(vp)) {
458 /*
459 * This vnode is being recycled. Return != 0 to let the caller
460 * know that the sysctl had no effect. Return EAGAIN because a
461 * subsequent call will likely succeed (since namei will create
462 * a new vnode if necessary)
463 */
464 error = EAGAIN;
465 goto putvnode;
466 }
467
468 vgone(vp);
469 putvnode:
470 vput(vp);
471 NDFREE_PNBUF(&nd);
472 out:
473 free(buf, M_TEMP);
474 return (error);
475 }
476
477 static int
478 sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS)
479 {
480 struct thread *td = curthread;
481 struct vnode *vp;
482 struct file *fp;
483 int error;
484 int fd;
485
486 if (req->newptr == NULL)
487 return (EBADF);
488
489 error = sysctl_handle_int(oidp, &fd, 0, req);
490 if (error != 0)
491 return (error);
492 error = getvnode(curthread, fd, &cap_fcntl_rights, &fp);
493 if (error != 0)
494 return (error);
495 vp = fp->f_vnode;
496
497 error = vn_lock(vp, LK_EXCLUSIVE);
498 if (error != 0)
499 goto drop;
500
501 vgone(vp);
502 VOP_UNLOCK(vp);
503 drop:
504 fdrop(fp, td);
505 return (error);
506 }
507
508 SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode,
509 CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
510 sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
511 SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode,
512 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
513 sysctl_ftry_reclaim_vnode, "I",
514 "Try to reclaim a vnode by its file descriptor");
515
516 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
517 #define vnsz2log 8
518 #ifndef DEBUG_LOCKS
519 _Static_assert(sizeof(struct vnode) >= 1UL << vnsz2log &&
520 sizeof(struct vnode) < 1UL << (vnsz2log + 1),
521 "vnsz2log needs to be updated");
522 #endif
523
524 /*
525 * Support for the bufobj clean & dirty pctrie.
526 */
527 static void *
528 buf_trie_alloc(struct pctrie *ptree)
529 {
530 return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT));
531 }
532
533 static void
534 buf_trie_free(struct pctrie *ptree, void *node)
535 {
536 uma_zfree_smr(buf_trie_zone, node);
537 }
538 PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free,
539 buf_trie_smr);
540
541 /*
542 * Lookup the next element greater than or equal to lblkno, accounting for the
543 * fact that, for pctries, negative values are greater than nonnegative ones.
544 */
545 static struct buf *
546 buf_lookup_ge(struct bufv *bv, daddr_t lblkno)
547 {
548 struct buf *bp;
549
550 bp = BUF_PCTRIE_LOOKUP_GE(&bv->bv_root, lblkno);
551 if (bp == NULL && lblkno < 0)
552 bp = BUF_PCTRIE_LOOKUP_GE(&bv->bv_root, 0);
553 if (bp != NULL && bp->b_lblkno < lblkno)
554 bp = NULL;
555 return (bp);
556 }
557
558 /*
559 * Insert bp, and find the next element smaller than bp, accounting for the fact
560 * that, for pctries, negative values are greater than nonnegative ones.
561 */
562 static int
563 buf_insert_lookup_le(struct bufv *bv, struct buf *bp, struct buf **n)
564 {
565 int error;
566
567 error = BUF_PCTRIE_INSERT_LOOKUP_LE(&bv->bv_root, bp, n);
568 if (error != EEXIST) {
569 if (*n == NULL && bp->b_lblkno >= 0)
570 *n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, ~0L);
571 if (*n != NULL && (*n)->b_lblkno >= bp->b_lblkno)
572 *n = NULL;
573 }
574 return (error);
575 }
576
577 /*
578 * Initialize the vnode management data structures.
579 *
580 * Reevaluate the following cap on the number of vnodes after the physical
581 * memory size exceeds 512GB. In the limit, as the physical memory size
582 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
583 */
584 #ifndef MAXVNODES_MAX
585 #define MAXVNODES_MAX (512UL * 1024 * 1024 / 64) /* 8M */
586 #endif
587
588 static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");
589
590 static struct vnode *
591 vn_alloc_marker(struct mount *mp)
592 {
593 struct vnode *vp;
594
595 vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
596 vp->v_type = VMARKER;
597 vp->v_mount = mp;
598
599 return (vp);
600 }
601
602 static void
603 vn_free_marker(struct vnode *vp)
604 {
605
606 MPASS(vp->v_type == VMARKER);
607 free(vp, M_VNODE_MARKER);
608 }
609
610 #ifdef KASAN
611 static int
612 vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused)
613 {
614 kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0);
615 return (0);
616 }
617
618 static void
619 vnode_dtor(void *mem, int size, void *arg __unused)
620 {
621 size_t end1, end2, off1, off2;
622
623 _Static_assert(offsetof(struct vnode, v_vnodelist) <
624 offsetof(struct vnode, v_dbatchcpu),
625 "KASAN marks require updating");
626
627 off1 = offsetof(struct vnode, v_vnodelist);
628 off2 = offsetof(struct vnode, v_dbatchcpu);
629 end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist);
630 end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu);
631
632 /*
633 * Access to the v_vnodelist and v_dbatchcpu fields is permitted even
634 * after the vnode has been freed. Try to get some KASAN coverage by
635 * marking everything except those two fields as invalid. Because
636 * KASAN's tracking is not byte-granular, any preceding fields sharing
637 * the same 8-byte aligned word must also be marked valid.
638 */
639
640 /* Handle the area from the start until v_vnodelist... */
641 off1 = rounddown2(off1, KASAN_SHADOW_SCALE);
642 kasan_mark(mem, off1, off1, KASAN_UMA_FREED);
643
644 /* ... then the area between v_vnodelist and v_dbatchcpu ... */
645 off1 = roundup2(end1, KASAN_SHADOW_SCALE);
646 off2 = rounddown2(off2, KASAN_SHADOW_SCALE);
647 if (off2 > off1)
648 kasan_mark((void *)((char *)mem + off1), off2 - off1,
649 off2 - off1, KASAN_UMA_FREED);
650
651 /* ... and finally the area from v_dbatchcpu to the end. */
652 off2 = roundup2(end2, KASAN_SHADOW_SCALE);
653 kasan_mark((void *)((char *)mem + off2), size - off2, size - off2,
654 KASAN_UMA_FREED);
655 }
656 #endif /* KASAN */
657
658 /*
659 * Initialize a vnode as it first enters the zone.
660 */
661 static int
662 vnode_init(void *mem, int size, int flags)
663 {
664 struct vnode *vp;
665
666 vp = mem;
667 bzero(vp, size);
668 /*
669 * Setup locks.
670 */
671 vp->v_vnlock = &vp->v_lock;
672 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
673 /*
674 * By default, don't allow shared locks unless filesystems opt-in.
675 */
676 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
677 LK_NOSHARE | LK_IS_VNODE);
678 /*
679 * Initialize bufobj.
680 */
681 bufobj_init(&vp->v_bufobj, vp);
682 /*
683 * Initialize namecache.
684 */
685 cache_vnode_init(vp);
686 /*
687 * Initialize rangelocks.
688 */
689 rangelock_init(&vp->v_rl);
690
691 vp->v_dbatchcpu = NOCPU;
692
693 vp->v_state = VSTATE_DEAD;
694
695 /*
696 * Check vhold_recycle_free for an explanation.
697 */
698 vp->v_holdcnt = VHOLD_NO_SMR;
699 vp->v_type = VNON;
700 mtx_lock(&vnode_list_mtx);
701 TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist);
702 mtx_unlock(&vnode_list_mtx);
703 return (0);
704 }
705
706 /*
707 * Free a vnode when it is cleared from the zone.
708 */
709 static void
710 vnode_fini(void *mem, int size)
711 {
712 struct vnode *vp;
713 struct bufobj *bo;
714
715 vp = mem;
716 vdbatch_dequeue(vp);
717 mtx_lock(&vnode_list_mtx);
718 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
719 mtx_unlock(&vnode_list_mtx);
720 rangelock_destroy(&vp->v_rl);
721 lockdestroy(vp->v_vnlock);
722 mtx_destroy(&vp->v_interlock);
723 bo = &vp->v_bufobj;
724 rw_destroy(BO_LOCKPTR(bo));
725
726 kasan_mark(mem, size, size, 0);
727 }
728
729 /*
730 * Provide the size of NFS nclnode and NFS fh for calculation of the
731 * vnode memory consumption. The size is specified directly to
732 * eliminate dependency on NFS-private header.
733 *
734 * Other filesystems may use bigger or smaller (like UFS and ZFS)
735 * private inode data, but the NFS-based estimation is ample enough.
736 * Still, we care about differences in the size between 64- and 32-bit
737 * platforms.
738 *
739 * Namecache structure size is heuristically
740 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
741 */
742 #ifdef _LP64
743 #define NFS_NCLNODE_SZ (528 + 64)
744 #define NC_SZ 148
745 #else
746 #define NFS_NCLNODE_SZ (360 + 32)
747 #define NC_SZ 92
748 #endif
749
750 static void
751 vntblinit(void *dummy __unused)
752 {
753 struct vdbatch *vd;
754 uma_ctor ctor;
755 uma_dtor dtor;
756 int cpu, physvnodes, virtvnodes;
757
758 /*
759 * Desiredvnodes is a function of the physical memory size and the
760 * kernel's heap size. Generally speaking, it scales with the
761 * physical memory size. The ratio of desiredvnodes to the physical
762 * memory size is 1:16 until desiredvnodes exceeds 98,304.
763 * Thereafter, the
764 * marginal ratio of desiredvnodes to the physical memory size is
765 * 1:64. However, desiredvnodes is limited by the kernel's heap
766 * size. The memory required by desiredvnodes vnodes and vm objects
767 * must not exceed 1/10th of the kernel's heap size.
768 */
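/*
 * Worked example (sketch): on a machine with roughly 16 GB of RAM,
 * pgtok(vm_cnt.v_page_count) is about 16777216 KB, so
 * physvnodes ~= maxproc + 16777216/64 + 3 * min(1572864, 16777216)/64
 *            ~= maxproc + 262144 + 73728,
 * i.e. about 336k vnodes before the virtvnodes (kernel heap) clamp below
 * is applied.
 */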
769 physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
770 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
771 virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
772 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
773 desiredvnodes = min(physvnodes, virtvnodes);
774 if (desiredvnodes > MAXVNODES_MAX) {
775 if (bootverbose)
776 printf("Reducing kern.maxvnodes %lu -> %lu\n",
777 desiredvnodes, MAXVNODES_MAX);
778 desiredvnodes = MAXVNODES_MAX;
779 }
780 wantfreevnodes = desiredvnodes / 4;
781 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
782 TAILQ_INIT(&vnode_list);
783 mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF);
784 /*
785 * The lock is taken to appease WITNESS.
786 */
787 mtx_lock(&vnode_list_mtx);
788 vnlru_recalc();
789 mtx_unlock(&vnode_list_mtx);
790 vnode_list_free_marker = vn_alloc_marker(NULL);
791 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist);
792 vnode_list_reclaim_marker = vn_alloc_marker(NULL);
793 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist);
794
795 #ifdef KASAN
796 ctor = vnode_ctor;
797 dtor = vnode_dtor;
798 #else
799 ctor = NULL;
800 dtor = NULL;
801 #endif
802 vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor,
803 vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN);
804 uma_zone_set_smr(vnode_zone, vfs_smr);
805
806 /*
807 * Preallocate enough nodes to support one per buf so that
808 * an insert cannot fail. reassignbuf() callers cannot
809 * tolerate insertion failure.
810 */
811 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
812 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
813 UMA_ZONE_NOFREE | UMA_ZONE_SMR);
814 buf_trie_smr = uma_zone_get_smr(buf_trie_zone);
815 uma_prealloc(buf_trie_zone, nbuf);
816
817 vnodes_created = counter_u64_alloc(M_WAITOK);
818 direct_recycles_free_count = counter_u64_alloc(M_WAITOK);
819 vnode_skipped_requeues = counter_u64_alloc(M_WAITOK);
820
821 /*
822 * Initialize the filesystem syncer.
823 */
824 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
825 &syncer_mask);
826 syncer_maxdelay = syncer_mask + 1;
827 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
828 cv_init(&sync_wakeup, "syncer");
829
830 CPU_FOREACH(cpu) {
831 vd = DPCPU_ID_PTR((cpu), vd);
832 bzero(vd, sizeof(*vd));
833 mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF);
834 }
835 }
836 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);
837
838 /*
839 * Mark a mount point as busy. Used to synchronize access and to delay
840 * unmounting. Note that mountlist_mtx is not released on failure.
841 *
842 * vfs_busy() is a custom lock; it can block the caller.
843 * vfs_busy() only sleeps if an unmount is active on the mount point.
844 * For a mountpoint mp, the vfs_busy-enforced lock is ordered before the
845 * lock of any vnode belonging to mp.
846 *
847 * Lookup uses vfs_busy() to traverse mount points.
848 * root fs var fs
849 * / vnode lock A / vnode lock (/var) D
850 * /var vnode lock B /log vnode lock(/var/log) E
851 * vfs_busy lock C vfs_busy lock F
852 *
853 * Within each file system, the lock order is C->A->B and F->D->E.
854 *
855 * When traversing across mounts, the system follows that lock order:
856 *
857 * C->A->B
858 * |
859 * +->F->D->E
860 *
861 * The lookup() process for namei("/var") illustrates the process:
862 * 1. VOP_LOOKUP() obtains B while A is held
863 * 2. vfs_busy() obtains a shared lock on F while A and B are held
864 * 3. vput() releases lock on B
865 * 4. vput() releases lock on A
866 * 5. VFS_ROOT() obtains lock on D while shared lock on F is held
867 * 6. vfs_unbusy() releases shared lock on F
868 * 7. vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
869 * Attempt to lock A (instead of vp_crossmp) while D is held would
870 * violate the global order, causing deadlocks.
871 *
872 * dounmount() locks B while F is drained. Note that for stacked
873 * filesystems, D and B in the example above may be the same lock,
874 * which introduces a potential lock order reversal deadlock between
875 * dounmount() and step 5 above. These filesystems may avoid the LOR
876 * by setting VV_CROSSLOCK on the covered vnode so that lock B will
877 * remain held until after step 5.
878 */
879 int
880 vfs_busy(struct mount *mp, int flags)
881 {
882 struct mount_pcpu *mpcpu;
883
884 MPASS((flags & ~MBF_MASK) == 0);
885 CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);
886
887 if (vfs_op_thread_enter(mp, mpcpu)) {
888 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
889 MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
890 MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
891 vfs_mp_count_add_pcpu(mpcpu, ref, 1);
892 vfs_mp_count_add_pcpu(mpcpu, lockref, 1);
893 vfs_op_thread_exit(mp, mpcpu);
894 if (flags & MBF_MNTLSTLOCK)
895 mtx_unlock(&mountlist_mtx);
896 return (0);
897 }
898
899 MNT_ILOCK(mp);
900 vfs_assert_mount_counters(mp);
901 MNT_REF(mp);
902 /*
903 * If mount point is currently being unmounted, sleep until the
904 * mount point fate is decided. If thread doing the unmounting fails,
905 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
906 * that this mount point has survived the unmount attempt and vfs_busy
907 * should retry. Otherwise the unmounter thread will set MNTK_REFEXPIRE
908 * flag in addition to MNTK_UNMOUNT, indicating that mount point is
909 * about to be really destroyed. vfs_busy needs to release its
910 * reference on the mount point in this case and return with ENOENT,
911 * telling the caller the mount it tried to busy is no longer valid.
912 */
913 while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
914 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers),
915 ("%s: non-empty upper mount list with pending unmount",
916 __func__));
917 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
918 MNT_REL(mp);
919 MNT_IUNLOCK(mp);
920 CTR1(KTR_VFS, "%s: failed busying before sleeping",
921 __func__);
922 return (ENOENT);
923 }
924 if (flags & MBF_MNTLSTLOCK)
925 mtx_unlock(&mountlist_mtx);
926 mp->mnt_kern_flag |= MNTK_MWAIT;
927 msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
928 if (flags & MBF_MNTLSTLOCK)
929 mtx_lock(&mountlist_mtx);
930 MNT_ILOCK(mp);
931 }
932 if (flags & MBF_MNTLSTLOCK)
933 mtx_unlock(&mountlist_mtx);
934 mp->mnt_lockref++;
935 MNT_IUNLOCK(mp);
936 return (0);
937 }
938
939 /*
940 * Free a busy filesystem.
941 */
942 void
943 vfs_unbusy(struct mount *mp)
944 {
945 struct mount_pcpu *mpcpu;
946 int c;
947
948 CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
949
950 if (vfs_op_thread_enter(mp, mpcpu)) {
951 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
952 vfs_mp_count_sub_pcpu(mpcpu, lockref, 1);
953 vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
954 vfs_op_thread_exit(mp, mpcpu);
955 return;
956 }
957
958 MNT_ILOCK(mp);
959 vfs_assert_mount_counters(mp);
960 MNT_REL(mp);
961 c = --mp->mnt_lockref;
962 if (mp->mnt_vfs_ops == 0) {
963 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
964 MNT_IUNLOCK(mp);
965 return;
966 }
967 if (c < 0)
968 vfs_dump_mount_counters(mp);
969 if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
970 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
971 CTR1(KTR_VFS, "%s: waking up waiters", __func__);
972 mp->mnt_kern_flag &= ~MNTK_DRAINING;
973 wakeup(&mp->mnt_lockref);
974 }
975 MNT_IUNLOCK(mp);
976 }
977
978 /*
979 * Lookup a mount point by filesystem identifier.
980 */
981 struct mount *
982 vfs_getvfs(fsid_t *fsid)
983 {
984 struct mount *mp;
985
986 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
987 mtx_lock(&mountlist_mtx);
988 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
989 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
990 vfs_ref(mp);
991 mtx_unlock(&mountlist_mtx);
992 return (mp);
993 }
994 }
995 mtx_unlock(&mountlist_mtx);
996 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
997 return ((struct mount *) 0);
998 }
999
1000 /*
1001 * Lookup a mount point by filesystem identifier, busying it before
1002 * returning.
1003 *
1004 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
1005 * cache for popular filesystem identifiers. The cache is lockless, using
1006 * the fact that struct mount's are never freed. In the worst case we may
1007 * get a pointer to an unmounted or even a different filesystem, so we
1008 * have to check what we got, and fall back to the slow path if so.
1009 */
1010 struct mount *
1011 vfs_busyfs(fsid_t *fsid)
1012 {
1013 #define FSID_CACHE_SIZE 256
1014 typedef struct mount * volatile vmp_t;
1015 static vmp_t cache[FSID_CACHE_SIZE];
1016 struct mount *mp;
1017 int error;
1018 uint32_t hash;
1019
1020 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
1021 hash = fsid->val[0] ^ fsid->val[1];
1022 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
1023 mp = cache[hash];
1024 if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0)
1025 goto slow;
1026 if (vfs_busy(mp, 0) != 0) {
1027 cache[hash] = NULL;
1028 goto slow;
1029 }
1030 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0)
1031 return (mp);
1032 else
1033 vfs_unbusy(mp);
1034
1035 slow:
1036 mtx_lock(&mountlist_mtx);
1037 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1038 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
1039 error = vfs_busy(mp, MBF_MNTLSTLOCK);
1040 if (error) {
1041 cache[hash] = NULL;
1042 mtx_unlock(&mountlist_mtx);
1043 return (NULL);
1044 }
1045 cache[hash] = mp;
1046 return (mp);
1047 }
1048 }
1049 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
1050 mtx_unlock(&mountlist_mtx);
1051 return ((struct mount *) 0);
1052 }
1053
1054 /*
1055 * Check if a user can access privileged mount options.
1056 */
1057 int
1058 vfs_suser(struct mount *mp, struct thread *td)
1059 {
1060 int error;
1061
1062 if (jailed(td->td_ucred)) {
1063 /*
1064 * If the jail of the calling thread lacks permission for
1065 * this type of file system, deny immediately.
1066 */
1067 if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
1068 return (EPERM);
1069
1070 /*
1071 * If the file system was mounted outside the jail of the
1072 * calling thread, deny immediately.
1073 */
1074 if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
1075 return (EPERM);
1076 }
1077
1078 /*
1079 * If file system supports delegated administration, we don't check
1080 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
1081 * by the file system itself.
1082 * If this is not the user that did original mount, we check for
1083 * the PRIV_VFS_MOUNT_OWNER privilege.
1084 */
1085 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
1086 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
1087 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
1088 return (error);
1089 }
1090 return (0);
1091 }
1092
1093 /*
1094 * Get a new unique fsid. Try to make its val[0] unique, since this value
1095 * will be used to create fake device numbers for stat(). Also try (but
1096 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
1097 * support 16-bit device numbers. We end up with unique val[0]'s for the
1098 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
1099 *
1100 * Keep in mind that several mounts may be running in parallel. Starting
1101 * the search one past where the previous search terminated is both a
1102 * micro-optimization and a defense against returning the same fsid to
1103 * different mounts.
1104 */
1105 void
1106 vfs_getnewfsid(struct mount *mp)
1107 {
1108 static uint16_t mntid_base;
1109 struct mount *nmp;
1110 fsid_t tfsid;
1111 int mtype;
1112
1113 CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
1114 mtx_lock(&mntid_mtx);
1115 mtype = mp->mnt_vfc->vfc_typenum;
1116 tfsid.val[1] = mtype;
1117 mtype = (mtype & 0xFF) << 24;
1118 for (;;) {
1119 tfsid.val[0] = makedev(255,
1120 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
1121 mntid_base++;
1122 if ((nmp = vfs_getvfs(&tfsid)) == NULL)
1123 break;
1124 vfs_rel(nmp);
1125 }
1126 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
1127 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
1128 mtx_unlock(&mntid_mtx);
1129 }
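/*
 * Example (sketch): for vfc_typenum 0x35 and mntid_base 0x1234 the minor
 * passed to makedev() above is 0x35000000 | (0x1200 << 8) | 0x34 ==
 * 0x35120034: the type lands in bits 24-31 and the 16-bit mount id is split
 * across bits 16-23 and 0-7, so val[0] mod 2^16 depends only on the low
 * byte of mntid_base, which is what limits the "unique mod 2^16" property
 * to the first 2^8 calls mentioned above.
 */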
1130
1131 /*
1132 * Knob to control the precision of file timestamps:
1133 *
1134 * 0 = seconds only; nanoseconds zeroed.
1135 * 1 = seconds and nanoseconds, accurate within 1/HZ.
1136 * 2 = seconds and nanoseconds, truncated to microseconds.
1137 * >=3 = seconds and nanoseconds, maximum precision.
1138 */
1139 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
1140
1141 static int timestamp_precision = TSP_USEC;
1142 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
1143 &timestamp_precision, 0, "File timestamp precision (0: seconds, "
1144 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
1145 "3+: sec + ns (max. precision))");
1146
1147 /*
1148 * Get a current timestamp.
1149 */
1150 void
1151 vfs_timestamp(struct timespec *tsp)
1152 {
1153 struct timeval tv;
1154
1155 switch (timestamp_precision) {
1156 case TSP_SEC:
1157 tsp->tv_sec = time_second;
1158 tsp->tv_nsec = 0;
1159 break;
1160 case TSP_HZ:
1161 getnanotime(tsp);
1162 break;
1163 case TSP_USEC:
1164 microtime(&tv);
1165 TIMEVAL_TO_TIMESPEC(&tv, tsp);
1166 break;
1167 case TSP_NSEC:
1168 default:
1169 nanotime(tsp);
1170 break;
1171 }
1172 }
1173
1174 /*
1175 * Set vnode attributes to VNOVAL
1176 */
1177 void
1178 vattr_null(struct vattr *vap)
1179 {
1180
1181 vap->va_type = VNON;
1182 vap->va_size = VNOVAL;
1183 vap->va_bytes = VNOVAL;
1184 vap->va_mode = VNOVAL;
1185 vap->va_nlink = VNOVAL;
1186 vap->va_uid = VNOVAL;
1187 vap->va_gid = VNOVAL;
1188 vap->va_fsid = VNOVAL;
1189 vap->va_fileid = VNOVAL;
1190 vap->va_blocksize = VNOVAL;
1191 vap->va_rdev = VNOVAL;
1192 vap->va_atime.tv_sec = VNOVAL;
1193 vap->va_atime.tv_nsec = VNOVAL;
1194 vap->va_mtime.tv_sec = VNOVAL;
1195 vap->va_mtime.tv_nsec = VNOVAL;
1196 vap->va_ctime.tv_sec = VNOVAL;
1197 vap->va_ctime.tv_nsec = VNOVAL;
1198 vap->va_birthtime.tv_sec = VNOVAL;
1199 vap->va_birthtime.tv_nsec = VNOVAL;
1200 vap->va_flags = VNOVAL;
1201 vap->va_gen = VNOVAL;
1202 vap->va_vaflags = 0;
1203 vap->va_filerev = VNOVAL;
1204 }
1205
1206 /*
1207 * Try to reduce the total number of vnodes.
1208 *
1209 * This routine (and its user) are buggy in at least the following ways:
1210 * - all parameters were picked years ago when RAM sizes were significantly
1211 * smaller
1212 * - it can pick vnodes based on pages used by the vm object, but filesystems
1213 * like ZFS don't use it, making the pick broken
1214 * - since ZFS has its own aging policy it gets partially combated by this one
1215 * - a dedicated method should be provided for filesystems to let them decide
1216 * whether the vnode should be recycled
1217 *
1218 * This routine is called when we have too many vnodes. It attempts
1219 * to free <count> vnodes and will potentially free vnodes that still
1220 * have VM backing store (VM backing store is typically the cause
1221 * of a vnode blowout so we want to do this). Therefore, this operation
1222 * is not considered cheap.
1223 *
1224 * A number of conditions may prevent a vnode from being reclaimed.
1225 * the buffer cache may have references on the vnode, a directory
1226 * vnode may still have references due to the namei cache representing
1227 * underlying files, or the vnode may be in active use. It is not
1228 * desirable to reuse such vnodes. These conditions may cause the
1229 * number of vnodes to reach some minimum value regardless of what
1230 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low.
1231 *
1232 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
1233 * entries if this argument is true
1234 * @param trigger Only reclaim vnodes with fewer than this many resident
1235 * pages.
1236 * @param target How many vnodes to reclaim.
1237 * @return The number of vnodes that were reclaimed.
1238 */
1239 static int
1240 vlrureclaim(bool reclaim_nc_src, int trigger, u_long target)
1241 {
1242 struct vnode *vp, *mvp;
1243 struct mount *mp;
1244 struct vm_object *object;
1245 u_long done;
1246 bool retried;
1247
1248 mtx_assert(&vnode_list_mtx, MA_OWNED);
1249
1250 retried = false;
1251 done = 0;
1252
1253 mvp = vnode_list_reclaim_marker;
1254 restart:
1255 vp = mvp;
1256 while (done < target) {
1257 vp = TAILQ_NEXT(vp, v_vnodelist);
1258 if (__predict_false(vp == NULL))
1259 break;
1260
1261 if (__predict_false(vp->v_type == VMARKER))
1262 continue;
1263
1264 /*
1265 * If it's been deconstructed already, it's still
1266 * referenced, or it exceeds the trigger, skip it.
1267 * Also skip free vnodes. We are trying to make space
1268 * for more free vnodes, not reduce their count.
1269 */
1270 if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
1271 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
1272 goto next_iter;
1273
1274 if (vp->v_type == VBAD || vp->v_type == VNON)
1275 goto next_iter;
1276
1277 object = atomic_load_ptr(&vp->v_object);
1278 if (object == NULL || object->resident_page_count > trigger) {
1279 goto next_iter;
1280 }
1281
1282 /*
1283 * Handle races against vnode allocation. Filesystems lock the
1284 * vnode some time after it gets returned from getnewvnode,
1285 * despite type and hold count being manipulated earlier.
1286 * Resorting to checking v_mount restores guarantees present
1287 * before the global list was reworked to contain all vnodes.
1288 */
1289 if (!VI_TRYLOCK(vp))
1290 goto next_iter;
1291 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
1292 VI_UNLOCK(vp);
1293 goto next_iter;
1294 }
1295 if (vp->v_mount == NULL) {
1296 VI_UNLOCK(vp);
1297 goto next_iter;
1298 }
1299 vholdl(vp);
1300 VI_UNLOCK(vp);
1301 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
1302 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
1303 mtx_unlock(&vnode_list_mtx);
1304
1305 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1306 vdrop_recycle(vp);
1307 goto next_iter_unlocked;
1308 }
1309 if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) {
1310 vdrop_recycle(vp);
1311 vn_finished_write(mp);
1312 goto next_iter_unlocked;
1313 }
1314
1315 VI_LOCK(vp);
1316 if (vp->v_usecount > 0 ||
1317 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
1318 (vp->v_object != NULL && vp->v_object->handle == vp &&
1319 vp->v_object->resident_page_count > trigger)) {
1320 VOP_UNLOCK(vp);
1321 vdropl_recycle(vp);
1322 vn_finished_write(mp);
1323 goto next_iter_unlocked;
1324 }
1325 recycles_count++;
1326 vgonel(vp);
1327 VOP_UNLOCK(vp);
1328 vdropl_recycle(vp);
1329 vn_finished_write(mp);
1330 done++;
1331 next_iter_unlocked:
1332 maybe_yield();
1333 mtx_lock(&vnode_list_mtx);
1334 goto restart;
1335 next_iter:
1336 MPASS(vp->v_type != VMARKER);
1337 if (!should_yield())
1338 continue;
1339 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
1340 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
1341 mtx_unlock(&vnode_list_mtx);
1342 kern_yield(PRI_USER);
1343 mtx_lock(&vnode_list_mtx);
1344 goto restart;
1345 }
1346 if (done == 0 && !retried) {
1347 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
1348 TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
1349 retried = true;
1350 goto restart;
1351 }
1352 return (done);
1353 }
1354
1355 static int max_free_per_call = 10000;
1356 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_free_per_call, 0,
1357 "limit on vnode free requests per call to the vnlru_free routine (legacy)");
1358 SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, max_free_per_call, CTLFLAG_RW,
1359 &max_free_per_call, 0,
1360 "limit on vnode free requests per call to the vnlru_free routine");
1361
1362 /*
1363 * Attempt to recycle requested amount of free vnodes.
1364 */
1365 static int
1366 vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp, bool isvnlru)
1367 {
1368 struct vnode *vp;
1369 struct mount *mp;
1370 int ocount;
1371 bool retried;
1372
1373 mtx_assert(&vnode_list_mtx, MA_OWNED);
1374 if (count > max_free_per_call)
1375 count = max_free_per_call;
1376 if (count == 0) {
1377 mtx_unlock(&vnode_list_mtx);
1378 return (0);
1379 }
1380 ocount = count;
1381 retried = false;
1382 vp = mvp;
1383 for (;;) {
1384 vp = TAILQ_NEXT(vp, v_vnodelist);
1385 if (__predict_false(vp == NULL)) {
1386 /*
1387 * The free vnode marker can be past eligible vnodes:
1388 * 1. if vdbatch_process trylock failed
1389 * 2. if vtryrecycle failed
1390 *
1391 * If so, start the scan from scratch.
1392 */
1393 if (!retried && vnlru_read_freevnodes() > 0) {
1394 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
1395 TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
1396 vp = mvp;
1397 retried = true;
1398 continue;
1399 }
1400
1401 /*
1402 * Give up
1403 */
1404 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
1405 TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist);
1406 mtx_unlock(&vnode_list_mtx);
1407 break;
1408 }
1409 if (__predict_false(vp->v_type == VMARKER))
1410 continue;
1411 if (vp->v_holdcnt > 0)
1412 continue;
1413 /*
1414 * Don't recycle if our vnode is from a different type
1415 * of mount point. Note that mp is type-safe, the
1416 * check does not reach an unmapped address even if
1417 * the vnode is reclaimed.
1418 */
1419 if (mnt_op != NULL && (mp = vp->v_mount) != NULL &&
1420 mp->mnt_op != mnt_op) {
1421 continue;
1422 }
1423 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
1424 continue;
1425 }
1426 if (!vhold_recycle_free(vp))
1427 continue;
1428 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
1429 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
1430 mtx_unlock(&vnode_list_mtx);
1431 /*
1432 * FIXME: ignores the return value, meaning it may be that nothing
1433 * got recycled while it claims otherwise to the caller.
1434 *
1435 * Originally the value started being ignored in 2005 with
1436 * 114a1006a8204aa156e1f9ad6476cdff89cada7f .
1437 *
1438 * Respecting the value can run into significant stalls if most
1439 * vnodes belong to one file system and it has writes
1440 * suspended. In presence of many threads and millions of
1441 * vnodes they keep contending on the vnode_list_mtx lock only
1442 * to find vnodes they can't recycle.
1443 *
1444 * The solution would be to pre-check if the vnode is likely to
1445 * be recycle-able, but it needs to happen with the
1446 * vnode_list_mtx lock held. This runs into a problem where
1447 * VOP_GETWRITEMOUNT (currently needed to find out about if
1448 * writes are frozen) can take locks which LOR against it.
1449 *
1450 * Check nullfs for one example (null_getwritemount).
1451 */
1452 vtryrecycle(vp, isvnlru);
1453 count--;
1454 if (count == 0) {
1455 break;
1456 }
1457 mtx_lock(&vnode_list_mtx);
1458 vp = mvp;
1459 }
1460 mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
1461 return (ocount - count);
1462 }
1463
1464 /*
1465 * XXX: returns without vnode_list_mtx locked!
1466 */
1467 static int
1468 vnlru_free_locked_direct(int count)
1469 {
1470 int ret;
1471
1472 mtx_assert(&vnode_list_mtx, MA_OWNED);
1473 ret = vnlru_free_impl(count, NULL, vnode_list_free_marker, false);
1474 mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
1475 return (ret);
1476 }
1477
1478 static int
1479 vnlru_free_locked_vnlru(int count)
1480 {
1481 int ret;
1482
1483 mtx_assert(&vnode_list_mtx, MA_OWNED);
1484 ret = vnlru_free_impl(count, NULL, vnode_list_free_marker, true);
1485 mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
1486 return (ret);
1487 }
1488
1489 static int
1490 vnlru_free_vnlru(int count)
1491 {
1492
1493 mtx_lock(&vnode_list_mtx);
1494 return (vnlru_free_locked_vnlru(count));
1495 }
1496
1497 void
1498 vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp)
1499 {
1500
1501 MPASS(mnt_op != NULL);
1502 MPASS(mvp != NULL);
1503 VNPASS(mvp->v_type == VMARKER, mvp);
1504 mtx_lock(&vnode_list_mtx);
1505 vnlru_free_impl(count, mnt_op, mvp, true);
1506 mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
1507 }
1508
1509 struct vnode *
1510 vnlru_alloc_marker(void)
1511 {
1512 struct vnode *mvp;
1513
1514 mvp = vn_alloc_marker(NULL);
1515 mtx_lock(&vnode_list_mtx);
1516 TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist);
1517 mtx_unlock(&vnode_list_mtx);
1518 return (mvp);
1519 }
1520
1521 void
1522 vnlru_free_marker(struct vnode *mvp)
1523 {
1524 mtx_lock(&vnode_list_mtx);
1525 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
1526 mtx_unlock(&vnode_list_mtx);
1527 vn_free_marker(mvp);
1528 }
1529
1530 static void
1531 vnlru_recalc(void)
1532 {
1533
1534 mtx_assert(&vnode_list_mtx, MA_OWNED);
1535 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
1536 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */
1537 vlowat = vhiwat / 2;
1538 }
1539
1540 /*
1541 * Attempt to recycle vnodes in a context that is always safe to block.
1542 * Calling vlrureclaim() from the bowels of filesystem code has some
1543 * interesting deadlock problems.
1544 */
1545 static struct proc *vnlruproc;
1546 static int vnlruproc_sig;
1547 static u_long vnlruproc_kicks;
1548
1549 SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, kicks, CTLFLAG_RD, &vnlruproc_kicks, 0,
1550 "Number of times vnlru awakened due to vnode shortage");
1551
1552 #define VNLRU_COUNT_SLOP 100
1553
1554 /*
1555 * The main freevnodes counter is only updated when a counter local to CPU
1556 * diverges from 0 by more than VNLRU_FREEVNODES_SLOP. CPUs are conditionally
1557 * walked to compute a more accurate total.
1558 *
1559 * Note: the actual value at any given moment can still exceed slop, but it
1560 * should not be by significant margin in practice.
1561 */
1562 #define VNLRU_FREEVNODES_SLOP 126
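/*
 * Example (sketch): each per-CPU counter holds just under
 * VNLRU_FREEVNODES_SLOP not-yet-rolled-up updates, so on an 8-CPU machine
 * the global freevnodes value can be stale by roughly 8 * 126 ~= 1000 at
 * any instant. vnlru_read_freevnodes() below walks the per-CPU counters
 * only once the global counter has drifted by at least the slop since the
 * last full read.
 */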
1563
1564 static void __noinline
1565 vfs_freevnodes_rollup(int8_t *lfreevnodes)
1566 {
1567
1568 atomic_add_long(&freevnodes, *lfreevnodes);
1569 *lfreevnodes = 0;
1570 critical_exit();
1571 }
1572
1573 static __inline void
1574 vfs_freevnodes_inc(void)
1575 {
1576 int8_t *lfreevnodes;
1577
1578 critical_enter();
1579 lfreevnodes = PCPU_PTR(vfs_freevnodes);
1580 (*lfreevnodes)++;
1581 if (__predict_false(*lfreevnodes == VNLRU_FREEVNODES_SLOP))
1582 vfs_freevnodes_rollup(lfreevnodes);
1583 else
1584 critical_exit();
1585 }
1586
1587 static __inline void
1588 vfs_freevnodes_dec(void)
1589 {
1590 int8_t *lfreevnodes;
1591
1592 critical_enter();
1593 lfreevnodes = PCPU_PTR(vfs_freevnodes);
1594 (*lfreevnodes)--;
1595 if (__predict_false(*lfreevnodes == -VNLRU_FREEVNODES_SLOP))
1596 vfs_freevnodes_rollup(lfreevnodes);
1597 else
1598 critical_exit();
1599 }
1600
1601 static u_long
1602 vnlru_read_freevnodes(void)
1603 {
1604 long slop, rfreevnodes, rfreevnodes_old;
1605 int cpu;
1606
1607 rfreevnodes = atomic_load_long(&freevnodes);
1608 rfreevnodes_old = atomic_load_long(&freevnodes_old);
1609
1610 if (rfreevnodes > rfreevnodes_old)
1611 slop = rfreevnodes - rfreevnodes_old;
1612 else
1613 slop = rfreevnodes_old - rfreevnodes;
1614 if (slop < VNLRU_FREEVNODES_SLOP)
1615 return (rfreevnodes >= 0 ? rfreevnodes : 0);
1616 CPU_FOREACH(cpu) {
1617 rfreevnodes += cpuid_to_pcpu[cpu]->pc_vfs_freevnodes;
1618 }
1619 atomic_store_long(&freevnodes_old, rfreevnodes);
1620 return (freevnodes_old >= 0 ? freevnodes_old : 0);
1621 }
1622
1623 static bool
1624 vnlru_under(u_long rnumvnodes, u_long limit)
1625 {
1626 u_long rfreevnodes, space;
1627
1628 if (__predict_false(rnumvnodes > desiredvnodes))
1629 return (true);
1630
1631 space = desiredvnodes - rnumvnodes;
1632 if (space < limit) {
1633 rfreevnodes = vnlru_read_freevnodes();
1634 if (rfreevnodes > wantfreevnodes)
1635 space += rfreevnodes - wantfreevnodes;
1636 }
1637 return (space < limit);
1638 }
1639
1640 static void
1641 vnlru_kick_locked(void)
1642 {
1643
1644 mtx_assert(&vnode_list_mtx, MA_OWNED);
1645 if (vnlruproc_sig == 0) {
1646 vnlruproc_sig = 1;
1647 vnlruproc_kicks++;
1648 wakeup(vnlruproc);
1649 }
1650 }
1651
1652 static void
1653 vnlru_kick_cond(void)
1654 {
1655
1656 if (vnlru_read_freevnodes() > wantfreevnodes)
1657 return;
1658
1659 if (vnlruproc_sig)
1660 return;
1661 mtx_lock(&vnode_list_mtx);
1662 vnlru_kick_locked();
1663 mtx_unlock(&vnode_list_mtx);
1664 }
1665
1666 static void
1667 vnlru_proc_sleep(void)
1668 {
1669
1670 if (vnlruproc_sig) {
1671 vnlruproc_sig = 0;
1672 wakeup(&vnlruproc_sig);
1673 }
1674 msleep(vnlruproc, &vnode_list_mtx, PVFS|PDROP, "vlruwt", hz);
1675 }
1676
1677 /*
1678 * A lighter version of the machinery below.
1679 *
1680 * Tries to reach goals only by recycling free vnodes and does not invoke
1681 * uma_reclaim(UMA_RECLAIM_DRAIN).
1682 *
1683 * This works around pathological behavior in vnlru in the presence of tons of free
1684 * vnodes, but without having to rewrite the machinery at this time. Said
1685 * behavior boils down to continuously trying to reclaim all kinds of vnodes
1686 * (cycling through all levels of "force") when the count is transiently above
1687 * limit. This happens a lot when all vnodes are used up and vn_alloc
1688 * speculatively increments the counter.
1689 *
1690 * Sample testcase: vnode limit 8388608, 20 separate directory trees each with
1691 * 1 million files in total and 20 find(1) processes stating them in parallel
1692 * (one per each tree).
1693 *
1694 * On a kernel with only stock machinery this needs anywhere between 60 and 120
1695 * seconds to execute (time varies *wildly* between runs). With the workaround
1696 * it consistently stays around 20 seconds [it got further down with later
1697 * changes].
1698 *
1699 * That is to say the entire thing needs a fundamental redesign (most notably
1700 * to accommodate faster recycling); the above only tries to get it out of the way.
1701 *
1702 * Return values are:
1703 * -1 -- fallback to regular vnlru loop
1704 * 0 -- do nothing, go to sleep
1705 * >0 -- recycle this many vnodes
1706 */
1707 static long
1708 vnlru_proc_light_pick(void)
1709 {
1710 u_long rnumvnodes, rfreevnodes;
1711
1712 if (vstir || vnlruproc_sig == 1)
1713 return (-1);
1714
1715 rnumvnodes = atomic_load_long(&numvnodes);
1716 rfreevnodes = vnlru_read_freevnodes();
1717
1718 /*
1719 * vnode limit might have changed and now we may be at a significant
1720 * excess. Bail if we can't sort it out with free vnodes.
1721 *
1722 * Due to atomic updates the count can legitimately go above
1723 * the limit for a short period; don't bother doing anything in
1724 * that case.
1725 */
1726 if (rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP + 10) {
1727 if (rnumvnodes - rfreevnodes >= desiredvnodes ||
1728 rfreevnodes <= wantfreevnodes) {
1729 return (-1);
1730 }
1731
1732 return (rnumvnodes - desiredvnodes);
1733 }
1734
1735 /*
1736 * Don't try to reach wantfreevnodes target if there are too few vnodes
1737 * to begin with.
1738 */
1739 if (rnumvnodes < wantfreevnodes) {
1740 return (0);
1741 }
1742
1743 if (rfreevnodes < wantfreevnodes) {
1744 return (-1);
1745 }
1746
1747 return (0);
1748 }
1749
1750 static bool
1751 vnlru_proc_light(void)
1752 {
1753 long freecount;
1754
1755 mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
1756
1757 freecount = vnlru_proc_light_pick();
1758 if (freecount == -1)
1759 return (false);
1760
1761 if (freecount != 0) {
1762 vnlru_free_vnlru(freecount);
1763 }
1764
1765 mtx_lock(&vnode_list_mtx);
1766 vnlru_proc_sleep();
1767 mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
1768 return (true);
1769 }
1770
1771 static u_long uma_reclaim_calls;
1772 SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, uma_reclaim_calls, CTLFLAG_RD | CTLFLAG_STATS,
1773 &uma_reclaim_calls, 0, "Number of calls to uma_reclaim");
1774
1775 static void
1776 vnlru_proc(void)
1777 {
1778 u_long rnumvnodes, rfreevnodes, target;
1779 unsigned long onumvnodes;
1780 int done, force, trigger, usevnodes;
1781 bool reclaim_nc_src, want_reread;
1782
1783 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
1784 SHUTDOWN_PRI_FIRST);
1785
1786 force = 0;
1787 want_reread = false;
1788 for (;;) {
1789 kproc_suspend_check(vnlruproc);
1790
1791 if (force == 0 && vnlru_proc_light())
1792 continue;
1793
1794 mtx_lock(&vnode_list_mtx);
1795 rnumvnodes = atomic_load_long(&numvnodes);
1796
1797 if (want_reread) {
1798 force = vnlru_under(numvnodes, vhiwat) ? 1 : 0;
1799 want_reread = false;
1800 }
1801
1802 /*
1803 * If numvnodes is too large (due to desiredvnodes being
1804 * adjusted using its sysctl, or emergency growth), first
1805 * try to reduce it by discarding free vnodes.
1806 */
1807 if (rnumvnodes > desiredvnodes + 10) {
1808 vnlru_free_locked_vnlru(rnumvnodes - desiredvnodes);
1809 mtx_lock(&vnode_list_mtx);
1810 rnumvnodes = atomic_load_long(&numvnodes);
1811 }
1812 /*
1813 * Sleep if the vnode cache is in a good state. This is
1814 * when it is not over-full and has space for about a 4%
1815 * or 9% expansion (by growing its size or by a moderate reduction
1816 * of the free vnode count). Otherwise, try to reclaim
1817 * space for a 10% expansion.
1818 */
1819 if (vstir && force == 0) {
1820 force = 1;
1821 vstir = false;
1822 }
1823 if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) {
1824 vnlru_proc_sleep();
1825 continue;
1826 }
1827 rfreevnodes = vnlru_read_freevnodes();
1828
1829 onumvnodes = rnumvnodes;
1830 /*
1831 * Calculate parameters for recycling. These are the same
1832 * throughout the loop to give some semblance of fairness.
1833 * The trigger point is to avoid recycling vnodes with lots
1834 * of resident pages. We aren't trying to free memory; we
1835 * are trying to recycle or at least free vnodes.
1836 */
1837 if (rnumvnodes <= desiredvnodes)
1838 usevnodes = rnumvnodes - rfreevnodes;
1839 else
1840 usevnodes = rnumvnodes;
1841 if (usevnodes <= 0)
1842 usevnodes = 1;
1843 /*
1844 * The trigger value is chosen to give a conservatively
1845 * large value to ensure that it alone doesn't prevent
1846 * making progress. The value can easily be so large that
1847 * it is effectively infinite in some congested and
1848 * misconfigured cases, and this is necessary. Normally
1849 * it is about 8 to 100 (pages), which is quite large.
1850 */
1851 trigger = vm_cnt.v_page_count * 2 / usevnodes;
1852 if (force < 2)
1853 trigger = vsmalltrigger;
1854 reclaim_nc_src = force >= 3;
1855 target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1);
1856 target = target / 10 + 1;
1857 done = vlrureclaim(reclaim_nc_src, trigger, target);
1858 mtx_unlock(&vnode_list_mtx);
1859 /*
1860 * Total number of vnodes can transiently go slightly above the
1861 * limit (see vn_alloc_hard), no need to call uma_reclaim if
1862 * this happens.
1863 */
1864 if (onumvnodes + VNLRU_COUNT_SLOP + 1000 > desiredvnodes &&
1865 numvnodes <= desiredvnodes) {
1866 uma_reclaim_calls++;
1867 uma_reclaim(UMA_RECLAIM_DRAIN);
1868 }
1869 if (done == 0) {
1870 if (force == 0 || force == 1) {
1871 force = 2;
1872 continue;
1873 }
1874 if (force == 2) {
1875 force = 3;
1876 continue;
1877 }
1878 want_reread = true;
1879 force = 0;
1880 vnlru_nowhere++;
1881 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
1882 } else {
1883 want_reread = true;
1884 kern_yield(PRI_USER);
1885 }
1886 }
1887 }
1888
1889 static struct kproc_desc vnlru_kp = {
1890 "vnlru",
1891 vnlru_proc,
1892 &vnlruproc
1893 };
1894 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
1895 &vnlru_kp);
1896
1897 /*
1898 * Routines having to do with the management of the vnode table.
1899 */
1900
1901 /*
1902 * Try to recycle a freed vnode.
1903 */
1904 static int
1905 vtryrecycle(struct vnode *vp, bool isvnlru)
1906 {
1907 struct mount *vnmp;
1908
1909 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
1910 VNPASS(vp->v_holdcnt > 0, vp);
1911 /*
1912 * This vnode may be found and locked via some other list; if so,
1913 * we can't recycle it yet.
1914 */
1915 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
1916 CTR2(KTR_VFS,
1917 "%s: impossible to recycle, vp %p lock is already held",
1918 __func__, vp);
1919 vdrop_recycle(vp);
1920 return (EWOULDBLOCK);
1921 }
1922 /*
1923 * Don't recycle if its filesystem is being suspended.
1924 */
1925 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
1926 VOP_UNLOCK(vp);
1927 CTR2(KTR_VFS,
1928 "%s: impossible to recycle, cannot start the write for %p",
1929 __func__, vp);
1930 vdrop_recycle(vp);
1931 return (EBUSY);
1932 }
1933 /*
1934 * If we got this far, we need to acquire the interlock and see if
1935 * anyone picked up this vnode from another list. If not, we will
1936 * mark it with DOOMED via vgonel() so that anyone who does find it
1937 * will skip over it.
1938 */
1939 VI_LOCK(vp);
1940 if (vp->v_usecount) {
1941 VOP_UNLOCK(vp);
1942 vdropl_recycle(vp);
1943 vn_finished_write(vnmp);
1944 CTR2(KTR_VFS,
1945 "%s: impossible to recycle, %p is already referenced",
1946 __func__, vp);
1947 return (EBUSY);
1948 }
1949 if (!VN_IS_DOOMED(vp)) {
1950 if (isvnlru)
1951 recycles_free_count++;
1952 else
1953 counter_u64_add(direct_recycles_free_count, 1);
1954 vgonel(vp);
1955 }
1956 VOP_UNLOCK(vp);
1957 vdropl_recycle(vp);
1958 vn_finished_write(vnmp);
1959 return (0);
1960 }
1961
1962 /*
1963 * Allocate a new vnode.
1964 *
1965 * The operation never returns an error. Returning an error was disabled
1966 * in r145385 (dated 2005) with the following comment:
1967 *
1968 * XXX Not all VFS_VGET/ffs_vget callers check returns.
1969 *
1970 * Given the age of this commit (almost 15 years at the time of writing this
1971 * comment) restoring the ability to fail requires a significant audit of
1972 * all codepaths.
1973 *
1974 * The routine can try to free a vnode or stall for up to 1 second waiting for
1975 * vnlru to clear things up, but ultimately always performs an M_WAITOK allocation.
1976 */
1977 static u_long vn_alloc_cyclecount;
1978 static u_long vn_alloc_sleeps;
1979
1980 SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, alloc_sleeps, CTLFLAG_RD, &vn_alloc_sleeps, 0,
1981 "Number of times vnode allocation blocked waiting on vnlru");
1982
1983 static struct vnode * __noinline
1984 vn_alloc_hard(struct mount *mp, u_long rnumvnodes, bool bumped)
1985 {
1986 u_long rfreevnodes;
1987
1988 if (bumped) {
1989 if (rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP) {
1990 atomic_subtract_long(&numvnodes, 1);
1991 bumped = false;
1992 }
1993 }
1994
1995 mtx_lock(&vnode_list_mtx);
1996
1997 rfreevnodes = vnlru_read_freevnodes();
1998 if (vn_alloc_cyclecount++ >= rfreevnodes) {
1999 vn_alloc_cyclecount = 0;
2000 vstir = true;
2001 }
2002 /*
2003 * Grow the vnode cache if it will not be above its target max after
2004 * growing. Otherwise, if there is at least one free vnode, try to
2005 * reclaim 1 item from it before growing the cache (possibly above its
2006 * target max if the reclamation failed or is delayed).
2007 */
2008 if (vnlru_free_locked_direct(1) > 0)
2009 goto alloc;
2010 mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
2011 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
2012 /*
2013 * Wait for space for a new vnode.
2014 */
2015 if (bumped) {
2016 atomic_subtract_long(&numvnodes, 1);
2017 bumped = false;
2018 }
2019 mtx_lock(&vnode_list_mtx);
2020 vnlru_kick_locked();
2021 vn_alloc_sleeps++;
2022 msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz);
2023 if (atomic_load_long(&numvnodes) + 1 > desiredvnodes &&
2024 vnlru_read_freevnodes() > 1)
2025 vnlru_free_locked_direct(1);
2026 else
2027 mtx_unlock(&vnode_list_mtx);
2028 }
2029 alloc:
2030 mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
2031 if (!bumped)
2032 atomic_add_long(&numvnodes, 1);
2033 vnlru_kick_cond();
2034 return (uma_zalloc_smr(vnode_zone, M_WAITOK));
2035 }
2036
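/*
 * Fast path for vnode allocation: speculatively bump the global count and
 * hand out a zone item.  Fall back to vn_alloc_hard() when a previous slow
 * path left vn_alloc_cyclecount set or when the bump leaves less than the
 * vlowat watermark of headroom.
 */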
2037 static struct vnode *
2038 vn_alloc(struct mount *mp)
2039 {
2040 u_long rnumvnodes;
2041
2042 if (__predict_false(vn_alloc_cyclecount != 0))
2043 return (vn_alloc_hard(mp, 0, false));
2044 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
2045 if (__predict_false(vnlru_under(rnumvnodes, vlowat))) {
2046 return (vn_alloc_hard(mp, rnumvnodes, true));
2047 }
2048
2049 return (uma_zalloc_smr(vnode_zone, M_WAITOK));
2050 }
2051
2052 static void
2053 vn_free(struct vnode *vp)
2054 {
2055
2056 atomic_subtract_long(&numvnodes, 1);
2057 uma_zfree_smr(vnode_zone, vp);
2058 }
2059
2060 /*
2061 * Allocate a new vnode.
2062 */
2063 int
2064 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
2065 struct vnode **vpp)
2066 {
2067 struct vnode *vp;
2068 struct thread *td;
2069 struct lock_object *lo;
2070
2071 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
2072
2073 KASSERT(vops->registered,
2074 ("%s: not registered vector op %p\n", __func__, vops));
2075 cache_validate_vop_vector(mp, vops);
2076
2077 td = curthread;
2078 if (td->td_vp_reserved != NULL) {
2079 vp = td->td_vp_reserved;
2080 td->td_vp_reserved = NULL;
2081 } else {
2082 vp = vn_alloc(mp);
2083 }
2084 counter_u64_add(vnodes_created, 1);
2085
2086 vn_set_state(vp, VSTATE_UNINITIALIZED);
2087
2088 /*
2089 * Locks are given the generic name "vnode" when created.
2090 * Follow the historic practice of using the filesystem
2091 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
2092 *
2093 * Locks live in a witness group keyed on their name. Thus,
2094 * when a lock is renamed, it must also move from the witness
2095 * group of its old name to the witness group of its new name.
2096 *
2097 * The change only needs to be made when the vnode moves
2098 * from one filesystem type to another. We ensure that each
2099 * filesystem uses a single static name pointer for its tag so
2100 * that we can compare pointers rather than doing a strcmp().
2101 */
2102 lo = &vp->v_vnlock->lock_object;
2103 #ifdef WITNESS
2104 if (lo->lo_name != tag) {
2105 #endif
2106 lo->lo_name = tag;
2107 #ifdef WITNESS
2108 WITNESS_DESTROY(lo);
2109 WITNESS_INIT(lo, tag);
2110 }
2111 #endif
2112 /*
2113 * By default, don't allow shared locks unless filesystems opt-in.
2114 */
2115 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
2116 /*
2117 * Finalize various vnode identity bits.
2118 */
2119 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp));
2120 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp));
2121 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp));
2122 vp->v_type = VNON;
2123 vp->v_op = vops;
2124 vp->v_irflag = 0;
2125 v_init_counters(vp);
2126 vn_seqc_init(vp);
2127 vp->v_bufobj.bo_ops = &buf_ops_bio;
2128 #ifdef DIAGNOSTIC
2129 if (mp == NULL && vops != &dead_vnodeops)
2130 printf("NULL mp in getnewvnode(9), tag %s\n", tag);
2131 #endif
2132 #ifdef MAC
2133 mac_vnode_init(vp);
2134 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
2135 mac_vnode_associate_singlelabel(mp, vp);
2136 #endif
2137 if (mp != NULL) {
2138 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize;
2139 }
2140
2141 /*
2142 * For the filesystems which do not use vfs_hash_insert(),
2143 * still initialize v_hash so that vfs_hash_index() remains useful.
2144 * E.g., nullfs uses vfs_hash_index() on the lower vnode for
2145 * its own hashing.
2146 */
2147 vp->v_hash = (uintptr_t)vp >> vnsz2log;
2148
2149 *vpp = vp;
2150 return (0);
2151 }
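
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a typical consumer is a filesystem's VFS_VGET/VOP_LOOKUP path.  The names
 * "foofs", foofs_vnodeops and "ip" below are hypothetical; getnewvnode(),
 * vn_lock() and insmntque() are the real interfaces.
 *
 *	error = getnewvnode("foofs", mp, &foofs_vnodeops, &vp);
 *	(currently always 0, see the comment above vn_alloc_hard())
 *	vp->v_data = ip;
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = insmntque(vp, mp);
 *	if (error != 0)
 *		return (error);	(insmntque() already reclaimed vp)
 */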
2152
2153 void
2154 getnewvnode_reserve(void)
2155 {
2156 struct thread *td;
2157
2158 td = curthread;
2159 MPASS(td->td_vp_reserved == NULL);
2160 td->td_vp_reserved = vn_alloc(NULL);
2161 }
2162
2163 void
2164 getnewvnode_drop_reserve(void)
2165 {
2166 struct thread *td;
2167
2168 td = curthread;
2169 if (td->td_vp_reserved != NULL) {
2170 vn_free(td->td_vp_reserved);
2171 td->td_vp_reserved = NULL;
2172 }
2173 }
2174
2175 static void __noinline
2176 freevnode(struct vnode *vp)
2177 {
2178 struct bufobj *bo;
2179
2180 /*
2181 * The vnode has been marked for destruction, so free it.
2182 *
2183 * The vnode will be returned to the zone where it will
2184 * normally remain until it is needed for another vnode. We
2185 * need to cleanup (or verify that the cleanup has already
2186 * been done) any residual data left from its current use
2187 * so as not to contaminate the freshly allocated vnode.
2188 */
2189 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp);
2190 /*
2191 * Paired with vgone.
2192 */
2193 vn_seqc_write_end_free(vp);
2194
2195 bo = &vp->v_bufobj;
2196 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
2197 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp);
2198 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
2199 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
2200 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
2201 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
2202 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp,
2203 ("clean blk trie not empty"));
2204 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
2205 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp,
2206 ("dirty blk trie not empty"));
2207 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp,
2208 ("Leaked inactivation"));
2209 VI_UNLOCK(vp);
2210 cache_assert_no_entries(vp);
2211
2212 #ifdef MAC
2213 mac_vnode_destroy(vp);
2214 #endif
2215 if (vp->v_pollinfo != NULL) {
2216 /*
2217 * Use LK_NOWAIT to shut up witness about the lock. We may get
2218 * here while having another vnode locked when trying to
2219 * satisfy a lookup and needing to recycle.
2220 */
2221 VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT);
2222 destroy_vpollinfo(vp->v_pollinfo);
2223 VOP_UNLOCK(vp);
2224 vp->v_pollinfo = NULL;
2225 }
2226 vp->v_mountedhere = NULL;
2227 vp->v_unpcb = NULL;
2228 vp->v_rdev = NULL;
2229 vp->v_fifoinfo = NULL;
2230 vp->v_iflag = 0;
2231 vp->v_vflag = 0;
2232 bo->bo_flag = 0;
2233 vn_free(vp);
2234 }
2235
2236 /*
2237 * Delete from old mount point vnode list, if on one.
2238 */
2239 static void
2240 delmntque(struct vnode *vp)
2241 {
2242 struct mount *mp;
2243
2244 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
2245
2246 mp = vp->v_mount;
2247 MNT_ILOCK(mp);
2248 VI_LOCK(vp);
2249 vp->v_mount = NULL;
2250 VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
2251 ("bad mount point vnode list size"));
2252 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2253 mp->mnt_nvnodelistsize--;
2254 MNT_REL(mp);
2255 MNT_IUNLOCK(mp);
2256 /*
2257 * The caller expects the interlock to be still held.
2258 */
2259 ASSERT_VI_LOCKED(vp, __func__);
2260 }
2261
2262 static int
2263 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr)
2264 {
2265
2266 KASSERT(vp->v_mount == NULL,
2267 ("insmntque: vnode already on per mount vnode list"));
2268 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
2269 if ((mp->mnt_kern_flag & MNTK_UNLOCKED_INSMNTQUE) == 0) {
2270 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp");
2271 } else {
2272 KASSERT(!dtr,
2273 ("%s: can't have MNTK_UNLOCKED_INSMNTQUE and cleanup",
2274 __func__));
2275 }
2276
2277 /*
2278 * We acquire the vnode interlock early to ensure that the
2279 * vnode cannot be recycled by another process releasing a
2280 * holdcnt on it before we get it on both the vnode list
2281 * and the active vnode list. The mount mutex protects only
2282 * manipulation of the vnode list and the vnode freelist
2283 * mutex protects only manipulation of the active vnode list.
2284 * Hence the need to hold the vnode interlock throughout.
2285 */
2286 MNT_ILOCK(mp);
2287 VI_LOCK(vp);
2288 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 &&
2289 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 ||
2290 mp->mnt_nvnodelistsize == 0)) &&
2291 (vp->v_vflag & VV_FORCEINSMQ) == 0) {
2292 VI_UNLOCK(vp);
2293 MNT_IUNLOCK(mp);
2294 if (dtr) {
2295 vp->v_data = NULL;
2296 vp->v_op = &dead_vnodeops;
2297 vgone(vp);
2298 vput(vp);
2299 }
2300 return (EBUSY);
2301 }
2302 vp->v_mount = mp;
2303 MNT_REF(mp);
2304 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2305 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
2306 ("neg mount point vnode list size"));
2307 mp->mnt_nvnodelistsize++;
2308 VI_UNLOCK(vp);
2309 MNT_IUNLOCK(mp);
2310 return (0);
2311 }
2312
2313 /*
2314 * Insert into list of vnodes for the new mount point, if available.
2315 * insmntque() reclaims the vnode on insertion failure, insmntque1()
2316 * leaves handling of the vnode to the caller.
2317 */
2318 int
2319 insmntque(struct vnode *vp, struct mount *mp)
2320 {
2321 return (insmntque1_int(vp, mp, true));
2322 }
2323
2324 int
2325 insmntque1(struct vnode *vp, struct mount *mp)
2326 {
2327 return (insmntque1_int(vp, mp, false));
2328 }
2329
2330 /*
2331 * Flush out and invalidate all buffers associated with a bufobj
2332 * Called with the underlying object locked.
2333 */
2334 int
2335 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
2336 {
2337 int error;
2338
2339 BO_LOCK(bo);
2340 if (flags & V_SAVE) {
2341 error = bufobj_wwait(bo, slpflag, slptimeo);
2342 if (error) {
2343 BO_UNLOCK(bo);
2344 return (error);
2345 }
2346 if (bo->bo_dirty.bv_cnt > 0) {
2347 BO_UNLOCK(bo);
2348 do {
2349 error = BO_SYNC(bo, MNT_WAIT);
2350 } while (error == ERELOOKUP);
2351 if (error != 0)
2352 return (error);
2353 BO_LOCK(bo);
2354 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
2355 BO_UNLOCK(bo);
2356 return (EBUSY);
2357 }
2358 }
2359 }
2360 /*
2361 * If you alter this loop please notice that interlock is dropped and
2362 * reacquired in flushbuflist. Special care is needed to ensure that
2363 * no race conditions occur from this.
2364 */
2365 do {
2366 error = flushbuflist(&bo->bo_clean,
2367 flags, bo, slpflag, slptimeo);
2368 if (error == 0 && !(flags & V_CLEANONLY))
2369 error = flushbuflist(&bo->bo_dirty,
2370 flags, bo, slpflag, slptimeo);
2371 if (error != 0 && error != EAGAIN) {
2372 BO_UNLOCK(bo);
2373 return (error);
2374 }
2375 } while (error != 0);
2376
2377 /*
2378 * Wait for I/O to complete. XXX needs cleaning up. The vnode can
2379 * have write I/O in-progress but if there is a VM object then the
2380 * VM object can also have read-I/O in-progress.
2381 */
2382 do {
2383 bufobj_wwait(bo, 0, 0);
2384 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) {
2385 BO_UNLOCK(bo);
2386 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx");
2387 BO_LOCK(bo);
2388 }
2389 } while (bo->bo_numoutput > 0);
2390 BO_UNLOCK(bo);
2391
2392 /*
2393 * Destroy the copy in the VM cache, too.
2394 */
2395 if (bo->bo_object != NULL &&
2396 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) {
2397 VM_OBJECT_WLOCK(bo->bo_object);
2398 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
2399 OBJPR_CLEANONLY : 0);
2400 VM_OBJECT_WUNLOCK(bo->bo_object);
2401 }
2402
2403 #ifdef INVARIANTS
2404 BO_LOCK(bo);
2405 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO |
2406 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 ||
2407 bo->bo_clean.bv_cnt > 0))
2408 panic("vinvalbuf: flush failed");
2409 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 &&
2410 bo->bo_dirty.bv_cnt > 0)
2411 panic("vinvalbuf: flush dirty failed");
2412 BO_UNLOCK(bo);
2413 #endif
2414 return (0);
2415 }
2416
2417 /*
2418 * Flush out and invalidate all buffers associated with a vnode.
2419 * Called with the underlying object locked.
2420 */
2421 int
2422 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
2423 {
2424
2425 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
2426 ASSERT_VOP_LOCKED(vp, "vinvalbuf");
2427 if (vp->v_object != NULL && vp->v_object->handle != vp)
2428 return (0);
2429 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo));
2430 }
2431
2432 /*
2433 * Flush out buffers on the specified list.
2434 *
2435 */
2436 static int
2437 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag,
2438 int slptimeo)
2439 {
2440 struct buf *bp, *nbp;
2441 int retval, error;
2442 daddr_t lblkno;
2443 b_xflags_t xflags;
2444
2445 ASSERT_BO_WLOCKED(bo);
2446
2447 retval = 0;
2448 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
2449 /*
2450 * If we are flushing both V_NORMAL and V_ALT buffers then
2451 * do not skip any buffers. If we are flushing only V_NORMAL
2452 * buffers then skip buffers marked as BX_ALTDATA. If we are
2453 * flushing only V_ALT buffers then skip buffers not marked
2454 * as BX_ALTDATA.
2455 */
2456 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) &&
2457 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) ||
2458 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) {
2459 continue;
2460 }
2461 if (nbp != NULL) {
2462 lblkno = nbp->b_lblkno;
2463 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN);
2464 }
2465 retval = EAGAIN;
2466 error = BUF_TIMELOCK(bp,
2467 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo),
2468 "flushbuf", slpflag, slptimeo);
2469 if (error) {
2470 BO_LOCK(bo);
2471 return (error != ENOLCK ? error : EAGAIN);
2472 }
2473 KASSERT(bp->b_bufobj == bo,
2474 ("bp %p wrong b_bufobj %p should be %p",
2475 bp, bp->b_bufobj, bo));
2476 /*
2477 * XXX Since there are no node locks for NFS, I
2478 * believe there is a slight chance that a delayed
2479 * write will occur while sleeping just above, so
2480 * check for it.
2481 */
2482 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
2483 (flags & V_SAVE)) {
2484 bremfree(bp);
2485 bp->b_flags |= B_ASYNC;
2486 bwrite(bp);
2487 BO_LOCK(bo);
2488 return (EAGAIN); /* XXX: why not loop ? */
2489 }
2490 bremfree(bp);
2491 bp->b_flags |= (B_INVAL | B_RELBUF);
2492 bp->b_flags &= ~B_ASYNC;
2493 brelse(bp);
2494 BO_LOCK(bo);
2495 if (nbp == NULL)
2496 break;
2497 nbp = gbincore(bo, lblkno);
2498 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
2499 != xflags)
2500 break; /* nbp invalid */
2501 }
2502 return (retval);
2503 }
2504
2505 int
2506 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn)
2507 {
2508 struct buf *bp;
2509 int error;
2510 daddr_t lblkno;
2511
2512 ASSERT_BO_LOCKED(bo);
2513
2514 for (lblkno = startn;;) {
2515 again:
2516 bp = buf_lookup_ge(bufv, lblkno);
2517 if (bp == NULL || bp->b_lblkno >= endn)
2518 break;
2519 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
2520 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0);
2521 if (error != 0) {
2522 BO_RLOCK(bo);
2523 if (error == ENOLCK)
2524 goto again;
2525 return (error);
2526 }
2527 KASSERT(bp->b_bufobj == bo,
2528 ("bp %p wrong b_bufobj %p should be %p",
2529 bp, bp->b_bufobj, bo));
2530 lblkno = bp->b_lblkno + 1;
2531 if ((bp->b_flags & B_MANAGED) == 0)
2532 bremfree(bp);
2533 bp->b_flags |= B_RELBUF;
2534 /*
2535 * In the VMIO case, use the B_NOREUSE flag to hint that the
2536 * pages backing each buffer in the range are unlikely to be
2537 * reused. Dirty buffers will have the hint applied once
2538 * they've been written.
2539 */
2540 if ((bp->b_flags & B_VMIO) != 0)
2541 bp->b_flags |= B_NOREUSE;
2542 brelse(bp);
2543 BO_RLOCK(bo);
2544 }
2545 return (0);
2546 }
2547
2548 /*
2549 * Truncate a file's buffer and pages to a specified length. This
2550 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
2551 * sync activity.
2552 */
2553 int
2554 vtruncbuf(struct vnode *vp, off_t length, int blksize)
2555 {
2556 struct buf *bp, *nbp;
2557 struct bufobj *bo;
2558 daddr_t startlbn;
2559
2560 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__,
2561 vp, blksize, (uintmax_t)length);
2562
2563 /*
2564 * Round up to the *next* lbn.
2565 */
2566 startlbn = howmany(length, blksize);
2567
2568 ASSERT_VOP_LOCKED(vp, "vtruncbuf");
2569
2570 bo = &vp->v_bufobj;
2571 restart_unlocked:
2572 BO_LOCK(bo);
2573
2574 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN)
2575 ;
2576
2577 if (length > 0) {
2578 /*
2579 * Write out vnode metadata, e.g. indirect blocks.
2580 */
2581 restartsync:
2582 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
2583 if (bp->b_lblkno >= 0)
2584 continue;
2585 /*
2586 * Since we hold the vnode lock this should only
2587 * fail if we're racing with the buf daemon.
2588 */
2589 if (BUF_LOCK(bp,
2590 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
2591 BO_LOCKPTR(bo)) == ENOLCK)
2592 goto restart_unlocked;
2593
2594 VNASSERT((bp->b_flags & B_DELWRI), vp,
2595 ("buf(%p) on dirty queue without DELWRI", bp));
2596
2597 bremfree(bp);
2598 bawrite(bp);
2599 BO_LOCK(bo);
2600 goto restartsync;
2601 }
2602 }
2603
2604 bufobj_wwait(bo, 0, 0);
2605 BO_UNLOCK(bo);
2606 vnode_pager_setsize(vp, length);
2607
2608 return (0);
2609 }
2610
2611 /*
2612 * Invalidate the cached pages of a file's buffer within the range of block
2613 * numbers [startlbn, endlbn).
2614 */
2615 void
2616 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn,
2617 int blksize)
2618 {
2619 struct bufobj *bo;
2620 off_t start, end;
2621
2622 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range");
2623
2624 start = blksize * startlbn;
2625 end = blksize * endlbn;
2626
2627 bo = &vp->v_bufobj;
2628 BO_LOCK(bo);
2629 MPASS(blksize == bo->bo_bsize);
2630
2631 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN)
2632 ;
2633
2634 BO_UNLOCK(bo);
2635 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1));
2636 }
2637
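/*
 * Worker for vtruncbuf() and v_inval_buf_range(): walk the clean and dirty
 * buffer lists and discard every buffer in [startlbn, endlbn).  Returns
 * EAGAIN if the bufobj lock had to be dropped and the caller must rescan.
 */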
2638 static int
2639 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
2640 daddr_t startlbn, daddr_t endlbn)
2641 {
2642 struct bufv *bv;
2643 struct buf *bp, *nbp;
2644 uint8_t anyfreed;
2645 bool clean;
2646
2647 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked");
2648 ASSERT_BO_LOCKED(bo);
2649
2650 anyfreed = 1;
2651 clean = true;
2652 do {
2653 bv = clean ? &bo->bo_clean : &bo->bo_dirty;
2654 bp = buf_lookup_ge(bv, startlbn);
2655 if (bp == NULL)
2656 continue;
2657 TAILQ_FOREACH_FROM_SAFE(bp, &bv->bv_hd, b_bobufs, nbp) {
2658 if (bp->b_lblkno >= endlbn)
2659 break;
2660 if (BUF_LOCK(bp,
2661 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
2662 BO_LOCKPTR(bo)) == ENOLCK) {
2663 BO_LOCK(bo);
2664 return (EAGAIN);
2665 }
2666
2667 bremfree(bp);
2668 bp->b_flags |= B_INVAL | B_RELBUF;
2669 bp->b_flags &= ~B_ASYNC;
2670 brelse(bp);
2671 anyfreed = 2;
2672
2673 BO_LOCK(bo);
2674 if (nbp != NULL &&
2675 (((nbp->b_xflags &
2676 (clean ? BX_VNCLEAN : BX_VNDIRTY)) == 0) ||
2677 nbp->b_vp != vp ||
2678 (nbp->b_flags & B_DELWRI) == (clean? B_DELWRI: 0)))
2679 return (EAGAIN);
2680 }
2681 } while (clean = !clean, anyfreed-- > 0);
2682 return (0);
2683 }
2684
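/*
 * Remove the buffer from the clean or dirty list (and pctrie) of its bufobj,
 * as selected by its BX_VNCLEAN/BX_VNDIRTY flag, and clear that flag.
 */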
2685 static void
2686 buf_vlist_remove(struct buf *bp)
2687 {
2688 struct bufv *bv;
2689 b_xflags_t flags;
2690
2691 flags = bp->b_xflags;
2692
2693 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2694 ASSERT_BO_WLOCKED(bp->b_bufobj);
2695 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 &&
2696 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN),
2697 ("%s: buffer %p has invalid queue state", __func__, bp));
2698
2699 if ((flags & BX_VNDIRTY) != 0)
2700 bv = &bp->b_bufobj->bo_dirty;
2701 else
2702 bv = &bp->b_bufobj->bo_clean;
2703 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno);
2704 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs);
2705 bv->bv_cnt--;
2706 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
2707 }
2708
2709 /*
2710 * Add the buffer to the sorted clean or dirty block list. Return zero on
2711 * success, EEXIST if a buffer with this identity already exists, or another
2712 * error on allocation failure.
2713 */
2714 static inline int
2715 buf_vlist_find_or_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
2716 {
2717 struct bufv *bv;
2718 struct buf *n;
2719 int error;
2720
2721 ASSERT_BO_WLOCKED(bo);
2722 KASSERT((bo->bo_flag & BO_NOBUFS) == 0,
2723 ("buf_vlist_add: bo %p does not allow bufs", bo));
2724 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0,
2725 ("dead bo %p", bo));
2726 KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == xflags,
2727 ("buf_vlist_add: b_xflags %#x not set on bp %p", xflags, bp));
2728
2729 if (xflags & BX_VNDIRTY)
2730 bv = &bo->bo_dirty;
2731 else
2732 bv = &bo->bo_clean;
2733
2734 error = buf_insert_lookup_le(bv, bp, &n);
2735 if (n == NULL) {
2736 KASSERT(error != EEXIST,
2737 ("buf_vlist_add: EEXIST but no existing buf found: bp %p",
2738 bp));
2739 } else {
2740 KASSERT(n->b_lblkno <= bp->b_lblkno,
2741 ("buf_vlist_add: out of order insert/lookup: bp %p n %p",
2742 bp, n));
2743 KASSERT((n->b_lblkno == bp->b_lblkno) == (error == EEXIST),
2744 ("buf_vlist_add: inconsistent result for existing buf: "
2745 "error %d bp %p n %p", error, bp, n));
2746 }
2747 if (error != 0)
2748 return (error);
2749
2750 /* Keep the list ordered. */
2751 if (n == NULL) {
2752 KASSERT(TAILQ_EMPTY(&bv->bv_hd) ||
2753 bp->b_lblkno < TAILQ_FIRST(&bv->bv_hd)->b_lblkno,
2754 ("buf_vlist_add: queue order: "
2755 "%p should be before first %p",
2756 bp, TAILQ_FIRST(&bv->bv_hd)));
2757 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs);
2758 } else {
2759 KASSERT(TAILQ_NEXT(n, b_bobufs) == NULL ||
2760 bp->b_lblkno < TAILQ_NEXT(n, b_bobufs)->b_lblkno,
2761 ("buf_vlist_add: queue order: "
2762 "%p should be before next %p",
2763 bp, TAILQ_NEXT(n, b_bobufs)));
2764 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs);
2765 }
2766
2767 bv->bv_cnt++;
2768 return (0);
2769 }
2770
2771 /*
2772 * Add the buffer to the sorted clean or dirty block list.
2773 *
2774 * NOTE: xflags is passed as a constant, optimizing this inline function!
2775 */
2776 static void
2777 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
2778 {
2779 int error;
2780
2781 KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
2782 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags));
2783 bp->b_xflags |= xflags;
2784 error = buf_vlist_find_or_add(bp, bo, xflags);
2785 if (error)
2786 panic("buf_vlist_add: error=%d", error);
2787 }
2788
2789 /*
2790 * Look up a buffer using the buffer tries.
2791 */
2792 struct buf *
2793 gbincore(struct bufobj *bo, daddr_t lblkno)
2794 {
2795 struct buf *bp;
2796
2797 ASSERT_BO_LOCKED(bo);
2798 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno);
2799 if (bp != NULL)
2800 return (bp);
2801 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno));
2802 }
2803
2804 /*
2805 * Look up a buf using the buffer tries, without the bufobj lock. This relies
2806 * on SMR for safe lookup, and bufs being in a no-free zone to provide type
2807 * stability of the result. Like other lockless lookups, the found buf may
2808 * already be invalid by the time this function returns.
2809 */
2810 struct buf *
2811 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno)
2812 {
2813 struct buf *bp;
2814
2815 ASSERT_BO_UNLOCKED(bo);
2816 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno);
2817 if (bp != NULL)
2818 return (bp);
2819 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno));
2820 }
2821
2822 /*
2823 * Associate a buffer with a vnode.
2824 */
2825 int
2826 bgetvp(struct vnode *vp, struct buf *bp)
2827 {
2828 struct bufobj *bo;
2829 int error;
2830
2831 bo = &vp->v_bufobj;
2832 ASSERT_BO_UNLOCKED(bo);
2833 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free"));
2834
2835 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
2836 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp,
2837 ("bgetvp: bp already attached! %p", bp));
2838
2839 /*
2840 * Add the buf to the vnode's clean list unless we lost a race and find
2841 * an existing buf in either dirty or clean.
2842 */
2843 bp->b_vp = vp;
2844 bp->b_bufobj = bo;
2845 bp->b_xflags |= BX_VNCLEAN;
2846 error = EEXIST;
2847 BO_LOCK(bo);
2848 if (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, bp->b_lblkno) == NULL)
2849 error = buf_vlist_find_or_add(bp, bo, BX_VNCLEAN);
2850 BO_UNLOCK(bo);
2851 if (__predict_true(error == 0)) {
2852 vhold(vp);
2853 return (0);
2854 }
2855 if (error != EEXIST)
2856 panic("bgetvp: buf_vlist_add error: %d", error);
2857 bp->b_vp = NULL;
2858 bp->b_bufobj = NULL;
2859 bp->b_xflags &= ~BX_VNCLEAN;
2860 return (error);
2861 }
2862
2863 /*
2864 * Disassociate a buffer from a vnode.
2865 */
2866 void
2867 brelvp(struct buf *bp)
2868 {
2869 struct bufobj *bo;
2870 struct vnode *vp;
2871
2872 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2873 KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
2874
2875 /*
2876 * Delete from old vnode list, if on one.
2877 */
2878 vp = bp->b_vp; /* XXX */
2879 bo = bp->b_bufobj;
2880 BO_LOCK(bo);
2881 buf_vlist_remove(bp);
2882 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
2883 bo->bo_flag &= ~BO_ONWORKLST;
2884 mtx_lock(&sync_mtx);
2885 LIST_REMOVE(bo, bo_synclist);
2886 syncer_worklist_len--;
2887 mtx_unlock(&sync_mtx);
2888 }
2889 bp->b_vp = NULL;
2890 bp->b_bufobj = NULL;
2891 BO_UNLOCK(bo);
2892 vdrop(vp);
2893 }
2894
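/*
 * The syncer worklist is a wheel of one-second buckets.  A bufobj is hashed
 * into the bucket "delay" seconds ahead of the current position
 * (syncer_delayno); sched_sync() advances the position about once a second
 * and flushes whatever has come due.
 */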
2895 /*
2896 * Add an item to the syncer work queue.
2897 */
2898 static void
2899 vn_syncer_add_to_worklist(struct bufobj *bo, int delay)
2900 {
2901 int slot;
2902
2903 ASSERT_BO_WLOCKED(bo);
2904
2905 mtx_lock(&sync_mtx);
2906 if (bo->bo_flag & BO_ONWORKLST)
2907 LIST_REMOVE(bo, bo_synclist);
2908 else {
2909 bo->bo_flag |= BO_ONWORKLST;
2910 syncer_worklist_len++;
2911 }
2912
2913 if (delay > syncer_maxdelay - 2)
2914 delay = syncer_maxdelay - 2;
2915 slot = (syncer_delayno + delay) & syncer_mask;
2916
2917 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
2918 mtx_unlock(&sync_mtx);
2919 }
2920
2921 static int
2922 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
2923 {
2924 int error, len;
2925
2926 mtx_lock(&sync_mtx);
2927 len = syncer_worklist_len - sync_vnode_count;
2928 mtx_unlock(&sync_mtx);
2929 error = SYSCTL_OUT(req, &len, sizeof(len));
2930 return (error);
2931 }
2932
2933 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len,
2934 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0,
2935 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");
2936
2937 static struct proc *updateproc;
2938 static void sched_sync(void);
2939 static struct kproc_desc up_kp = {
2940 "syncer",
2941 sched_sync,
2942 &updateproc
2943 };
2944 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp);
2945
2946 static int
2947 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td)
2948 {
2949 struct vnode *vp;
2950 struct mount *mp;
2951
2952 *bo = LIST_FIRST(slp);
2953 if (*bo == NULL)
2954 return (0);
2955 vp = bo2vnode(*bo);
2956 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0)
2957 return (1);
2958 /*
2959 * We use vhold in case the vnode does not
2960 * successfully sync. vhold prevents the vnode from
2961 * going away when we unlock the sync_mtx so that
2962 * we can acquire the vnode interlock.
2963 */
2964 vholdl(vp);
2965 mtx_unlock(&sync_mtx);
2966 VI_UNLOCK(vp);
2967 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2968 vdrop(vp);
2969 mtx_lock(&sync_mtx);
2970 return (*bo == LIST_FIRST(slp));
2971 }
2972 MPASSERT(mp == NULL || (curthread->td_pflags & TDP_IGNSUSP) != 0 ||
2973 (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0, mp,
2974 ("suspended mp syncing vp %p", vp));
2975 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2976 (void) VOP_FSYNC(vp, MNT_LAZY, td);
2977 VOP_UNLOCK(vp);
2978 vn_finished_write(mp);
2979 BO_LOCK(*bo);
2980 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) {
2981 /*
2982 * Put us back on the worklist. The worklist
2983 * routine will remove us from our current
2984 * position and then add us back in at a later
2985 * position.
2986 */
2987 vn_syncer_add_to_worklist(*bo, syncdelay);
2988 }
2989 BO_UNLOCK(*bo);
2990 vdrop(vp);
2991 mtx_lock(&sync_mtx);
2992 return (0);
2993 }
2994
2995 static int first_printf = 1;
2996
2997 /*
2998 * System filesystem synchronizer daemon.
2999 */
3000 static void
3001 sched_sync(void)
3002 {
3003 struct synclist *next, *slp;
3004 struct bufobj *bo;
3005 long starttime;
3006 struct thread *td = curthread;
3007 int last_work_seen;
3008 int net_worklist_len;
3009 int syncer_final_iter;
3010 int error;
3011
3012 last_work_seen = 0;
3013 syncer_final_iter = 0;
3014 syncer_state = SYNCER_RUNNING;
3015 starttime = time_uptime;
3016 td->td_pflags |= TDP_NORUNNINGBUF;
3017
3018 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
3019 SHUTDOWN_PRI_LAST);
3020
3021 mtx_lock(&sync_mtx);
3022 for (;;) {
3023 if (syncer_state == SYNCER_FINAL_DELAY &&
3024 syncer_final_iter == 0) {
3025 mtx_unlock(&sync_mtx);
3026 kproc_suspend_check(td->td_proc);
3027 mtx_lock(&sync_mtx);
3028 }
3029 net_worklist_len = syncer_worklist_len - sync_vnode_count;
3030 if (syncer_state != SYNCER_RUNNING &&
3031 starttime != time_uptime) {
3032 if (first_printf) {
3033 printf("\nSyncing disks, vnodes remaining... ");
3034 first_printf = 0;
3035 }
3036 printf("%d ", net_worklist_len);
3037 }
3038 starttime = time_uptime;
3039
3040 /*
3041 * Push files whose dirty time has expired. Be careful
3042 * of interrupt race on slp queue.
3043 *
3044 * Skip over empty worklist slots when shutting down.
3045 */
3046 do {
3047 slp = &syncer_workitem_pending[syncer_delayno];
3048 syncer_delayno += 1;
3049 if (syncer_delayno == syncer_maxdelay)
3050 syncer_delayno = 0;
3051 next = &syncer_workitem_pending[syncer_delayno];
3052 /*
3053 * If the worklist has wrapped since the
3054 * it was emptied of all but syncer vnodes,
3055 * switch to the FINAL_DELAY state and run
3056 * for one more second.
3057 */
3058 if (syncer_state == SYNCER_SHUTTING_DOWN &&
3059 net_worklist_len == 0 &&
3060 last_work_seen == syncer_delayno) {
3061 syncer_state = SYNCER_FINAL_DELAY;
3062 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
3063 }
3064 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
3065 syncer_worklist_len > 0);
3066
3067 /*
3068 * Keep track of the last time there was anything
3069 * on the worklist other than syncer vnodes.
3070 * Return to the SHUTTING_DOWN state if any
3071 * new work appears.
3072 */
3073 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
3074 last_work_seen = syncer_delayno;
3075 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
3076 syncer_state = SYNCER_SHUTTING_DOWN;
3077 while (!LIST_EMPTY(slp)) {
3078 error = sync_vnode(slp, &bo, td);
3079 if (error == 1) {
3080 LIST_REMOVE(bo, bo_synclist);
3081 LIST_INSERT_HEAD(next, bo, bo_synclist);
3082 continue;
3083 }
3084
3085 if (first_printf == 0) {
3086 /*
3087 * Drop the sync mutex, because some watchdog
3088 * drivers need to sleep while patting the watchdog.
3089 */
3090 mtx_unlock(&sync_mtx);
3091 wdog_kern_pat(WD_LASTVAL);
3092 mtx_lock(&sync_mtx);
3093 }
3094 }
3095 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
3096 syncer_final_iter--;
3097 /*
3098 * The variable rushjob allows the kernel to speed up the
3099 * processing of the filesystem syncer process. A rushjob
3100 * value of N tells the filesystem syncer to process the next
3101 * N seconds worth of work on its queue ASAP. Currently rushjob
3102 * is used by the soft update code to speed up the filesystem
3103 * syncer process when the incore state is getting so far
3104 * ahead of the disk that the kernel memory pool is being
3105 * threatened with exhaustion.
3106 */
3107 if (rushjob > 0) {
3108 rushjob -= 1;
3109 continue;
3110 }
3111 /*
3112 * Just sleep for a short period of time between
3113 * iterations when shutting down to allow some I/O
3114 * to happen.
3115 *
3116 * If it has taken us less than a second to process the
3117 * current work, then wait. Otherwise start right over
3118 * again. We can still lose time if any single round
3119 * takes more than two seconds, but it does not really
3120 * matter as we are just trying to generally pace the
3121 * filesystem activity.
3122 */
3123 if (syncer_state != SYNCER_RUNNING ||
3124 time_uptime == starttime) {
3125 thread_lock(td);
3126 sched_prio(td, PPAUSE);
3127 thread_unlock(td);
3128 }
3129 if (syncer_state != SYNCER_RUNNING)
3130 cv_timedwait(&sync_wakeup, &sync_mtx,
3131 hz / SYNCER_SHUTDOWN_SPEEDUP);
3132 else if (time_uptime == starttime)
3133 cv_timedwait(&sync_wakeup, &sync_mtx, hz);
3134 }
3135 }
3136
3137 /*
3138 * Request the syncer daemon to speed up its work.
3139 * We never push it to speed up more than half of its
3140 * normal turn time, otherwise it could take over the cpu.
3141 */
3142 int
3143 speedup_syncer(void)
3144 {
3145 int ret = 0;
3146
3147 mtx_lock(&sync_mtx);
3148 if (rushjob < syncdelay / 2) {
3149 rushjob += 1;
3150 stat_rush_requests += 1;
3151 ret = 1;
3152 }
3153 mtx_unlock(&sync_mtx);
3154 cv_broadcast(&sync_wakeup);
3155 return (ret);
3156 }
3157
3158 /*
3159 * Tell the syncer to speed up its work and run though its work
3160 * list several times, then tell it to shut down.
3161 */
3162 static void
3163 syncer_shutdown(void *arg, int howto)
3164 {
3165
3166 if (howto & RB_NOSYNC)
3167 return;
3168 mtx_lock(&sync_mtx);
3169 syncer_state = SYNCER_SHUTTING_DOWN;
3170 rushjob = 0;
3171 mtx_unlock(&sync_mtx);
3172 cv_broadcast(&sync_wakeup);
3173 kproc_shutdown(arg, howto);
3174 }
3175
3176 void
3177 syncer_suspend(void)
3178 {
3179
3180 syncer_shutdown(updateproc, 0);
3181 }
3182
3183 void
3184 syncer_resume(void)
3185 {
3186
3187 mtx_lock(&sync_mtx);
3188 first_printf = 1;
3189 syncer_state = SYNCER_RUNNING;
3190 mtx_unlock(&sync_mtx);
3191 cv_broadcast(&sync_wakeup);
3192 kproc_resume(updateproc);
3193 }
3194
3195 /*
3196 * Move the buffer between the clean and dirty lists of its vnode.
3197 */
3198 void
3199 reassignbuf(struct buf *bp)
3200 {
3201 struct vnode *vp;
3202 struct bufobj *bo;
3203 int delay;
3204 #ifdef INVARIANTS
3205 struct bufv *bv;
3206 #endif
3207
3208 vp = bp->b_vp;
3209 bo = bp->b_bufobj;
3210
3211 KASSERT((bp->b_flags & B_PAGING) == 0,
3212 ("%s: cannot reassign paging buffer %p", __func__, bp));
3213
3214 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
3215 bp, bp->b_vp, bp->b_flags);
3216
3217 BO_LOCK(bo);
3218 if ((bo->bo_flag & BO_NONSTERILE) == 0) {
3219 /*
3220 * Coordinate with getblk's unlocked lookup. Make
3221 * BO_NONSTERILE visible before the first reassignbuf produces
3222 * any side effect. This could be outside the bo lock if we
3223 * used a separate atomic flag field.
3224 */
3225 bo->bo_flag |= BO_NONSTERILE;
3226 atomic_thread_fence_rel();
3227 }
3228 buf_vlist_remove(bp);
3229
3230 /*
3231 * If dirty, put on list of dirty buffers; otherwise insert onto list
3232 * of clean buffers.
3233 */
3234 if (bp->b_flags & B_DELWRI) {
3235 if ((bo->bo_flag & BO_ONWORKLST) == 0) {
3236 switch (vp->v_type) {
3237 case VDIR:
3238 delay = dirdelay;
3239 break;
3240 case VCHR:
3241 delay = metadelay;
3242 break;
3243 default:
3244 delay = filedelay;
3245 }
3246 vn_syncer_add_to_worklist(bo, delay);
3247 }
3248 buf_vlist_add(bp, bo, BX_VNDIRTY);
3249 } else {
3250 buf_vlist_add(bp, bo, BX_VNCLEAN);
3251
3252 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
3253 mtx_lock(&sync_mtx);
3254 LIST_REMOVE(bo, bo_synclist);
3255 syncer_worklist_len--;
3256 mtx_unlock(&sync_mtx);
3257 bo->bo_flag &= ~BO_ONWORKLST;
3258 }
3259 }
3260 #ifdef INVARIANTS
3261 bv = &bo->bo_clean;
3262 bp = TAILQ_FIRST(&bv->bv_hd);
3263 KASSERT(bp == NULL || bp->b_bufobj == bo,
3264 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
3265 bp = TAILQ_LAST(&bv->bv_hd, buflists);
3266 KASSERT(bp == NULL || bp->b_bufobj == bo,
3267 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
3268 bv = &bo->bo_dirty;
3269 bp = TAILQ_FIRST(&bv->bv_hd);
3270 KASSERT(bp == NULL || bp->b_bufobj == bo,
3271 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
3272 bp = TAILQ_LAST(&bv->bv_hd, buflists);
3273 KASSERT(bp == NULL || bp->b_bufobj == bo,
3274 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
3275 #endif
3276 BO_UNLOCK(bo);
3277 }
3278
3279 static void
3280 v_init_counters(struct vnode *vp)
3281 {
3282
3283 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0,
3284 vp, ("%s called for an initialized vnode", __FUNCTION__));
3285 ASSERT_VI_UNLOCKED(vp, __FUNCTION__);
3286
3287 refcount_init(&vp->v_holdcnt, 1);
3288 refcount_init(&vp->v_usecount, 1);
3289 }
3290
3291 /*
3292 * Get a usecount on a vnode.
3293 *
3294 * vget and vget_finish may fail to lock the vnode if they lose a race against
3295 * it being doomed. LK_RETRY can be passed in flags to lock it anyway.
3296 *
3297 * Consumers which don't guarantee liveness of the vnode can use SMR to
3298 * try to get a reference. Note this operation can fail since the vnode
3299 * may already be awaiting being freed by the time they get to it.
3300 */
3301 enum vgetstate
3302 vget_prep_smr(struct vnode *vp)
3303 {
3304 enum vgetstate vs;
3305
3306 VFS_SMR_ASSERT_ENTERED();
3307
3308 if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
3309 vs = VGET_USECOUNT;
3310 } else {
3311 if (vhold_smr(vp))
3312 vs = VGET_HOLDCNT;
3313 else
3314 vs = VGET_NONE;
3315 }
3316 return (vs);
3317 }
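
/*
 * Illustrative sketch (editor's addition, not part of the original source)
 * of the lockless lookup pattern this supports; "lookup_smr()" stands in
 * for whatever produces the vnode pointer under the vfs smr section.
 *
 *	vfs_smr_enter();
 *	vp = lookup_smr(...);
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		(fall back to a locked lookup)
 *	error = vget_finish(vp, LK_SHARED, vs);
 */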
3318
3319 enum vgetstate
3320 vget_prep(struct vnode *vp)
3321 {
3322 enum vgetstate vs;
3323
3324 if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
3325 vs = VGET_USECOUNT;
3326 } else {
3327 vhold(vp);
3328 vs = VGET_HOLDCNT;
3329 }
3330 return (vs);
3331 }
3332
3333 void
3334 vget_abort(struct vnode *vp, enum vgetstate vs)
3335 {
3336
3337 switch (vs) {
3338 case VGET_USECOUNT:
3339 vrele(vp);
3340 break;
3341 case VGET_HOLDCNT:
3342 vdrop(vp);
3343 break;
3344 default:
3345 __assert_unreachable();
3346 }
3347 }
3348
3349 int
3350 vget(struct vnode *vp, int flags)
3351 {
3352 enum vgetstate vs;
3353
3354 vs = vget_prep(vp);
3355 return (vget_finish(vp, flags, vs));
3356 }
3357
3358 int
3359 vget_finish(struct vnode *vp, int flags, enum vgetstate vs)
3360 {
3361 int error;
3362
3363 if ((flags & LK_INTERLOCK) != 0)
3364 ASSERT_VI_LOCKED(vp, __func__);
3365 else
3366 ASSERT_VI_UNLOCKED(vp, __func__);
3367 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp);
3368 VNPASS(vp->v_holdcnt > 0, vp);
3369 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp);
3370
3371 error = vn_lock(vp, flags);
3372 if (__predict_false(error != 0)) {
3373 vget_abort(vp, vs);
3374 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__,
3375 vp);
3376 return (error);
3377 }
3378
3379 vget_finish_ref(vp, vs);
3380 return (0);
3381 }
3382
3383 void
3384 vget_finish_ref(struct vnode *vp, enum vgetstate vs)
3385 {
3386 int old;
3387
3388 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp);
3389 VNPASS(vp->v_holdcnt > 0, vp);
3390 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp);
3391
3392 if (vs == VGET_USECOUNT)
3393 return;
3394
3395 /*
3396 * We hold the vnode. If the usecount is 0 it will be utilized to keep
3397 * the vnode around. Otherwise someone else lent their hold count and
3398 * we have to drop ours.
3399 */
3400 old = atomic_fetchadd_int(&vp->v_usecount, 1);
3401 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old));
3402 if (old != 0) {
3403 #ifdef INVARIANTS
3404 old = atomic_fetchadd_int(&vp->v_holdcnt, -1);
3405 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old));
3406 #else
3407 refcount_release(&vp->v_holdcnt);
3408 #endif
3409 }
3410 }
3411
3412 void
3413 vref(struct vnode *vp)
3414 {
3415 enum vgetstate vs;
3416
3417 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3418 vs = vget_prep(vp);
3419 vget_finish_ref(vp, vs);
3420 }
3421
3422 void
3423 vrefact(struct vnode *vp)
3424 {
3425 int old __diagused;
3426
3427 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3428 old = refcount_acquire(&vp->v_usecount);
3429 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old));
3430 }
3431
3432 void
3433 vlazy(struct vnode *vp)
3434 {
3435 struct mount *mp;
3436
3437 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__));
3438
3439 if ((vp->v_mflag & VMP_LAZYLIST) != 0)
3440 return;
3441 /*
3442 * We may get here for inactive routines after the vnode got doomed.
3443 */
3444 if (VN_IS_DOOMED(vp))
3445 return;
3446 mp = vp->v_mount;
3447 mtx_lock(&mp->mnt_listmtx);
3448 if ((vp->v_mflag & VMP_LAZYLIST) == 0) {
3449 vp->v_mflag |= VMP_LAZYLIST;
3450 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist);
3451 mp->mnt_lazyvnodelistsize++;
3452 }
3453 mtx_unlock(&mp->mnt_listmtx);
3454 }
3455
3456 static void
3457 vunlazy(struct vnode *vp)
3458 {
3459 struct mount *mp;
3460
3461 ASSERT_VI_LOCKED(vp, __func__);
3462 VNPASS(!VN_IS_DOOMED(vp), vp);
3463
3464 mp = vp->v_mount;
3465 mtx_lock(&mp->mnt_listmtx);
3466 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
3467 /*
3468 * Don't remove the vnode from the lazy list if another thread
3469 * has increased the hold count. It may have re-enqueued the
3470 * vnode to the lazy list and is now responsible for its
3471 * removal.
3472 */
3473 if (vp->v_holdcnt == 0) {
3474 vp->v_mflag &= ~VMP_LAZYLIST;
3475 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist);
3476 mp->mnt_lazyvnodelistsize--;
3477 }
3478 mtx_unlock(&mp->mnt_listmtx);
3479 }
3480
3481 /*
3482 * This routine is only meant to be called from vgonel prior to dooming
3483 * the vnode.
3484 */
3485 static void
3486 vunlazy_gone(struct vnode *vp)
3487 {
3488 struct mount *mp;
3489
3490 ASSERT_VOP_ELOCKED(vp, __func__);
3491 ASSERT_VI_LOCKED(vp, __func__);
3492 VNPASS(!VN_IS_DOOMED(vp), vp);
3493
3494 if (vp->v_mflag & VMP_LAZYLIST) {
3495 mp = vp->v_mount;
3496 mtx_lock(&mp->mnt_listmtx);
3497 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
3498 vp->v_mflag &= ~VMP_LAZYLIST;
3499 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist);
3500 mp->mnt_lazyvnodelistsize--;
3501 mtx_unlock(&mp->mnt_listmtx);
3502 }
3503 }
3504
3505 static void
3506 vdefer_inactive(struct vnode *vp)
3507 {
3508
3509 ASSERT_VI_LOCKED(vp, __func__);
3510 VNPASS(vp->v_holdcnt > 0, vp);
3511 if (VN_IS_DOOMED(vp)) {
3512 vdropl(vp);
3513 return;
3514 }
3515 if (vp->v_iflag & VI_DEFINACT) {
3516 VNPASS(vp->v_holdcnt > 1, vp);
3517 vdropl(vp);
3518 return;
3519 }
3520 if (vp->v_usecount > 0) {
3521 vp->v_iflag &= ~VI_OWEINACT;
3522 vdropl(vp);
3523 return;
3524 }
3525 vlazy(vp);
3526 vp->v_iflag |= VI_DEFINACT;
3527 VI_UNLOCK(vp);
3528 atomic_add_long(&deferred_inact, 1);
3529 }
3530
3531 static void
3532 vdefer_inactive_unlocked(struct vnode *vp)
3533 {
3534
3535 VI_LOCK(vp);
3536 if ((vp->v_iflag & VI_OWEINACT) == 0) {
3537 vdropl(vp);
3538 return;
3539 }
3540 vdefer_inactive(vp);
3541 }
3542
3543 enum vput_op { VRELE, VPUT, VUNREF };
3544
3545 /*
3546 * Handle ->v_usecount transitioning to 0.
3547 *
3548 * By releasing the last usecount we take ownership of the hold count which
3549 * provides liveness of the vnode, meaning we have to vdrop.
3550 *
3551 * For all vnodes we may need to perform inactive processing. It requires an
3552 * exclusive lock on the vnode, while it is legal to call here with only a
3553 * shared lock (or no locks). If locking the vnode in an expected manner fails,
3554 * inactive processing gets deferred to the syncer.
3555 *
3556 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend
3557 * on the lock being held all the way until VOP_INACTIVE. This in particular
3558 * happens with UFS which adds half-constructed vnodes to the hash, where they
3559 * can be found by other code.
3560 */
3561 static void
3562 vput_final(struct vnode *vp, enum vput_op func)
3563 {
3564 int error;
3565 bool want_unlock;
3566
3567 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3568 VNPASS(vp->v_holdcnt > 0, vp);
3569
3570 VI_LOCK(vp);
3571
3572 /*
3573 * By the time we got here someone else might have transitioned
3574 * the count back to > 0.
3575 */
3576 if (vp->v_usecount > 0)
3577 goto out;
3578
3579 /*
3580 * If the vnode is doomed vgone already performed inactive processing
3581 * (if needed).
3582 */
3583 if (VN_IS_DOOMED(vp))
3584 goto out;
3585
3586 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0))
3587 goto out;
3588
3589 if (vp->v_iflag & VI_DOINGINACT)
3590 goto out;
3591
3592 /*
3593 * Locking operations here will drop the interlock and possibly the
3594 * vnode lock, opening a window where the vnode can get doomed all the
3595 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to
3596 * perform inactive.
3597 */
3598 vp->v_iflag |= VI_OWEINACT;
3599 want_unlock = false;
3600 error = 0;
3601 switch (func) {
3602 case VRELE:
3603 switch (VOP_ISLOCKED(vp)) {
3604 case LK_EXCLUSIVE:
3605 break;
3606 case LK_EXCLOTHER:
3607 case 0:
3608 want_unlock = true;
3609 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK);
3610 VI_LOCK(vp);
3611 break;
3612 default:
3613 /*
3614 * The lock has at least one sharer, but we have no way
3615 * to conclude whether this is us. Play it safe and
3616 * defer processing.
3617 */
3618 error = EAGAIN;
3619 break;
3620 }
3621 break;
3622 case VPUT:
3623 want_unlock = true;
3624 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
3625 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK |
3626 LK_NOWAIT);
3627 VI_LOCK(vp);
3628 }
3629 break;
3630 case VUNREF:
3631 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
3632 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK);
3633 VI_LOCK(vp);
3634 }
3635 break;
3636 }
3637 if (error == 0) {
3638 if (func == VUNREF) {
3639 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp,
3640 ("recursive vunref"));
3641 vp->v_vflag |= VV_UNREF;
3642 }
3643 for (;;) {
3644 error = vinactive(vp);
3645 if (want_unlock)
3646 VOP_UNLOCK(vp);
3647 if (error != ERELOOKUP || !want_unlock)
3648 break;
3649 VOP_LOCK(vp, LK_EXCLUSIVE);
3650 }
3651 if (func == VUNREF)
3652 vp->v_vflag &= ~VV_UNREF;
3653 vdropl(vp);
3654 } else {
3655 vdefer_inactive(vp);
3656 }
3657 return;
3658 out:
3659 if (func == VPUT)
3660 VOP_UNLOCK(vp);
3661 vdropl(vp);
3662 }
3663
3664 /*
3665 * Decrement ->v_usecount for a vnode.
3666 *
3667 * Releasing the last use count requires additional processing, see vput_final
3668 * above for details.
3669 *
3670 * Comment above each variant denotes lock state on entry and exit.
3671 */
3672
3673 /*
3674 * in: any
3675 * out: same as passed in
3676 */
3677 void
3678 vrele(struct vnode *vp)
3679 {
3680
3681 ASSERT_VI_UNLOCKED(vp, __func__);
3682 if (!refcount_release(&vp->v_usecount))
3683 return;
3684 vput_final(vp, VRELE);
3685 }
3686
3687 /*
3688 * in: locked
3689 * out: unlocked
3690 */
3691 void
3692 vput(struct vnode *vp)
3693 {
3694
3695 ASSERT_VOP_LOCKED(vp, __func__);
3696 ASSERT_VI_UNLOCKED(vp, __func__);
3697 if (!refcount_release(&vp->v_usecount)) {
3698 VOP_UNLOCK(vp);
3699 return;
3700 }
3701 vput_final(vp, VPUT);
3702 }
3703
3704 /*
3705 * in: locked
3706 * out: locked
3707 */
3708 void
3709 vunref(struct vnode *vp)
3710 {
3711
3712 ASSERT_VOP_LOCKED(vp, __func__);
3713 ASSERT_VI_UNLOCKED(vp, __func__);
3714 if (!refcount_release(&vp->v_usecount))
3715 return;
3716 vput_final(vp, VUNREF);
3717 }
3718
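/*
 * Usage sketch (illustrative only, not code from this file): a caller which
 * obeys the lock-state conventions documented above.
 *
 *	error = vget(vp, LK_EXCLUSIVE);
 *	if (error != 0)
 *		return (error);
 *	... operate on the exclusively locked vnode ...
 *	vput(vp);	use count dropped, vnode unlocked
 *
 * If the caller wants to keep the vnode lock across the release, vunref()
 * is used instead and the lock is dropped explicitly later:
 *
 *	vunref(vp);	use count dropped, vnode remains locked
 *	... more work under the vnode lock ...
 *	VOP_UNLOCK(vp);
 */
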
3719 void
3720 vhold(struct vnode *vp)
3721 {
3722 int old;
3723
3724 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3725 old = atomic_fetchadd_int(&vp->v_holdcnt, 1);
3726 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp,
3727 ("%s: wrong hold count %d", __func__, old));
3728 if (old == 0)
3729 vfs_freevnodes_dec();
3730 }
3731
3732 void
3733 vholdnz(struct vnode *vp)
3734 {
3735
3736 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3737 #ifdef INVARIANTS
3738 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1);
3739 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp,
3740 ("%s: wrong hold count %d", __func__, old));
3741 #else
3742 atomic_add_int(&vp->v_holdcnt, 1);
3743 #endif
3744 }
3745
3746 /*
3747 * Grab a hold count unless the vnode is freed.
3748 *
3749 * Only use this routine if vfs smr is the only protection you have against
3750 * freeing the vnode.
3751 *
3752 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag
3753 * is not set. After the flag is set the vnode becomes immutable to anyone but
3754 * the thread which managed to set the flag.
3755 *
3756 * It may be tempting to replace the loop with:
3757 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1);
3758 * if (count & VHOLD_NO_SMR) {
3759 * backpedal and error out;
3760 * }
3761 *
3762 * However, while this is more performant, it hinders debugging by eliminating
3763 * the previously mentioned invariant.
3764 */
3765 bool
3766 vhold_smr(struct vnode *vp)
3767 {
3768 int count;
3769
3770 VFS_SMR_ASSERT_ENTERED();
3771
3772 count = atomic_load_int(&vp->v_holdcnt);
3773 for (;;) {
3774 if (count & VHOLD_NO_SMR) {
3775 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp,
3776 ("non-zero hold count with flags %d\n", count));
3777 return (false);
3778 }
3779 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count));
3780 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) {
3781 if (count == 0)
3782 vfs_freevnodes_dec();
3783 return (true);
3784 }
3785 }
3786 }
3787
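/*
 * Usage sketch for vhold_smr (illustrative only, assumed caller): the thread
 * is inside a vfs SMR section and found the vnode through some lockless
 * lookup, so the hold has to be acquired before leaving the section.
 *
 *	vfs_smr_enter();
 *	vp = ... lockless lookup ...;
 *	if (!vhold_smr(vp)) {
 *		vfs_smr_exit();
 *		fall back to a locked lookup
 *	}
 *	vfs_smr_exit();
 *	... the hold count now keeps the vnode from being freed ...
 *	vdrop(vp);
 */
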
3788 /*
3789 * Hold a free vnode for recycling.
3790 *
3791 * Note: vnode_init references this comment.
3792 *
3793 * Attempts to recycle only need the global vnode list lock and have no use for
3794 * SMR.
3795 *
3796 * However, vnodes get inserted into the global list before they get fully
3797 * initialized and stay there until UMA decides to free the memory. This in
3798 * particular means the target can be found before it becomes usable and after
3799 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to
3800 * VHOLD_NO_SMR.
3801 *
3802 * Note: the vnode may gain more references after we transition the count 0->1.
3803 */
3804 static bool
3805 vhold_recycle_free(struct vnode *vp)
3806 {
3807 int count;
3808
3809 mtx_assert(&vnode_list_mtx, MA_OWNED);
3810
3811 count = atomic_load_int(&vp->v_holdcnt);
3812 for (;;) {
3813 if (count & VHOLD_NO_SMR) {
3814 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp,
3815 ("non-zero hold count with flags %d\n", count));
3816 return (false);
3817 }
3818 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count));
3819 if (count > 0) {
3820 return (false);
3821 }
3822 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) {
3823 vfs_freevnodes_dec();
3824 return (true);
3825 }
3826 }
3827 }
3828
3829 static void __noinline
3830 vdbatch_process(struct vdbatch *vd)
3831 {
3832 struct vnode *vp;
3833 int i;
3834
3835 mtx_assert(&vd->lock, MA_OWNED);
3836 MPASS(curthread->td_pinned > 0);
3837 MPASS(vd->index == VDBATCH_SIZE);
3838
3839 /*
3840 * Attempt to requeue the passed batch, but give up easily.
3841 *
3842 * Despite batching the mechanism is prone to transient *significant*
3843 * lock contention, where vnode_list_mtx becomes the primary bottleneck
3844 * if multiple CPUs get here (one real-world example is highly parallel
3845 * do-nothing make, which will stat *tons* of vnodes). Since it is
3846 * quasi-LRU (read: not that great even if fully honoured) provide an
3847 * option to just dodge the problem. Parties which don't like it are
3848 * welcome to implement something better.
3849 */
3850 if (vnode_can_skip_requeue) {
3851 if (!mtx_trylock(&vnode_list_mtx)) {
3852 counter_u64_add(vnode_skipped_requeues, 1);
3853 critical_enter();
3854 for (i = 0; i < VDBATCH_SIZE; i++) {
3855 vp = vd->tab[i];
3856 vd->tab[i] = NULL;
3857 MPASS(vp->v_dbatchcpu != NOCPU);
3858 vp->v_dbatchcpu = NOCPU;
3859 }
3860 vd->index = 0;
3861 critical_exit();
3862 return;
3863
3864 }
3865 /* fallthrough to locked processing */
3866 } else {
3867 mtx_lock(&vnode_list_mtx);
3868 }
3869
3870 mtx_assert(&vnode_list_mtx, MA_OWNED);
3871 critical_enter();
3872 for (i = 0; i < VDBATCH_SIZE; i++) {
3873 vp = vd->tab[i];
3874 vd->tab[i] = NULL;
3875 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
3876 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist);
3877 MPASS(vp->v_dbatchcpu != NOCPU);
3878 vp->v_dbatchcpu = NOCPU;
3879 }
3880 mtx_unlock(&vnode_list_mtx);
3881 vd->index = 0;
3882 critical_exit();
3883 }
3884
3885 static void
3886 vdbatch_enqueue(struct vnode *vp)
3887 {
3888 struct vdbatch *vd;
3889
3890 ASSERT_VI_LOCKED(vp, __func__);
3891 VNPASS(!VN_IS_DOOMED(vp), vp);
3892
3893 if (vp->v_dbatchcpu != NOCPU) {
3894 VI_UNLOCK(vp);
3895 return;
3896 }
3897
3898 sched_pin();
3899 vd = DPCPU_PTR(vd);
3900 mtx_lock(&vd->lock);
3901 MPASS(vd->index < VDBATCH_SIZE);
3902 MPASS(vd->tab[vd->index] == NULL);
3903 /*
3904 * A hack: we depend on being pinned so that we know what to put in
3905 * ->v_dbatchcpu.
3906 */
3907 vp->v_dbatchcpu = curcpu;
3908 vd->tab[vd->index] = vp;
3909 vd->index++;
3910 VI_UNLOCK(vp);
3911 if (vd->index == VDBATCH_SIZE)
3912 vdbatch_process(vd);
3913 mtx_unlock(&vd->lock);
3914 sched_unpin();
3915 }
3916
3917 /*
3918 * This routine must only be called for vnodes which are about to be
3919 * deallocated. Supporting dequeue for arbitrary vnodes would require
3920 * validating that the locked batch matches.
3921 */
3922 static void
3923 vdbatch_dequeue(struct vnode *vp)
3924 {
3925 struct vdbatch *vd;
3926 int i;
3927 short cpu;
3928
3929 VNPASS(vp->v_type == VBAD || vp->v_type == VNON, vp);
3930
3931 cpu = vp->v_dbatchcpu;
3932 if (cpu == NOCPU)
3933 return;
3934
3935 vd = DPCPU_ID_PTR(cpu, vd);
3936 mtx_lock(&vd->lock);
3937 for (i = 0; i < vd->index; i++) {
3938 if (vd->tab[i] != vp)
3939 continue;
3940 vp->v_dbatchcpu = NOCPU;
3941 vd->index--;
3942 vd->tab[i] = vd->tab[vd->index];
3943 vd->tab[vd->index] = NULL;
3944 break;
3945 }
3946 mtx_unlock(&vd->lock);
3947 /*
3948 * Either we dequeued the vnode above or the target CPU beat us to it.
3949 */
3950 MPASS(vp->v_dbatchcpu == NOCPU);
3951 }
3952
3953 /*
3954 * Drop the hold count of the vnode.
3955 *
3956 * It will only get freed if this is the last hold *and* it has been vgone'd.
3957 *
3958 * Because the vnode vm object keeps a hold reference on the vnode if
3959 * there is at least one resident non-cached page, the vnode cannot
3960 * leave the active list without the page cleanup done.
3961 */
3962 static void __noinline
3963 vdropl_final(struct vnode *vp)
3964 {
3965
3966 ASSERT_VI_LOCKED(vp, __func__);
3967 VNPASS(VN_IS_DOOMED(vp), vp);
3968 /*
3969 * Set the VHOLD_NO_SMR flag.
3970 *
3971 * We may be racing against vhold_smr. If they win we can just pretend
3972 * we never got this far, they will vdrop later.
3973 */
3974 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) {
3975 vfs_freevnodes_inc();
3976 VI_UNLOCK(vp);
3977 /*
3978 * We lost the aforementioned race. Any subsequent access is
3979 * invalid as they might have managed to vdropl on their own.
3980 */
3981 return;
3982 }
3983 /*
3984 * Don't bump freevnodes as this one is going away.
3985 */
3986 freevnode(vp);
3987 }
3988
3989 void
3990 vdrop(struct vnode *vp)
3991 {
3992
3993 ASSERT_VI_UNLOCKED(vp, __func__);
3994 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3995 if (refcount_release_if_not_last(&vp->v_holdcnt))
3996 return;
3997 VI_LOCK(vp);
3998 vdropl(vp);
3999 }
4000
4001 static __always_inline void
4002 vdropl_impl(struct vnode *vp, bool enqueue)
4003 {
4004
4005 ASSERT_VI_LOCKED(vp, __func__);
4006 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4007 if (!refcount_release(&vp->v_holdcnt)) {
4008 VI_UNLOCK(vp);
4009 return;
4010 }
4011 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp);
4012 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp);
4013 if (VN_IS_DOOMED(vp)) {
4014 vdropl_final(vp);
4015 return;
4016 }
4017
4018 vfs_freevnodes_inc();
4019 if (vp->v_mflag & VMP_LAZYLIST) {
4020 vunlazy(vp);
4021 }
4022
4023 if (!enqueue) {
4024 VI_UNLOCK(vp);
4025 return;
4026 }
4027
4028 /*
4029 * Also unlocks the interlock. We can't assert on it as we
4030 * released our hold and by now the vnode might have been
4031 * freed.
4032 */
4033 vdbatch_enqueue(vp);
4034 }
4035
4036 void
4037 vdropl(struct vnode *vp)
4038 {
4039
4040 vdropl_impl(vp, true);
4041 }
4042
4043 /*
4044 * vdrop a vnode when recycling
4045 *
4046 * This is a special case routine only to be used when recycling; it differs from
4047 * regular vdrop by not requeueing the vnode on the LRU.
4048 *
4049 * Consider a case where vtryrecycle continuously fails with all vnodes (due to
4050 * e.g., frozen writes on the filesystem), filling the batch and causing it to
4051 * be requeued. Then vnlru will end up revisiting the same vnodes. This is a
4052 * loop which can last for as long as writes are frozen.
4053 */
4054 static void
4055 vdropl_recycle(struct vnode *vp)
4056 {
4057
4058 vdropl_impl(vp, false);
4059 }
4060
4061 static void
4062 vdrop_recycle(struct vnode *vp)
4063 {
4064
4065 VI_LOCK(vp);
4066 vdropl_recycle(vp);
4067 }
4068
4069 /*
4070 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
4071 * flags. DOINGINACT prevents us from recursing in calls to vinactive.
4072 */
4073 static int
4074 vinactivef(struct vnode *vp)
4075 {
4076 int error;
4077
4078 ASSERT_VOP_ELOCKED(vp, "vinactive");
4079 ASSERT_VI_LOCKED(vp, "vinactive");
4080 VNPASS((vp->v_iflag & VI_DOINGINACT) == 0, vp);
4081 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4082 vp->v_iflag |= VI_DOINGINACT;
4083 vp->v_iflag &= ~VI_OWEINACT;
4084 VI_UNLOCK(vp);
4085
4086 /*
4087 * Before moving off the active list, we must be sure that any
4088 * modified pages are converted into the vnode's dirty
4089 * buffers, since these will no longer be checked once the
4090 * vnode is on the inactive list.
4091 *
4092 * The write-out of the dirty pages is asynchronous. At the
4093 * point that VOP_INACTIVE() is called, there could still be
4094 * pending I/O and dirty pages in the object.
4095 */
4096 if ((vp->v_vflag & VV_NOSYNC) == 0)
4097 vnode_pager_clean_async(vp);
4098
4099 error = VOP_INACTIVE(vp);
4100 VI_LOCK(vp);
4101 VNPASS(vp->v_iflag & VI_DOINGINACT, vp);
4102 vp->v_iflag &= ~VI_DOINGINACT;
4103 return (error);
4104 }
4105
4106 int
4107 vinactive(struct vnode *vp)
4108 {
4109
4110 ASSERT_VOP_ELOCKED(vp, "vinactive");
4111 ASSERT_VI_LOCKED(vp, "vinactive");
4112 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4113
4114 if ((vp->v_iflag & VI_OWEINACT) == 0)
4115 return (0);
4116 if (vp->v_iflag & VI_DOINGINACT)
4117 return (0);
4118 if (vp->v_usecount > 0) {
4119 vp->v_iflag &= ~VI_OWEINACT;
4120 return (0);
4121 }
4122 return (vinactivef(vp));
4123 }
4124
4125 /*
4126 * Remove any vnodes in the vnode table belonging to mount point mp.
4127 *
4128 * If FORCECLOSE is not specified, there should not be any active ones,
4129 * return error if any are found (nb: this is a user error, not a
4130 * system error). If FORCECLOSE is specified, detach any active vnodes
4131 * that are found.
4132 *
4133 * If WRITECLOSE is set, only flush out regular file vnodes open for
4134 * writing.
4135 *
4136 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
4137 *
4138 * `rootrefs' specifies the base reference count for the root vnode
4139 * of this filesystem. The root vnode is considered busy if its
4140 * v_usecount exceeds this value. On a successful return, vflush()
4141 * will call vrele() on the root vnode exactly rootrefs times.
4142 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
4143 * be zero.
4144 */
4145 #ifdef DIAGNOSTIC
4146 static int busyprt = 0; /* print out busy vnodes */
4147 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes");
4148 #endif
4149
4150 int
4151 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
4152 {
4153 struct vnode *vp, *mvp, *rootvp = NULL;
4154 struct vattr vattr;
4155 int busy = 0, error;
4156
4157 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp,
4158 rootrefs, flags);
4159 if (rootrefs > 0) {
4160 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
4161 ("vflush: bad args"));
4162 /*
4163 * Get the filesystem root vnode. We can vput() it
4164 * immediately, since with rootrefs > 0, it won't go away.
4165 */
4166 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) {
4167 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d",
4168 __func__, error);
4169 return (error);
4170 }
4171 vput(rootvp);
4172 }
4173 loop:
4174 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
4175 vholdl(vp);
4176 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
4177 if (error) {
4178 vdrop(vp);
4179 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
4180 goto loop;
4181 }
4182 /*
4183 * Skip over vnodes marked VV_SYSTEM.
4184 */
4185 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
4186 VOP_UNLOCK(vp);
4187 vdrop(vp);
4188 continue;
4189 }
4190 /*
4191 * If WRITECLOSE is set, flush out unlinked but still open
4192 * files (even if open only for reading) and regular file
4193 * vnodes open for writing.
4194 */
4195 if (flags & WRITECLOSE) {
4196 vnode_pager_clean_async(vp);
4197 do {
4198 error = VOP_FSYNC(vp, MNT_WAIT, td);
4199 } while (error == ERELOOKUP);
4200 if (error != 0) {
4201 VOP_UNLOCK(vp);
4202 vdrop(vp);
4203 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
4204 return (error);
4205 }
4206 error = VOP_GETATTR(vp, &vattr, td->td_ucred);
4207 VI_LOCK(vp);
4208
4209 if ((vp->v_type == VNON ||
4210 (error == 0 && vattr.va_nlink > 0)) &&
4211 (vp->v_writecount <= 0 || vp->v_type != VREG)) {
4212 VOP_UNLOCK(vp);
4213 vdropl(vp);
4214 continue;
4215 }
4216 } else
4217 VI_LOCK(vp);
4218 /*
4219 * With v_usecount == 0, all we need to do is clear out the
4220 * vnode data structures and we are done.
4221 *
4222 * If FORCECLOSE is set, forcibly close the vnode.
4223 */
4224 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) {
4225 vgonel(vp);
4226 } else {
4227 busy++;
4228 #ifdef DIAGNOSTIC
4229 if (busyprt)
4230 vn_printf(vp, "vflush: busy vnode ");
4231 #endif
4232 }
4233 VOP_UNLOCK(vp);
4234 vdropl(vp);
4235 }
4236 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
4237 /*
4238 * If just the root vnode is busy, and if its refcount
4239 * is equal to `rootrefs', then go ahead and kill it.
4240 */
4241 VI_LOCK(rootvp);
4242 KASSERT(busy > 0, ("vflush: not busy"));
4243 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp,
4244 ("vflush: usecount %d < rootrefs %d",
4245 rootvp->v_usecount, rootrefs));
4246 if (busy == 1 && rootvp->v_usecount == rootrefs) {
4247 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK);
4248 vgone(rootvp);
4249 VOP_UNLOCK(rootvp);
4250 busy = 0;
4251 } else
4252 VI_UNLOCK(rootvp);
4253 }
4254 if (busy) {
4255 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__,
4256 busy);
4257 return (EBUSY);
4258 }
4259 for (; rootrefs > 0; rootrefs--)
4260 vrele(rootvp);
4261 return (0);
4262 }
4263
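/*
 * Usage sketch (illustrative only): a filesystem unmount path typically
 * flushes everything but its own special vnodes, forcing the issue when a
 * forced unmount was requested. The flags below describe a hypothetical
 * caller, not a recipe taken from any particular filesystem.
 *
 *	flags = SKIPSYSTEM;
 *	if (mntflags & MNT_FORCE)
 *		flags |= FORCECLOSE;
 *	error = vflush(mp, 0, flags, td);
 *	if (error != 0)
 *		return (error);
 */
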
4264 /*
4265 * Recycle an unused vnode.
4266 */
4267 int
4268 vrecycle(struct vnode *vp)
4269 {
4270 int recycled;
4271
4272 VI_LOCK(vp);
4273 recycled = vrecyclel(vp);
4274 VI_UNLOCK(vp);
4275 return (recycled);
4276 }
4277
4278 /*
4279 * vrecycle, with the vp interlock held.
4280 */
4281 int
4282 vrecyclel(struct vnode *vp)
4283 {
4284 int recycled;
4285
4286 ASSERT_VOP_ELOCKED(vp, __func__);
4287 ASSERT_VI_LOCKED(vp, __func__);
4288 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4289 recycled = 0;
4290 if (vp->v_usecount == 0) {
4291 recycled = 1;
4292 vgonel(vp);
4293 }
4294 return (recycled);
4295 }
4296
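/*
 * Usage sketch (illustrative only, names hypothetical): a filesystem which
 * knows the on-disk object is gone can ask for immediate recycling from its
 * inactive routine:
 *
 *	if (ip->i_nlink <= 0)
 *		(void)vrecycle(vp);
 *
 * The "ip" above stands for a per-filesystem inode; the call degenerates to
 * vgonel() only when the use count is still zero by the time the interlock
 * is taken, otherwise it reports 0 and leaves the vnode alone.
 */
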
4297 /*
4298 * Eliminate all activity associated with a vnode
4299 * in preparation for reuse.
4300 */
4301 void
4302 vgone(struct vnode *vp)
4303 {
4304 VI_LOCK(vp);
4305 vgonel(vp);
4306 VI_UNLOCK(vp);
4307 }
4308
4309 /*
4310 * Notify upper mounts about reclaimed or unlinked vnode.
4311 */
4312 void
4313 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event)
4314 {
4315 struct mount *mp;
4316 struct mount_upper_node *ump;
4317
4318 mp = atomic_load_ptr(&vp->v_mount);
4319 if (mp == NULL)
4320 return;
4321 if (TAILQ_EMPTY(&mp->mnt_notify))
4322 return;
4323
4324 MNT_ILOCK(mp);
4325 mp->mnt_upper_pending++;
4326 KASSERT(mp->mnt_upper_pending > 0,
4327 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending));
4328 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) {
4329 MNT_IUNLOCK(mp);
4330 switch (event) {
4331 case VFS_NOTIFY_UPPER_RECLAIM:
4332 VFS_RECLAIM_LOWERVP(ump->mp, vp);
4333 break;
4334 case VFS_NOTIFY_UPPER_UNLINK:
4335 VFS_UNLINK_LOWERVP(ump->mp, vp);
4336 break;
4337 }
4338 MNT_ILOCK(mp);
4339 }
4340 mp->mnt_upper_pending--;
4341 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 &&
4342 mp->mnt_upper_pending == 0) {
4343 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER;
4344 wakeup(&mp->mnt_uppers);
4345 }
4346 MNT_IUNLOCK(mp);
4347 }
4348
4349 /*
4350 * vgone, with the vp interlock held.
4351 */
4352 static void
4353 vgonel(struct vnode *vp)
4354 {
4355 struct thread *td;
4356 struct mount *mp;
4357 vm_object_t object;
4358 bool active, doinginact, oweinact;
4359
4360 ASSERT_VOP_ELOCKED(vp, "vgonel");
4361 ASSERT_VI_LOCKED(vp, "vgonel");
4362 VNASSERT(vp->v_holdcnt, vp,
4363 ("vgonel: vp %p has no reference.", vp));
4364 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4365 td = curthread;
4366
4367 /*
4368 * Don't vgonel if we're already doomed.
4369 */
4370 if (VN_IS_DOOMED(vp)) {
4371 VNPASS(vn_get_state(vp) == VSTATE_DESTROYING || \
4372 vn_get_state(vp) == VSTATE_DEAD, vp);
4373 return;
4374 }
4375 /*
4376 * Paired with freevnode.
4377 */
4378 vn_seqc_write_begin_locked(vp);
4379 vunlazy_gone(vp);
4380 vn_irflag_set_locked(vp, VIRF_DOOMED);
4381 vn_set_state(vp, VSTATE_DESTROYING);
4382
4383 /*
4384 * Check to see if the vnode is in use. If so, we have to
4385 * call VOP_CLOSE() and VOP_INACTIVE().
4386 *
4387 * It could be that VOP_INACTIVE() requested reclamation, in
4388 * which case we should avoid recursion, so check
4389 * VI_DOINGINACT. This is not precise but good enough.
4390 */
4391 active = vp->v_usecount > 0;
4392 oweinact = (vp->v_iflag & VI_OWEINACT) != 0;
4393 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0;
4394
4395 /*
4396 * If we need to do inactive VI_OWEINACT will be set.
4397 */
4398 if (vp->v_iflag & VI_DEFINACT) {
4399 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count"));
4400 vp->v_iflag &= ~VI_DEFINACT;
4401 vdropl(vp);
4402 } else {
4403 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count"));
4404 VI_UNLOCK(vp);
4405 }
4406 cache_purge_vgone(vp);
4407 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM);
4408
4409 /*
4410 * If purging an active vnode, it must be closed and
4411 * deactivated before being reclaimed.
4412 */
4413 if (active)
4414 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
4415 if (!doinginact) {
4416 do {
4417 if (oweinact || active) {
4418 VI_LOCK(vp);
4419 vinactivef(vp);
4420 oweinact = (vp->v_iflag & VI_OWEINACT) != 0;
4421 VI_UNLOCK(vp);
4422 }
4423 } while (oweinact);
4424 }
4425 if (vp->v_type == VSOCK)
4426 vfs_unp_reclaim(vp);
4427
4428 /*
4429 * Clean out any buffers associated with the vnode.
4430 * If the flush fails, just toss the buffers.
4431 */
4432 mp = NULL;
4433 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
4434 (void) vn_start_secondary_write(vp, &mp, V_WAIT);
4435 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) {
4436 while (vinvalbuf(vp, 0, 0, 0) != 0)
4437 ;
4438 }
4439
4440 BO_LOCK(&vp->v_bufobj);
4441 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) &&
4442 vp->v_bufobj.bo_dirty.bv_cnt == 0 &&
4443 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) &&
4444 vp->v_bufobj.bo_clean.bv_cnt == 0,
4445 ("vp %p bufobj not invalidated", vp));
4446
4447 /*
4448 * For VMIO bufobj, BO_DEAD is set later, or in
4449 * vm_object_terminate() after the object's page queue is
4450 * flushed.
4451 */
4452 object = vp->v_bufobj.bo_object;
4453 if (object == NULL)
4454 vp->v_bufobj.bo_flag |= BO_DEAD;
4455 BO_UNLOCK(&vp->v_bufobj);
4456
4457 /*
4458 * Handle the VM part. Tmpfs handles v_object on its own (the
4459 * OBJT_VNODE check). Nullfs or other bypassing filesystems
4460 * should not touch the object borrowed from the lower vnode
4461 * (the handle check).
4462 */
4463 if (object != NULL && object->type == OBJT_VNODE &&
4464 object->handle == vp)
4465 vnode_destroy_vobject(vp);
4466
4467 /*
4468 * Reclaim the vnode.
4469 */
4470 if (VOP_RECLAIM(vp))
4471 panic("vgone: cannot reclaim");
4472 if (mp != NULL)
4473 vn_finished_secondary_write(mp);
4474 VNASSERT(vp->v_object == NULL, vp,
4475 ("vop_reclaim left v_object vp=%p", vp));
4476 /*
4477 * Clear the advisory locks and wake up waiting threads.
4478 */
4479 if (vp->v_lockf != NULL) {
4480 (void)VOP_ADVLOCKPURGE(vp);
4481 vp->v_lockf = NULL;
4482 }
4483 /*
4484 * Delete from old mount point vnode list.
4485 */
4486 if (vp->v_mount == NULL) {
4487 VI_LOCK(vp);
4488 } else {
4489 delmntque(vp);
4490 ASSERT_VI_LOCKED(vp, "vgonel 2");
4491 }
4492 /*
4493 * Done with purge, reset to the standard lock and invalidate
4494 * the vnode.
4495 */
4496 vp->v_vnlock = &vp->v_lock;
4497 vp->v_op = &dead_vnodeops;
4498 vp->v_type = VBAD;
4499 vn_set_state(vp, VSTATE_DEAD);
4500 }
4501
4502 /*
4503 * Print out a description of a vnode.
4504 */
4505 static const char *const vtypename[] = {
4506 [VNON] = "VNON",
4507 [VREG] = "VREG",
4508 [VDIR] = "VDIR",
4509 [VBLK] = "VBLK",
4510 [VCHR] = "VCHR",
4511 [VLNK] = "VLNK",
4512 [VSOCK] = "VSOCK",
4513 [VFIFO] = "VFIFO",
4514 [VBAD] = "VBAD",
4515 [VMARKER] = "VMARKER",
4516 };
4517 _Static_assert(nitems(vtypename) == VLASTTYPE + 1,
4518 "vnode type name not added to vtypename");
4519
4520 static const char *const vstatename[] = {
4521 [VSTATE_UNINITIALIZED] = "VSTATE_UNINITIALIZED",
4522 [VSTATE_CONSTRUCTED] = "VSTATE_CONSTRUCTED",
4523 [VSTATE_DESTROYING] = "VSTATE_DESTROYING",
4524 [VSTATE_DEAD] = "VSTATE_DEAD",
4525 };
4526 _Static_assert(nitems(vstatename) == VLASTSTATE + 1,
4527 "vnode state name not added to vstatename");
4528
4529 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0,
4530 "new hold count flag not added to vn_printf");
4531
4532 void
4533 vn_printf(struct vnode *vp, const char *fmt, ...)
4534 {
4535 va_list ap;
4536 char buf[256], buf2[16];
4537 u_long flags;
4538 u_int holdcnt;
4539 short irflag;
4540
4541 va_start(ap, fmt);
4542 vprintf(fmt, ap);
4543 va_end(ap);
4544 printf("%p: ", (void *)vp);
4545 printf("type %s state %s op %p\n", vtypename[vp->v_type],
4546 vstatename[vp->v_state], vp->v_op);
4547 holdcnt = atomic_load_int(&vp->v_holdcnt);
4548 printf(" usecount %d, writecount %d, refcount %d seqc users %d",
4549 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS,
4550 vp->v_seqc_users);
4551 switch (vp->v_type) {
4552 case VDIR:
4553 printf(" mountedhere %p\n", vp->v_mountedhere);
4554 break;
4555 case VCHR:
4556 printf(" rdev %p\n", vp->v_rdev);
4557 break;
4558 case VSOCK:
4559 printf(" socket %p\n", vp->v_unpcb);
4560 break;
4561 case VFIFO:
4562 printf(" fifoinfo %p\n", vp->v_fifoinfo);
4563 break;
4564 default:
4565 printf("\n");
4566 break;
4567 }
4568 buf[0] = '\0';
4569 buf[1] = '\0';
4570 if (holdcnt & VHOLD_NO_SMR)
4571 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf));
4572 printf(" hold count flags (%s)\n", buf + 1);
4573
4574 buf[0] = '\0';
4575 buf[1] = '\0';
4576 irflag = vn_irflag_read(vp);
4577 if (irflag & VIRF_DOOMED)
4578 strlcat(buf, "|VIRF_DOOMED", sizeof(buf));
4579 if (irflag & VIRF_PGREAD)
4580 strlcat(buf, "|VIRF_PGREAD", sizeof(buf));
4581 if (irflag & VIRF_MOUNTPOINT)
4582 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf));
4583 if (irflag & VIRF_TEXT_REF)
4584 strlcat(buf, "|VIRF_TEXT_REF", sizeof(buf));
4585 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT | VIRF_TEXT_REF);
4586 if (flags != 0) {
4587 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags);
4588 strlcat(buf, buf2, sizeof(buf));
4589 }
4590 if (vp->v_vflag & VV_ROOT)
4591 strlcat(buf, "|VV_ROOT", sizeof(buf));
4592 if (vp->v_vflag & VV_ISTTY)
4593 strlcat(buf, "|VV_ISTTY", sizeof(buf));
4594 if (vp->v_vflag & VV_NOSYNC)
4595 strlcat(buf, "|VV_NOSYNC", sizeof(buf));
4596 if (vp->v_vflag & VV_ETERNALDEV)
4597 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf));
4598 if (vp->v_vflag & VV_CACHEDLABEL)
4599 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf));
4600 if (vp->v_vflag & VV_VMSIZEVNLOCK)
4601 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf));
4602 if (vp->v_vflag & VV_COPYONWRITE)
4603 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf));
4604 if (vp->v_vflag & VV_SYSTEM)
4605 strlcat(buf, "|VV_SYSTEM", sizeof(buf));
4606 if (vp->v_vflag & VV_PROCDEP)
4607 strlcat(buf, "|VV_PROCDEP", sizeof(buf));
4608 if (vp->v_vflag & VV_DELETED)
4609 strlcat(buf, "|VV_DELETED", sizeof(buf));
4610 if (vp->v_vflag & VV_MD)
4611 strlcat(buf, "|VV_MD", sizeof(buf));
4612 if (vp->v_vflag & VV_FORCEINSMQ)
4613 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf));
4614 if (vp->v_vflag & VV_READLINK)
4615 strlcat(buf, "|VV_READLINK", sizeof(buf));
4616 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV |
4617 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM |
4618 VV_PROCDEP | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK);
4619 if (flags != 0) {
4620 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags);
4621 strlcat(buf, buf2, sizeof(buf));
4622 }
4623 if (vp->v_iflag & VI_MOUNT)
4624 strlcat(buf, "|VI_MOUNT", sizeof(buf));
4625 if (vp->v_iflag & VI_DOINGINACT)
4626 strlcat(buf, "|VI_DOINGINACT", sizeof(buf));
4627 if (vp->v_iflag & VI_OWEINACT)
4628 strlcat(buf, "|VI_OWEINACT", sizeof(buf));
4629 if (vp->v_iflag & VI_DEFINACT)
4630 strlcat(buf, "|VI_DEFINACT", sizeof(buf));
4631 if (vp->v_iflag & VI_FOPENING)
4632 strlcat(buf, "|VI_FOPENING", sizeof(buf));
4633 flags = vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT |
4634 VI_OWEINACT | VI_DEFINACT | VI_FOPENING);
4635 if (flags != 0) {
4636 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags);
4637 strlcat(buf, buf2, sizeof(buf));
4638 }
4639 if (vp->v_mflag & VMP_LAZYLIST)
4640 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf));
4641 flags = vp->v_mflag & ~(VMP_LAZYLIST);
4642 if (flags != 0) {
4643 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags);
4644 strlcat(buf, buf2, sizeof(buf));
4645 }
4646 printf(" flags (%s)", buf + 1);
4647 if (mtx_owned(VI_MTX(vp)))
4648 printf(" VI_LOCKed");
4649 printf("\n");
4650 if (vp->v_object != NULL)
4651 printf(" v_object %p ref %d pages %d "
4652 "cleanbuf %d dirtybuf %d\n",
4653 vp->v_object, vp->v_object->ref_count,
4654 vp->v_object->resident_page_count,
4655 vp->v_bufobj.bo_clean.bv_cnt,
4656 vp->v_bufobj.bo_dirty.bv_cnt);
4657 printf(" ");
4658 lockmgr_printinfo(vp->v_vnlock);
4659 if (vp->v_data != NULL)
4660 VOP_PRINT(vp);
4661 }
4662
4663 #ifdef DDB
4664 /*
4665 * List all of the locked vnodes in the system.
4666 * Called when debugging the kernel.
4667 */
4668 DB_SHOW_COMMAND_FLAGS(lockedvnods, lockedvnodes, DB_CMD_MEMSAFE)
4669 {
4670 struct mount *mp;
4671 struct vnode *vp;
4672
4673 /*
4674 * Note: because this is DDB, we can't obey the locking semantics
4675 * for these structures, which means we could catch an inconsistent
4676 * state and dereference a nasty pointer. Not much to be done
4677 * about that.
4678 */
4679 db_printf("Locked vnodes\n");
4680 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4681 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
4682 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp))
4683 vn_printf(vp, "vnode ");
4684 }
4685 }
4686 }
4687
4688 /*
4689 * Show details about the given vnode.
4690 */
4691 DB_SHOW_COMMAND(vnode, db_show_vnode)
4692 {
4693 struct vnode *vp;
4694
4695 if (!have_addr)
4696 return;
4697 vp = (struct vnode *)addr;
4698 vn_printf(vp, "vnode ");
4699 }
4700
4701 /*
4702 * Show details about the given mount point.
4703 */
4704 DB_SHOW_COMMAND(mount, db_show_mount)
4705 {
4706 struct mount *mp;
4707 struct vfsopt *opt;
4708 struct statfs *sp;
4709 struct vnode *vp;
4710 char buf[512];
4711 uint64_t mflags;
4712 u_int flags;
4713
4714 if (!have_addr) {
4715 /* No address given, print short info about all mount points. */
4716 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4717 db_printf("%p %s on %s (%s)\n", mp,
4718 mp->mnt_stat.f_mntfromname,
4719 mp->mnt_stat.f_mntonname,
4720 mp->mnt_stat.f_fstypename);
4721 if (db_pager_quit)
4722 break;
4723 }
4724 db_printf("\nMore info: show mount <addr>\n");
4725 return;
4726 }
4727
4728 mp = (struct mount *)addr;
4729 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname,
4730 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename);
4731
4732 buf[0] = '\0';
4733 mflags = mp->mnt_flag;
4734 #define MNT_FLAG(flag) do { \
4735 if (mflags & (flag)) { \
4736 if (buf[0] != '\0') \
4737 strlcat(buf, ", ", sizeof(buf)); \
4738 strlcat(buf, (#flag) + 4, sizeof(buf)); \
4739 mflags &= ~(flag); \
4740 } \
4741 } while (0)
4742 MNT_FLAG(MNT_RDONLY);
4743 MNT_FLAG(MNT_SYNCHRONOUS);
4744 MNT_FLAG(MNT_NOEXEC);
4745 MNT_FLAG(MNT_NOSUID);
4746 MNT_FLAG(MNT_NFS4ACLS);
4747 MNT_FLAG(MNT_UNION);
4748 MNT_FLAG(MNT_ASYNC);
4749 MNT_FLAG(MNT_SUIDDIR);
4750 MNT_FLAG(MNT_SOFTDEP);
4751 MNT_FLAG(MNT_NOSYMFOLLOW);
4752 MNT_FLAG(MNT_GJOURNAL);
4753 MNT_FLAG(MNT_MULTILABEL);
4754 MNT_FLAG(MNT_ACLS);
4755 MNT_FLAG(MNT_NOATIME);
4756 MNT_FLAG(MNT_NOCLUSTERR);
4757 MNT_FLAG(MNT_NOCLUSTERW);
4758 MNT_FLAG(MNT_SUJ);
4759 MNT_FLAG(MNT_EXRDONLY);
4760 MNT_FLAG(MNT_EXPORTED);
4761 MNT_FLAG(MNT_DEFEXPORTED);
4762 MNT_FLAG(MNT_EXPORTANON);
4763 MNT_FLAG(MNT_EXKERB);
4764 MNT_FLAG(MNT_EXPUBLIC);
4765 MNT_FLAG(MNT_LOCAL);
4766 MNT_FLAG(MNT_QUOTA);
4767 MNT_FLAG(MNT_ROOTFS);
4768 MNT_FLAG(MNT_USER);
4769 MNT_FLAG(MNT_IGNORE);
4770 MNT_FLAG(MNT_UPDATE);
4771 MNT_FLAG(MNT_DELEXPORT);
4772 MNT_FLAG(MNT_RELOAD);
4773 MNT_FLAG(MNT_FORCE);
4774 MNT_FLAG(MNT_SNAPSHOT);
4775 MNT_FLAG(MNT_BYFSID);
4776 #undef MNT_FLAG
4777 if (mflags != 0) {
4778 if (buf[0] != '\0')
4779 strlcat(buf, ", ", sizeof(buf));
4780 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
4781 "0x%016jx", mflags);
4782 }
4783 db_printf(" mnt_flag = %s\n", buf);
4784
4785 buf[0] = '\0';
4786 flags = mp->mnt_kern_flag;
4787 #define MNT_KERN_FLAG(flag) do { \
4788 if (flags & (flag)) { \
4789 if (buf[0] != '\0') \
4790 strlcat(buf, ", ", sizeof(buf)); \
4791 strlcat(buf, (#flag) + 5, sizeof(buf)); \
4792 flags &= ~(flag); \
4793 } \
4794 } while (0)
4795 MNT_KERN_FLAG(MNTK_UNMOUNTF);
4796 MNT_KERN_FLAG(MNTK_ASYNC);
4797 MNT_KERN_FLAG(MNTK_SOFTDEP);
4798 MNT_KERN_FLAG(MNTK_NOMSYNC);
4799 MNT_KERN_FLAG(MNTK_DRAINING);
4800 MNT_KERN_FLAG(MNTK_REFEXPIRE);
4801 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED);
4802 MNT_KERN_FLAG(MNTK_SHARED_WRITES);
4803 MNT_KERN_FLAG(MNTK_NO_IOPF);
4804 MNT_KERN_FLAG(MNTK_RECURSE);
4805 MNT_KERN_FLAG(MNTK_UPPER_WAITER);
4806 MNT_KERN_FLAG(MNTK_UNLOCKED_INSMNTQUE);
4807 MNT_KERN_FLAG(MNTK_USES_BCACHE);
4808 MNT_KERN_FLAG(MNTK_VMSETSIZE_BUG);
4809 MNT_KERN_FLAG(MNTK_FPLOOKUP);
4810 MNT_KERN_FLAG(MNTK_TASKQUEUE_WAITER);
4811 MNT_KERN_FLAG(MNTK_NOASYNC);
4812 MNT_KERN_FLAG(MNTK_UNMOUNT);
4813 MNT_KERN_FLAG(MNTK_MWAIT);
4814 MNT_KERN_FLAG(MNTK_SUSPEND);
4815 MNT_KERN_FLAG(MNTK_SUSPEND2);
4816 MNT_KERN_FLAG(MNTK_SUSPENDED);
4817 MNT_KERN_FLAG(MNTK_NULL_NOCACHE);
4818 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED);
4819 #undef MNT_KERN_FLAG
4820 if (flags != 0) {
4821 if (buf[0] != '\0')
4822 strlcat(buf, ", ", sizeof(buf));
4823 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
4824 "0x%08x", flags);
4825 }
4826 db_printf(" mnt_kern_flag = %s\n", buf);
4827
4828 db_printf(" mnt_opt = ");
4829 opt = TAILQ_FIRST(mp->mnt_opt);
4830 if (opt != NULL) {
4831 db_printf("%s", opt->name);
4832 opt = TAILQ_NEXT(opt, link);
4833 while (opt != NULL) {
4834 db_printf(", %s", opt->name);
4835 opt = TAILQ_NEXT(opt, link);
4836 }
4837 }
4838 db_printf("\n");
4839
4840 sp = &mp->mnt_stat;
4841 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx "
4842 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju "
4843 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju "
4844 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n",
4845 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags,
4846 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize,
4847 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree,
4848 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files,
4849 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites,
4850 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads,
4851 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax,
4852 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]);
4853
4854 db_printf(" mnt_cred = { uid=%u ruid=%u",
4855 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid);
4856 if (jailed(mp->mnt_cred))
4857 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id);
4858 db_printf(" }\n");
4859 db_printf(" mnt_ref = %d (with %d in the struct)\n",
4860 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref);
4861 db_printf(" mnt_gen = %d\n", mp->mnt_gen);
4862 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize);
4863 db_printf(" mnt_lazyvnodelistsize = %d\n",
4864 mp->mnt_lazyvnodelistsize);
4865 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n",
4866 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount);
4867 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max);
4868 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed);
4869 db_printf(" mnt_lockref = %d (with %d in the struct)\n",
4870 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref);
4871 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes);
4872 db_printf(" mnt_secondary_accwrites = %d\n",
4873 mp->mnt_secondary_accwrites);
4874 db_printf(" mnt_gjprovider = %s\n",
4875 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL");
4876 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops);
4877
4878 db_printf("\n\nList of active vnodes\n");
4879 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
4880 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) {
4881 vn_printf(vp, "vnode ");
4882 if (db_pager_quit)
4883 break;
4884 }
4885 }
4886 db_printf("\n\nList of inactive vnodes\n");
4887 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
4888 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) {
4889 vn_printf(vp, "vnode ");
4890 if (db_pager_quit)
4891 break;
4892 }
4893 }
4894 }
4895 #endif /* DDB */
4896
4897 /*
4898 * Fill in a struct xvfsconf based on a struct vfsconf.
4899 */
4900 static int
4901 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp)
4902 {
4903 struct xvfsconf xvfsp;
4904
4905 bzero(&xvfsp, sizeof(xvfsp));
4906 strcpy(xvfsp.vfc_name, vfsp->vfc_name);
4907 xvfsp.vfc_typenum = vfsp->vfc_typenum;
4908 xvfsp.vfc_refcount = vfsp->vfc_refcount;
4909 xvfsp.vfc_flags = vfsp->vfc_flags;
4910 /*
4911 * These are unused in userland, we keep them
4912 * to not break binary compatibility.
4913 */
4914 xvfsp.vfc_vfsops = NULL;
4915 xvfsp.vfc_next = NULL;
4916 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
4917 }
4918
4919 #ifdef COMPAT_FREEBSD32
4920 struct xvfsconf32 {
4921 uint32_t vfc_vfsops;
4922 char vfc_name[MFSNAMELEN];
4923 int32_t vfc_typenum;
4924 int32_t vfc_refcount;
4925 int32_t vfc_flags;
4926 uint32_t vfc_next;
4927 };
4928
4929 static int
4930 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp)
4931 {
4932 struct xvfsconf32 xvfsp;
4933
4934 bzero(&xvfsp, sizeof(xvfsp));
4935 strcpy(xvfsp.vfc_name, vfsp->vfc_name);
4936 xvfsp.vfc_typenum = vfsp->vfc_typenum;
4937 xvfsp.vfc_refcount = vfsp->vfc_refcount;
4938 xvfsp.vfc_flags = vfsp->vfc_flags;
4939 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
4940 }
4941 #endif
4942
4943 /*
4944 * Top level filesystem related information gathering.
4945 */
4946 static int
4947 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
4948 {
4949 struct vfsconf *vfsp;
4950 int error;
4951
4952 error = 0;
4953 vfsconf_slock();
4954 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
4955 #ifdef COMPAT_FREEBSD32
4956 if (req->flags & SCTL_MASK32)
4957 error = vfsconf2x32(req, vfsp);
4958 else
4959 #endif
4960 error = vfsconf2x(req, vfsp);
4961 if (error)
4962 break;
4963 }
4964 vfsconf_sunlock();
4965 return (error);
4966 }
4967
4968 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD |
4969 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist,
4970 "S,xvfsconf", "List of all configured filesystems");
4971
4972 #ifndef BURN_BRIDGES
4973 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
4974
4975 static int
4976 vfs_sysctl(SYSCTL_HANDLER_ARGS)
4977 {
4978 int *name = (int *)arg1 - 1; /* XXX */
4979 u_int namelen = arg2 + 1; /* XXX */
4980 struct vfsconf *vfsp;
4981
4982 log(LOG_WARNING, "userland calling deprecated sysctl, "
4983 "please rebuild world\n");
4984
4985 #if 1 || defined(COMPAT_PRELITE2)
4986 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
4987 if (namelen == 1)
4988 return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
4989 #endif
4990
4991 switch (name[1]) {
4992 case VFS_MAXTYPENUM:
4993 if (namelen != 2)
4994 return (ENOTDIR);
4995 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
4996 case VFS_CONF:
4997 if (namelen != 3)
4998 return (ENOTDIR); /* overloaded */
4999 vfsconf_slock();
5000 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
5001 if (vfsp->vfc_typenum == name[2])
5002 break;
5003 }
5004 vfsconf_sunlock();
5005 if (vfsp == NULL)
5006 return (EOPNOTSUPP);
5007 #ifdef COMPAT_FREEBSD32
5008 if (req->flags & SCTL_MASK32)
5009 return (vfsconf2x32(req, vfsp));
5010 else
5011 #endif
5012 return (vfsconf2x(req, vfsp));
5013 }
5014 return (EOPNOTSUPP);
5015 }
5016
5017 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP |
5018 CTLFLAG_MPSAFE, vfs_sysctl,
5019 "Generic filesystem");
5020
5021 #if 1 || defined(COMPAT_PRELITE2)
5022
5023 static int
5024 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
5025 {
5026 int error;
5027 struct vfsconf *vfsp;
5028 struct ovfsconf ovfs;
5029
5030 vfsconf_slock();
5031 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
5032 bzero(&ovfs, sizeof(ovfs));
5033 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */
5034 strcpy(ovfs.vfc_name, vfsp->vfc_name);
5035 ovfs.vfc_index = vfsp->vfc_typenum;
5036 ovfs.vfc_refcount = vfsp->vfc_refcount;
5037 ovfs.vfc_flags = vfsp->vfc_flags;
5038 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
5039 if (error != 0) {
5040 vfsconf_sunlock();
5041 return (error);
5042 }
5043 }
5044 vfsconf_sunlock();
5045 return (0);
5046 }
5047
5048 #endif /* 1 || COMPAT_PRELITE2 */
5049 #endif /* !BURN_BRIDGES */
5050
5051 static void
5052 unmount_or_warn(struct mount *mp)
5053 {
5054 int error;
5055
5056 error = dounmount(mp, MNT_FORCE, curthread);
5057 if (error != 0) {
5058 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname);
5059 if (error == EBUSY)
5060 printf("BUSY)\n");
5061 else
5062 printf("%d)\n", error);
5063 }
5064 }
5065
5066 /*
5067 * Unmount all filesystems. The list is traversed in reverse order
5068 * of mounting to avoid dependencies.
5069 */
5070 void
5071 vfs_unmountall(void)
5072 {
5073 struct mount *mp, *tmp;
5074
5075 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__);
5076
5077 /*
5078 * Since this only runs when rebooting, it is not interlocked.
5079 */
5080 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) {
5081 vfs_ref(mp);
5082
5083 /*
5084 * Forcibly unmounting "/dev" before "/" would prevent clean
5085 * unmount of the latter.
5086 */
5087 if (mp == rootdevmp)
5088 continue;
5089
5090 unmount_or_warn(mp);
5091 }
5092
5093 if (rootdevmp != NULL)
5094 unmount_or_warn(rootdevmp);
5095 }
5096
5097 static void
5098 vfs_deferred_inactive(struct vnode *vp, int lkflags)
5099 {
5100
5101 ASSERT_VI_LOCKED(vp, __func__);
5102 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp);
5103 if ((vp->v_iflag & VI_OWEINACT) == 0) {
5104 vdropl(vp);
5105 return;
5106 }
5107 if (vn_lock(vp, lkflags) == 0) {
5108 VI_LOCK(vp);
5109 vinactive(vp);
5110 VOP_UNLOCK(vp);
5111 vdropl(vp);
5112 return;
5113 }
5114 vdefer_inactive_unlocked(vp);
5115 }
5116
5117 static int
5118 vfs_periodic_inactive_filter(struct vnode *vp, void *arg)
5119 {
5120
5121 return (vp->v_iflag & VI_DEFINACT);
5122 }
5123
5124 static void __noinline
5125 vfs_periodic_inactive(struct mount *mp, int flags)
5126 {
5127 struct vnode *vp, *mvp;
5128 int lkflags;
5129
5130 lkflags = LK_EXCLUSIVE | LK_INTERLOCK;
5131 if (flags != MNT_WAIT)
5132 lkflags |= LK_NOWAIT;
5133
5134 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) {
5135 if ((vp->v_iflag & VI_DEFINACT) == 0) {
5136 VI_UNLOCK(vp);
5137 continue;
5138 }
5139 vp->v_iflag &= ~VI_DEFINACT;
5140 vfs_deferred_inactive(vp, lkflags);
5141 }
5142 }
5143
5144 static inline bool
5145 vfs_want_msync(struct vnode *vp)
5146 {
5147 struct vm_object *obj;
5148
5149 /*
5150 * This test may be performed without any locks held.
5151 * We rely on vm_object's type stability.
5152 */
5153 if (vp->v_vflag & VV_NOSYNC)
5154 return (false);
5155 obj = vp->v_object;
5156 return (obj != NULL && vm_object_mightbedirty(obj));
5157 }
5158
5159 static int
5160 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused)
5161 {
5162
5163 if (vp->v_vflag & VV_NOSYNC)
5164 return (false);
5165 if (vp->v_iflag & VI_DEFINACT)
5166 return (true);
5167 return (vfs_want_msync(vp));
5168 }
5169
5170 static void __noinline
5171 vfs_periodic_msync_inactive(struct mount *mp, int flags)
5172 {
5173 struct vnode *vp, *mvp;
5174 int lkflags;
5175 bool seen_defer;
5176
5177 lkflags = LK_EXCLUSIVE | LK_INTERLOCK;
5178 if (flags != MNT_WAIT)
5179 lkflags |= LK_NOWAIT;
5180
5181 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) {
5182 seen_defer = false;
5183 if (vp->v_iflag & VI_DEFINACT) {
5184 vp->v_iflag &= ~VI_DEFINACT;
5185 seen_defer = true;
5186 }
5187 if (!vfs_want_msync(vp)) {
5188 if (seen_defer)
5189 vfs_deferred_inactive(vp, lkflags);
5190 else
5191 VI_UNLOCK(vp);
5192 continue;
5193 }
5194 if (vget(vp, lkflags) == 0) {
5195 if ((vp->v_vflag & VV_NOSYNC) == 0) {
5196 if (flags == MNT_WAIT)
5197 vnode_pager_clean_sync(vp);
5198 else
5199 vnode_pager_clean_async(vp);
5200 }
5201 vput(vp);
5202 if (seen_defer)
5203 vdrop(vp);
5204 } else {
5205 if (seen_defer)
5206 vdefer_inactive_unlocked(vp);
5207 }
5208 }
5209 }
5210
5211 void
5212 vfs_periodic(struct mount *mp, int flags)
5213 {
5214
5215 CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
5216
5217 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0)
5218 vfs_periodic_inactive(mp, flags);
5219 else
5220 vfs_periodic_msync_inactive(mp, flags);
5221 }
5222
5223 static void
5224 destroy_vpollinfo_free(struct vpollinfo *vi)
5225 {
5226
5227 knlist_destroy(&vi->vpi_selinfo.si_note);
5228 mtx_destroy(&vi->vpi_lock);
5229 free(vi, M_VNODEPOLL);
5230 }
5231
5232 static void
5233 destroy_vpollinfo(struct vpollinfo *vi)
5234 {
5235
5236 knlist_clear(&vi->vpi_selinfo.si_note, 1);
5237 seldrain(&vi->vpi_selinfo);
5238 destroy_vpollinfo_free(vi);
5239 }
5240
5241 /*
5242 * Initialize per-vnode helper structure to hold poll-related state.
5243 */
5244 void
5245 v_addpollinfo(struct vnode *vp)
5246 {
5247 struct vpollinfo *vi;
5248
5249 if (vp->v_pollinfo != NULL)
5250 return;
5251 vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO);
5252 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
5253 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
5254 vfs_knlunlock, vfs_knl_assert_lock);
5255 VI_LOCK(vp);
5256 if (vp->v_pollinfo != NULL) {
5257 VI_UNLOCK(vp);
5258 destroy_vpollinfo_free(vi);
5259 return;
5260 }
5261 vp->v_pollinfo = vi;
5262 VI_UNLOCK(vp);
5263 }
5264
5265 /*
5266 * Record a process's interest in events which might happen to
5267 * a vnode. Because poll uses the historic select-style interface
5268 * internally, this routine serves as both the ``check for any
5269 * pending events'' and the ``record my interest in future events''
5270 * functions. (These are done together, while the lock is held,
5271 * to avoid race conditions.)
5272 */
5273 int
5274 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
5275 {
5276
5277 v_addpollinfo(vp);
5278 mtx_lock(&vp->v_pollinfo->vpi_lock);
5279 if (vp->v_pollinfo->vpi_revents & events) {
5280 /*
5281 * This leaves events we are not interested
5282 * in available for the other process which
5283 * presumably had requested them
5284 * (otherwise they would never have been
5285 * recorded).
5286 */
5287 events &= vp->v_pollinfo->vpi_revents;
5288 vp->v_pollinfo->vpi_revents &= ~events;
5289
5290 mtx_unlock(&vp->v_pollinfo->vpi_lock);
5291 return (events);
5292 }
5293 vp->v_pollinfo->vpi_events |= events;
5294 selrecord(td, &vp->v_pollinfo->vpi_selinfo);
5295 mtx_unlock(&vp->v_pollinfo->vpi_lock);
5296 return (0);
5297 }
5298
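/*
 * Usage sketch (illustrative only, names hypothetical): a filesystem's
 * VOP_POLL implementation can simply forward to this routine,
 *
 *	static int
 *	xxx_poll(struct vop_poll_args *ap)
 *	{
 *		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
 *	}
 *
 * and later wake the recorded waiters, e.g. via selwakeuppri() on
 * vp->v_pollinfo->vpi_selinfo, once the polled condition becomes true.
 */
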
5299 /*
5300 * Routine to create and manage a filesystem syncer vnode.
5301 */
5302 #define sync_close ((int (*)(struct vop_close_args *))nullop)
5303 static int sync_fsync(struct vop_fsync_args *);
5304 static int sync_inactive(struct vop_inactive_args *);
5305 static int sync_reclaim(struct vop_reclaim_args *);
5306
5307 static struct vop_vector sync_vnodeops = {
5308 .vop_bypass = VOP_EOPNOTSUPP,
5309 .vop_close = sync_close,
5310 .vop_fsync = sync_fsync,
5311 .vop_getwritemount = vop_stdgetwritemount,
5312 .vop_inactive = sync_inactive,
5313 .vop_need_inactive = vop_stdneed_inactive,
5314 .vop_reclaim = sync_reclaim,
5315 .vop_lock1 = vop_stdlock,
5316 .vop_unlock = vop_stdunlock,
5317 .vop_islocked = vop_stdislocked,
5318 .vop_fplookup_vexec = VOP_EAGAIN,
5319 .vop_fplookup_symlink = VOP_EAGAIN,
5320 };
5321 VFS_VOP_VECTOR_REGISTER(sync_vnodeops);
5322
5323 /*
5324 * Create a new filesystem syncer vnode for the specified mount point.
5325 */
5326 void
5327 vfs_allocate_syncvnode(struct mount *mp)
5328 {
5329 struct vnode *vp;
5330 struct bufobj *bo;
5331 static long start, incr, next;
5332 int error;
5333
5334 /* Allocate a new vnode */
5335 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp);
5336 if (error != 0)
5337 panic("vfs_allocate_syncvnode: getnewvnode() failed");
5338 vp->v_type = VNON;
5339 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5340 vp->v_vflag |= VV_FORCEINSMQ;
5341 error = insmntque1(vp, mp);
5342 if (error != 0)
5343 panic("vfs_allocate_syncvnode: insmntque() failed");
5344 vp->v_vflag &= ~VV_FORCEINSMQ;
5345 vn_set_state(vp, VSTATE_CONSTRUCTED);
5346 VOP_UNLOCK(vp);
5347 /*
5348 * Place the vnode onto the syncer worklist. We attempt to
5349 * scatter them about on the list so that they will go off
5350 * at evenly distributed times even if all the filesystems
5351 * are mounted at once.
5352 */
5353 next += incr;
5354 if (next == 0 || next > syncer_maxdelay) {
5355 start /= 2;
5356 incr /= 2;
5357 if (start == 0) {
5358 start = syncer_maxdelay / 2;
5359 incr = syncer_maxdelay;
5360 }
5361 next = start;
5362 }
5363 bo = &vp->v_bufobj;
5364 BO_LOCK(bo);
5365 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
5366 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
5367 mtx_lock(&sync_mtx);
5368 sync_vnode_count++;
5369 if (mp->mnt_syncer == NULL) {
5370 mp->mnt_syncer = vp;
5371 vp = NULL;
5372 }
5373 mtx_unlock(&sync_mtx);
5374 BO_UNLOCK(bo);
5375 if (vp != NULL) {
5376 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5377 vgone(vp);
5378 vput(vp);
5379 }
5380 }
5381
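/*
 * Worked example of the scattering above (assuming syncer_maxdelay == 32 and
 * the initial start == incr == next == 0): successive calls compute next
 * values
 *
 *	16, 8, 24, 4, 12, 20, 28, 2, 6, 10, ...
 *
 * i.e., a binary subdivision of the delay range (the worklist slot actually
 * used is next % syncdelay when syncdelay > 0), so syncer vnodes of
 * filesystems mounted back to back still fire at well separated times.
 */
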
5382 void
5383 vfs_deallocate_syncvnode(struct mount *mp)
5384 {
5385 struct vnode *vp;
5386
5387 mtx_lock(&sync_mtx);
5388 vp = mp->mnt_syncer;
5389 if (vp != NULL)
5390 mp->mnt_syncer = NULL;
5391 mtx_unlock(&sync_mtx);
5392 if (vp != NULL)
5393 vrele(vp);
5394 }
5395
5396 /*
5397 * Do a lazy sync of the filesystem.
5398 */
5399 static int
5400 sync_fsync(struct vop_fsync_args *ap)
5401 {
5402 struct vnode *syncvp = ap->a_vp;
5403 struct mount *mp = syncvp->v_mount;
5404 int error, save;
5405 struct bufobj *bo;
5406
5407 /*
5408 * We only need to do something if this is a lazy evaluation.
5409 */
5410 if (ap->a_waitfor != MNT_LAZY)
5411 return (0);
5412
5413 /*
5414 * Move ourselves to the back of the sync list.
5415 */
5416 bo = &syncvp->v_bufobj;
5417 BO_LOCK(bo);
5418 vn_syncer_add_to_worklist(bo, syncdelay);
5419 BO_UNLOCK(bo);
5420
5421 /*
5422 * Walk the list of vnodes pushing all that are dirty and
5423 * not already on the sync list.
5424 */
5425 if (vfs_busy(mp, MBF_NOWAIT) != 0)
5426 return (0);
5427 VOP_UNLOCK(syncvp);
5428 save = curthread_pflags_set(TDP_SYNCIO);
5429 /*
5430 * The filesystem at hand may be idle with free vnodes stored in the
5431 * batch. Return them instead of letting them stay there indefinitely.
5432 */
5433 vfs_periodic(mp, MNT_NOWAIT);
5434 error = VFS_SYNC(mp, MNT_LAZY);
5435 curthread_pflags_restore(save);
5436 vn_lock(syncvp, LK_EXCLUSIVE | LK_RETRY);
5437 vfs_unbusy(mp);
5438 return (error);
5439 }
5440
5441 /*
5442 * The syncer vnode is no longer referenced.
5443 */
5444 static int
5445 sync_inactive(struct vop_inactive_args *ap)
5446 {
5447
5448 vgone(ap->a_vp);
5449 return (0);
5450 }
5451
5452 /*
5453 * The syncer vnode is no longer needed and is being decommissioned.
5454 *
5455 * Modifications to the worklist must be protected by sync_mtx.
5456 */
5457 static int
5458 sync_reclaim(struct vop_reclaim_args *ap)
5459 {
5460 struct vnode *vp = ap->a_vp;
5461 struct bufobj *bo;
5462
5463 bo = &vp->v_bufobj;
5464 BO_LOCK(bo);
5465 mtx_lock(&sync_mtx);
5466 if (vp->v_mount->mnt_syncer == vp)
5467 vp->v_mount->mnt_syncer = NULL;
5468 if (bo->bo_flag & BO_ONWORKLST) {
5469 LIST_REMOVE(bo, bo_synclist);
5470 syncer_worklist_len--;
5471 sync_vnode_count--;
5472 bo->bo_flag &= ~BO_ONWORKLST;
5473 }
5474 mtx_unlock(&sync_mtx);
5475 BO_UNLOCK(bo);
5476
5477 return (0);
5478 }
5479
5480 int
5481 vn_need_pageq_flush(struct vnode *vp)
5482 {
5483 struct vm_object *obj;
5484
5485 obj = vp->v_object;
5486 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 &&
5487 vm_object_mightbedirty(obj));
5488 }
5489
5490 /*
5491 * Check if vnode represents a disk device
5492 */
5493 bool
5494 vn_isdisk_error(struct vnode *vp, int *errp)
5495 {
5496 int error;
5497
5498 if (vp->v_type != VCHR) {
5499 error = ENOTBLK;
5500 goto out;
5501 }
5502 error = 0;
5503 dev_lock();
5504 if (vp->v_rdev == NULL)
5505 error = ENXIO;
5506 else if (vp->v_rdev->si_devsw == NULL)
5507 error = ENXIO;
5508 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
5509 error = ENOTBLK;
5510 dev_unlock();
5511 out:
5512 *errp = error;
5513 return (error == 0);
5514 }
5515
5516 bool
5517 vn_isdisk(struct vnode *vp)
5518 {
5519 int error;
5520
5521 return (vn_isdisk_error(vp, &error));
5522 }
5523
5524 /*
5525 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
5526 * the comment above cache_fplookup for details.
5527 */
5528 int
5529 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred)
5530 {
5531 int error;
5532
5533 VFS_SMR_ASSERT_ENTERED();
5534
5535 /* Check the owner. */
5536 if (cred->cr_uid == file_uid) {
5537 if (file_mode & S_IXUSR)
5538 return (0);
5539 goto out_error;
5540 }
5541
5542 /* Otherwise, check the groups (first match) */
5543 if (groupmember(file_gid, cred)) {
5544 if (file_mode & S_IXGRP)
5545 return (0);
5546 goto out_error;
5547 }
5548
5549 /* Otherwise, check everyone else. */
5550 if (file_mode & S_IXOTH)
5551 return (0);
5552 out_error:
5553 /*
5554 * Permission check failed, but it is possible the denial will get overridden
5555 * (e.g., when root is traversing through a 700 directory owned by someone
5556 * else).
5557 *
5558 * vaccess() calls priv_check_cred which in turn can descend into MAC
5559 * modules overriding this result. It's quite unclear what semantics
5560 * are allowed for them to operate, thus for safety we don't call them
5561 * from within the SMR section. This also means if any such modules
5562 * are present, we have to let the regular lookup decide.
5563 */
5564 error = priv_check_cred_vfs_lookup_nomac(cred);
5565 switch (error) {
5566 case 0:
5567 return (0);
5568 case EAGAIN:
5569 /*
5570 * MAC modules present.
5571 */
5572 return (EAGAIN);
5573 case EPERM:
5574 return (EACCES);
5575 default:
5576 return (error);
5577 }
5578 }
5579
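/*
 * Worked example for the fast path above: a file with mode 0755 owned by
 * some other user and group still grants VEXEC through S_IXOTH, so the
 * routine returns 0 without ever consulting priv_check_cred. With mode 0700
 * and a non-owner cred it falls through to out_error and the outcome depends
 * on priv_check_cred_vfs_lookup_nomac().
 */
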
5580 /*
5581 * Common filesystem object access control check routine. Accepts a
5582 * vnode's type, "mode", uid and gid, requested access mode, and credentials.
5583 * Returns 0 on success, or an errno on failure.
5584 */
5585 int
5586 vaccess(__enum_uint8(vtype) type, mode_t file_mode, uid_t file_uid, gid_t file_gid,
5587 accmode_t accmode, struct ucred *cred)
5588 {
5589 accmode_t dac_granted;
5590 accmode_t priv_granted;
5591
5592 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0,
5593 ("invalid bit in accmode"));
5594 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE),
5595 ("VAPPEND without VWRITE"));
5596
5597 /*
5598 * Look for a normal, non-privileged way to access the file/directory
5599 * as requested. If it exists, go with that.
5600 */
5601
5602 dac_granted = 0;
5603
5604 /* Check the owner. */
5605 if (cred->cr_uid == file_uid) {
5606 dac_granted |= VADMIN;
5607 if (file_mode & S_IXUSR)
5608 dac_granted |= VEXEC;
5609 if (file_mode & S_IRUSR)
5610 dac_granted |= VREAD;
5611 if (file_mode & S_IWUSR)
5612 dac_granted |= (VWRITE | VAPPEND);
5613
5614 if ((accmode & dac_granted) == accmode)
5615 return (0);
5616
5617 goto privcheck;
5618 }
5619
5620 /* Otherwise, check the groups (first match) */
5621 if (groupmember(file_gid, cred)) {
5622 if (file_mode & S_IXGRP)
5623 dac_granted |= VEXEC;
5624 if (file_mode & S_IRGRP)
5625 dac_granted |= VREAD;
5626 if (file_mode & S_IWGRP)
5627 dac_granted |= (VWRITE | VAPPEND);
5628
5629 if ((accmode & dac_granted) == accmode)
5630 return (0);
5631
5632 goto privcheck;
5633 }
5634
5635 /* Otherwise, check everyone else. */
5636 if (file_mode & S_IXOTH)
5637 dac_granted |= VEXEC;
5638 if (file_mode & S_IROTH)
5639 dac_granted |= VREAD;
5640 if (file_mode & S_IWOTH)
5641 dac_granted |= (VWRITE | VAPPEND);
5642 if ((accmode & dac_granted) == accmode)
5643 return (0);
5644
5645 privcheck:
5646 /*
5647 * Build a privilege mask to determine if the set of privileges
5648 * satisfies the requirements when combined with the granted mask
5649 * from above. For each privilege, if the privilege is required,
5650 * bitwise or the request type onto the priv_granted mask.
5651 */
5652 priv_granted = 0;
5653
5654 if (type == VDIR) {
5655 /*
5656 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC
5657 * requests, instead of PRIV_VFS_EXEC.
5658 */
5659 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
5660 !priv_check_cred(cred, PRIV_VFS_LOOKUP))
5661 priv_granted |= VEXEC;
5662 } else {
5663 /*
5664 * Ensure that at least one execute bit is on. Otherwise,
5665 * a privileged user will always succeed, and we don't want
5666 * this to happen unless the file really is executable.
5667 */
5668 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
5669 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 &&
5670 !priv_check_cred(cred, PRIV_VFS_EXEC))
5671 priv_granted |= VEXEC;
5672 }
5673
5674 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) &&
5675 !priv_check_cred(cred, PRIV_VFS_READ))
5676 priv_granted |= VREAD;
5677
5678 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
5679 !priv_check_cred(cred, PRIV_VFS_WRITE))
5680 priv_granted |= (VWRITE | VAPPEND);
5681
5682 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
5683 !priv_check_cred(cred, PRIV_VFS_ADMIN))
5684 priv_granted |= VADMIN;
5685
5686 if ((accmode & (priv_granted | dac_granted)) == accmode) {
5687 return (0);
5688 }
5689
5690 return ((accmode & VADMIN) ? EPERM : EACCES);
5691 }
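
/*
 * Illustrative sketch (an assumption, not part of the original code): a
 * filesystem's VOP_ACCESS() typically funnels into vaccess() with the
 * metadata it keeps for the vnode, e.g.:
 *
 *	return (vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid,
 *	    ap->a_accmode, ap->a_cred));
 *
 * where "ip" stands for a hypothetical per-filesystem inode structure.
 */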
5692
5693 /*
5694 * Credential check based on process requesting service, and per-attribute
5695 * permissions.
5696 */
5697 int
5698 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred,
5699 struct thread *td, accmode_t accmode)
5700 {
5701
5702 /*
5703 * Kernel-invoked operations always succeed.
5704 */
5705 if (cred == NOCRED)
5706 return (0);
5707
5708 /*
5709 * Do not allow privileged processes in jail to directly manipulate
5710 * system attributes.
5711 */
5712 switch (attrnamespace) {
5713 case EXTATTR_NAMESPACE_SYSTEM:
5714 /* Potentially should be: return (EPERM); */
5715 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM));
5716 case EXTATTR_NAMESPACE_USER:
5717 return (VOP_ACCESS(vp, accmode, cred, td));
5718 default:
5719 return (EPERM);
5720 }
5721 }
5722
5723 #ifdef DEBUG_VFS_LOCKS
5724 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */
5725 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0,
5726 "Drop into debugger on lock violation");
5727
5728 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */
5729 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex,
5730 0, "Check for interlock across VOPs");
5731
5732 int vfs_badlock_print = 1; /* Print lock violations. */
5733 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print,
5734 0, "Print lock violations");
5735
5736 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */
5737 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode,
5738 0, "Print vnode details on lock violations");
5739
5740 #ifdef KDB
5741 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */
5742 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW,
5743 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations");
5744 #endif
5745
5746 static void
5747 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
5748 {
5749
5750 #ifdef KDB
5751 if (vfs_badlock_backtrace)
5752 kdb_backtrace();
5753 #endif
5754 if (vfs_badlock_vnode)
5755 vn_printf(vp, "vnode ");
5756 if (vfs_badlock_print)
5757 printf("%s: %p %s\n", str, (void *)vp, msg);
5758 if (vfs_badlock_ddb)
5759 kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
5760 }
5761
5762 void
5763 assert_vi_locked(struct vnode *vp, const char *str)
5764 {
5765
5766 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
5767 vfs_badlock("interlock is not locked but should be", str, vp);
5768 }
5769
5770 void
5771 assert_vi_unlocked(struct vnode *vp, const char *str)
5772 {
5773
5774 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
5775 vfs_badlock("interlock is locked but should not be", str, vp);
5776 }
5777
5778 void
5779 assert_vop_locked(struct vnode *vp, const char *str)
5780 {
5781 if (KERNEL_PANICKED() || vp == NULL)
5782 return;
5783
5784 #ifdef WITNESS
5785 if ((vp->v_irflag & VIRF_CROSSMP) == 0 &&
5786 witness_is_owned(&vp->v_vnlock->lock_object) == -1)
5787 #else
5788 int locked = VOP_ISLOCKED(vp);
5789 if (locked == 0 || locked == LK_EXCLOTHER)
5790 #endif
5791 vfs_badlock("is not locked but should be", str, vp);
5792 }
5793
5794 void
5795 assert_vop_unlocked(struct vnode *vp, const char *str)
5796 {
5797 if (KERNEL_PANICKED() || vp == NULL)
5798 return;
5799
5800 #ifdef WITNESS
5801 if ((vp->v_irflag & VIRF_CROSSMP) == 0 &&
5802 witness_is_owned(&vp->v_vnlock->lock_object) == 1)
5803 #else
5804 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
5805 #endif
5806 vfs_badlock("is locked but should not be", str, vp);
5807 }
5808
5809 void
5810 assert_vop_elocked(struct vnode *vp, const char *str)
5811 {
5812 if (KERNEL_PANICKED() || vp == NULL)
5813 return;
5814
5815 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
5816 vfs_badlock("is not exclusive locked but should be", str, vp);
5817 }
5818 #endif /* DEBUG_VFS_LOCKS */
5819
5820 void
5821 vop_rename_fail(struct vop_rename_args *ap)
5822 {
5823
5824 if (ap->a_tvp != NULL)
5825 vput(ap->a_tvp);
5826 if (ap->a_tdvp == ap->a_tvp)
5827 vrele(ap->a_tdvp);
5828 else
5829 vput(ap->a_tdvp);
5830 vrele(ap->a_fdvp);
5831 vrele(ap->a_fvp);
5832 }
5833
5834 void
5835 vop_rename_pre(void *ap)
5836 {
5837 struct vop_rename_args *a = ap;
5838
5839 #ifdef DEBUG_VFS_LOCKS
5840 if (a->a_tvp)
5841 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
5842 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
5843 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
5844 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
5845
5846 /* Check the source (from). */
5847 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock &&
5848 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock))
5849 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
5850 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock)
5851 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked");
5852
5853 /* Check the target. */
5854 if (a->a_tvp)
5855 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
5856 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
5857 #endif
5858 /*
5859 * It may be tempting to add vn_seqc_write_begin/end calls here and
5860 * in vop_rename_post but that's not going to work out since some
5861 * filesystems relookup vnodes mid-rename. This is probably a bug.
5862 *
5863 * For now filesystems are expected to do the relevant calls after they
5864 * decide what vnodes to operate on.
5865 */
5866 if (a->a_tdvp != a->a_fdvp)
5867 vhold(a->a_fdvp);
5868 if (a->a_tvp != a->a_fvp)
5869 vhold(a->a_fvp);
5870 vhold(a->a_tdvp);
5871 if (a->a_tvp)
5872 vhold(a->a_tvp);
5873 }
5874
5875 #ifdef DEBUG_VFS_LOCKS
5876 void
5877 vop_fplookup_vexec_debugpre(void *ap __unused)
5878 {
5879
5880 VFS_SMR_ASSERT_ENTERED();
5881 }
5882
5883 void
5884 vop_fplookup_vexec_debugpost(void *ap, int rc)
5885 {
5886 struct vop_fplookup_vexec_args *a;
5887 struct vnode *vp;
5888
5889 a = ap;
5890 vp = a->a_vp;
5891
5892 VFS_SMR_ASSERT_ENTERED();
5893 if (rc == EOPNOTSUPP)
5894 VNPASS(VN_IS_DOOMED(vp), vp);
5895 }
5896
5897 void
5898 vop_fplookup_symlink_debugpre(void *ap __unused)
5899 {
5900
5901 VFS_SMR_ASSERT_ENTERED();
5902 }
5903
5904 void
5905 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused)
5906 {
5907
5908 VFS_SMR_ASSERT_ENTERED();
5909 }
5910
5911 static void
5912 vop_fsync_debugprepost(struct vnode *vp, const char *name)
5913 {
5914 if (vp->v_type == VCHR)
5915 ;
5916 /*
5917 * The shared vs. exclusive locking policy for fsync()
5918 * is actually determined by vp's write mount as indicated
5919 * by VOP_GETWRITEMOUNT(), which for stacked filesystems
5920 * may not be the same as vp->v_mount. However, if the
5921 * underlying filesystem which really handles the fsync()
5922 * supports shared locking, the stacked filesystem must also
5923 * be prepared for its VOP_FSYNC() operation to be called
5924 * with only a shared lock. On the other hand, if the
5925 * stacked filesystem claims support for shared write
5926 * locking but the underlying filesystem does not, and the
5927 * caller incorrectly uses a shared lock, this condition
5928 * should still be caught when the stacked filesystem
5929 * invokes VOP_FSYNC() on the underlying filesystem.
5930 */
5931 else if (MNT_SHARED_WRITES(vp->v_mount))
5932 ASSERT_VOP_LOCKED(vp, name);
5933 else
5934 ASSERT_VOP_ELOCKED(vp, name);
5935 }
5936
5937 void
5938 vop_fsync_debugpre(void *a)
5939 {
5940 struct vop_fsync_args *ap;
5941
5942 ap = a;
5943 vop_fsync_debugprepost(ap->a_vp, "fsync");
5944 }
5945
5946 void
5947 vop_fsync_debugpost(void *a, int rc __unused)
5948 {
5949 struct vop_fsync_args *ap;
5950
5951 ap = a;
5952 vop_fsync_debugprepost(ap->a_vp, "fsync");
5953 }
5954
5955 void
5956 vop_fdatasync_debugpre(void *a)
5957 {
5958 struct vop_fdatasync_args *ap;
5959
5960 ap = a;
5961 vop_fsync_debugprepost(ap->a_vp, "fsync");
5962 }
5963
5964 void
5965 vop_fdatasync_debugpost(void *a, int rc __unused)
5966 {
5967 struct vop_fdatasync_args *ap;
5968
5969 ap = a;
5970 vop_fsync_debugprepost(ap->a_vp, "fsync");
5971 }
5972
5973 void
5974 vop_strategy_debugpre(void *ap)
5975 {
5976 struct vop_strategy_args *a;
5977 struct buf *bp;
5978
5979 a = ap;
5980 bp = a->a_bp;
5981
5982 /*
5983 * Cluster ops lock their component buffers but not the IO container.
5984 */
5985 if ((bp->b_flags & B_CLUSTER) != 0)
5986 return;
5987
5988 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) {
5989 if (vfs_badlock_print)
5990 printf(
5991 "VOP_STRATEGY: bp is not locked but should be\n");
5992 if (vfs_badlock_ddb)
5993 kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
5994 }
5995 }
5996
5997 void
5998 vop_lock_debugpre(void *ap)
5999 {
6000 struct vop_lock1_args *a = ap;
6001
6002 if ((a->a_flags & LK_INTERLOCK) == 0)
6003 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
6004 else
6005 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
6006 }
6007
6008 void
6009 vop_lock_debugpost(void *ap, int rc)
6010 {
6011 struct vop_lock1_args *a = ap;
6012
6013 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
6014 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0)
6015 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
6016 }
6017
6018 void
6019 vop_unlock_debugpre(void *ap)
6020 {
6021 struct vop_unlock_args *a = ap;
6022 struct vnode *vp = a->a_vp;
6023
6024 VNPASS(vn_get_state(vp) != VSTATE_UNINITIALIZED, vp);
6025 ASSERT_VOP_LOCKED(vp, "VOP_UNLOCK");
6026 }
6027
6028 void
6029 vop_need_inactive_debugpre(void *ap)
6030 {
6031 struct vop_need_inactive_args *a = ap;
6032
6033 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE");
6034 }
6035
6036 void
6037 vop_need_inactive_debugpost(void *ap, int rc)
6038 {
6039 struct vop_need_inactive_args *a = ap;
6040
6041 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE");
6042 }
6043 #endif
6044
6045 void
6046 vop_create_pre(void *ap)
6047 {
6048 struct vop_create_args *a;
6049 struct vnode *dvp;
6050
6051 a = ap;
6052 dvp = a->a_dvp;
6053 vn_seqc_write_begin(dvp);
6054 }
6055
6056 void
6057 vop_create_post(void *ap, int rc)
6058 {
6059 struct vop_create_args *a;
6060 struct vnode *dvp;
6061
6062 a = ap;
6063 dvp = a->a_dvp;
6064 vn_seqc_write_end(dvp);
6065 if (!rc)
6066 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
6067 }
6068
6069 void
6070 vop_whiteout_pre(void *ap)
6071 {
6072 struct vop_whiteout_args *a;
6073 struct vnode *dvp;
6074
6075 a = ap;
6076 dvp = a->a_dvp;
6077 vn_seqc_write_begin(dvp);
6078 }
6079
6080 void
6081 vop_whiteout_post(void *ap, int rc)
6082 {
6083 struct vop_whiteout_args *a;
6084 struct vnode *dvp;
6085
6086 a = ap;
6087 dvp = a->a_dvp;
6088 vn_seqc_write_end(dvp);
6089 }
6090
6091 void
6092 vop_deleteextattr_pre(void *ap)
6093 {
6094 struct vop_deleteextattr_args *a;
6095 struct vnode *vp;
6096
6097 a = ap;
6098 vp = a->a_vp;
6099 vn_seqc_write_begin(vp);
6100 }
6101
6102 void
6103 vop_deleteextattr_post(void *ap, int rc)
6104 {
6105 struct vop_deleteextattr_args *a;
6106 struct vnode *vp;
6107
6108 a = ap;
6109 vp = a->a_vp;
6110 vn_seqc_write_end(vp);
6111 if (!rc)
6112 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
6113 }
6114
6115 void
6116 vop_link_pre(void *ap)
6117 {
6118 struct vop_link_args *a;
6119 struct vnode *vp, *tdvp;
6120
6121 a = ap;
6122 vp = a->a_vp;
6123 tdvp = a->a_tdvp;
6124 vn_seqc_write_begin(vp);
6125 vn_seqc_write_begin(tdvp);
6126 }
6127
6128 void
6129 vop_link_post(void *ap, int rc)
6130 {
6131 struct vop_link_args *a;
6132 struct vnode *vp, *tdvp;
6133
6134 a = ap;
6135 vp = a->a_vp;
6136 tdvp = a->a_tdvp;
6137 vn_seqc_write_end(vp);
6138 vn_seqc_write_end(tdvp);
6139 if (!rc) {
6140 VFS_KNOTE_LOCKED(vp, NOTE_LINK);
6141 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE);
6142 }
6143 }
6144
6145 void
6146 vop_mkdir_pre(void *ap)
6147 {
6148 struct vop_mkdir_args *a;
6149 struct vnode *dvp;
6150
6151 a = ap;
6152 dvp = a->a_dvp;
6153 vn_seqc_write_begin(dvp);
6154 }
6155
6156 void
6157 vop_mkdir_post(void *ap, int rc)
6158 {
6159 struct vop_mkdir_args *a;
6160 struct vnode *dvp;
6161
6162 a = ap;
6163 dvp = a->a_dvp;
6164 vn_seqc_write_end(dvp);
6165 if (!rc)
6166 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK);
6167 }
6168
6169 #ifdef DEBUG_VFS_LOCKS
6170 void
6171 vop_mkdir_debugpost(void *ap, int rc)
6172 {
6173 struct vop_mkdir_args *a;
6174
6175 a = ap;
6176 if (!rc)
6177 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp);
6178 }
6179 #endif
6180
6181 void
6182 vop_mknod_pre(void *ap)
6183 {
6184 struct vop_mknod_args *a;
6185 struct vnode *dvp;
6186
6187 a = ap;
6188 dvp = a->a_dvp;
6189 vn_seqc_write_begin(dvp);
6190 }
6191
6192 void
6193 vop_mknod_post(void *ap, int rc)
6194 {
6195 struct vop_mknod_args *a;
6196 struct vnode *dvp;
6197
6198 a = ap;
6199 dvp = a->a_dvp;
6200 vn_seqc_write_end(dvp);
6201 if (!rc)
6202 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
6203 }
6204
6205 void
6206 vop_reclaim_post(void *ap, int rc)
6207 {
6208 struct vop_reclaim_args *a;
6209 struct vnode *vp;
6210
6211 a = ap;
6212 vp = a->a_vp;
6213 ASSERT_VOP_IN_SEQC(vp);
6214 if (!rc)
6215 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE);
6216 }
6217
6218 void
6219 vop_remove_pre(void *ap)
6220 {
6221 struct vop_remove_args *a;
6222 struct vnode *dvp, *vp;
6223
6224 a = ap;
6225 dvp = a->a_dvp;
6226 vp = a->a_vp;
6227 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_UNLINK);
6228 vn_seqc_write_begin(dvp);
6229 vn_seqc_write_begin(vp);
6230 }
6231
6232 void
6233 vop_remove_post(void *ap, int rc)
6234 {
6235 struct vop_remove_args *a;
6236 struct vnode *dvp, *vp;
6237
6238 a = ap;
6239 dvp = a->a_dvp;
6240 vp = a->a_vp;
6241 vn_seqc_write_end(dvp);
6242 vn_seqc_write_end(vp);
6243 if (!rc) {
6244 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
6245 VFS_KNOTE_LOCKED(vp, NOTE_DELETE);
6246 }
6247 }
6248
6249 void
6250 vop_rename_post(void *ap, int rc)
6251 {
6252 struct vop_rename_args *a = ap;
6253 long hint;
6254
6255 if (!rc) {
6256 hint = NOTE_WRITE;
6257 if (a->a_fdvp == a->a_tdvp) {
6258 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR)
6259 hint |= NOTE_LINK;
6260 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint);
6261 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint);
6262 } else {
6263 hint |= NOTE_EXTEND;
6264 if (a->a_fvp->v_type == VDIR)
6265 hint |= NOTE_LINK;
6266 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint);
6267
6268 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL &&
6269 a->a_tvp->v_type == VDIR)
6270 hint &= ~NOTE_LINK;
6271 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint);
6272 }
6273
6274 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME);
6275 if (a->a_tvp)
6276 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE);
6277 }
6278 if (a->a_tdvp != a->a_fdvp)
6279 vdrop(a->a_fdvp);
6280 if (a->a_tvp != a->a_fvp)
6281 vdrop(a->a_fvp);
6282 vdrop(a->a_tdvp);
6283 if (a->a_tvp)
6284 vdrop(a->a_tvp);
6285 }
6286
6287 void
6288 vop_rmdir_pre(void *ap)
6289 {
6290 struct vop_rmdir_args *a;
6291 struct vnode *dvp, *vp;
6292
6293 a = ap;
6294 dvp = a->a_dvp;
6295 vp = a->a_vp;
6296 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_UNLINK);
6297 vn_seqc_write_begin(dvp);
6298 vn_seqc_write_begin(vp);
6299 }
6300
6301 void
6302 vop_rmdir_post(void *ap, int rc)
6303 {
6304 struct vop_rmdir_args *a;
6305 struct vnode *dvp, *vp;
6306
6307 a = ap;
6308 dvp = a->a_dvp;
6309 vp = a->a_vp;
6310 vn_seqc_write_end(dvp);
6311 vn_seqc_write_end(vp);
6312 if (!rc) {
6313 vp->v_vflag |= VV_UNLINKED;
6314 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK);
6315 VFS_KNOTE_LOCKED(vp, NOTE_DELETE);
6316 }
6317 }
6318
6319 void
6320 vop_setattr_pre(void *ap)
6321 {
6322 struct vop_setattr_args *a;
6323 struct vnode *vp;
6324
6325 a = ap;
6326 vp = a->a_vp;
6327 vn_seqc_write_begin(vp);
6328 }
6329
6330 void
6331 vop_setattr_post(void *ap, int rc)
6332 {
6333 struct vop_setattr_args *a;
6334 struct vnode *vp;
6335
6336 a = ap;
6337 vp = a->a_vp;
6338 vn_seqc_write_end(vp);
6339 if (!rc)
6340 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
6341 }
6342
6343 void
6344 vop_setacl_pre(void *ap)
6345 {
6346 struct vop_setacl_args *a;
6347 struct vnode *vp;
6348
6349 a = ap;
6350 vp = a->a_vp;
6351 vn_seqc_write_begin(vp);
6352 }
6353
6354 void
6355 vop_setacl_post(void *ap, int rc __unused)
6356 {
6357 struct vop_setacl_args *a;
6358 struct vnode *vp;
6359
6360 a = ap;
6361 vp = a->a_vp;
6362 vn_seqc_write_end(vp);
6363 }
6364
6365 void
6366 vop_setextattr_pre(void *ap)
6367 {
6368 struct vop_setextattr_args *a;
6369 struct vnode *vp;
6370
6371 a = ap;
6372 vp = a->a_vp;
6373 vn_seqc_write_begin(vp);
6374 }
6375
6376 void
6377 vop_setextattr_post(void *ap, int rc)
6378 {
6379 struct vop_setextattr_args *a;
6380 struct vnode *vp;
6381
6382 a = ap;
6383 vp = a->a_vp;
6384 vn_seqc_write_end(vp);
6385 if (!rc)
6386 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
6387 }
6388
6389 void
6390 vop_symlink_pre(void *ap)
6391 {
6392 struct vop_symlink_args *a;
6393 struct vnode *dvp;
6394
6395 a = ap;
6396 dvp = a->a_dvp;
6397 vn_seqc_write_begin(dvp);
6398 }
6399
6400 void
6401 vop_symlink_post(void *ap, int rc)
6402 {
6403 struct vop_symlink_args *a;
6404 struct vnode *dvp;
6405
6406 a = ap;
6407 dvp = a->a_dvp;
6408 vn_seqc_write_end(dvp);
6409 if (!rc)
6410 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
6411 }
6412
6413 void
6414 vop_open_post(void *ap, int rc)
6415 {
6416 struct vop_open_args *a = ap;
6417
6418 if (!rc)
6419 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN);
6420 }
6421
6422 void
6423 vop_close_post(void *ap, int rc)
6424 {
6425 struct vop_close_args *a = ap;
6426
6427 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */
6428 !VN_IS_DOOMED(a->a_vp))) {
6429 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ?
6430 NOTE_CLOSE_WRITE : NOTE_CLOSE);
6431 }
6432 }
6433
6434 void
6435 vop_read_post(void *ap, int rc)
6436 {
6437 struct vop_read_args *a = ap;
6438
6439 if (!rc)
6440 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ);
6441 }
6442
6443 void
6444 vop_read_pgcache_post(void *ap, int rc)
6445 {
6446 struct vop_read_pgcache_args *a = ap;
6447
6448 if (!rc)
6449 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ);
6450 }
6451
6452 void
6453 vop_readdir_post(void *ap, int rc)
6454 {
6455 struct vop_readdir_args *a = ap;
6456
6457 if (!rc)
6458 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ);
6459 }
6460
6461 static struct knlist fs_knlist;
6462
6463 static void
6464 vfs_event_init(void *arg)
6465 {
6466 knlist_init_mtx(&fs_knlist, NULL);
6467 }
6468 /* XXX - correct order? */
6469 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);
6470
6471 void
6472 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused)
6473 {
6474
6475 KNOTE_UNLOCKED(&fs_knlist, event);
6476 }
6477
6478 static int filt_fsattach(struct knote *kn);
6479 static void filt_fsdetach(struct knote *kn);
6480 static int filt_fsevent(struct knote *kn, long hint);
6481
6482 const struct filterops fs_filtops = {
6483 .f_isfd = 0,
6484 .f_attach = filt_fsattach,
6485 .f_detach = filt_fsdetach,
6486 .f_event = filt_fsevent
6487 };
6488
6489 static int
6490 filt_fsattach(struct knote *kn)
6491 {
6492
6493 kn->kn_flags |= EV_CLEAR;
6494 knlist_add(&fs_knlist, kn, 0);
6495 return (0);
6496 }
6497
6498 static void
6499 filt_fsdetach(struct knote *kn)
6500 {
6501
6502 knlist_remove(&fs_knlist, kn, 0);
6503 }
6504
6505 static int
6506 filt_fsevent(struct knote *kn, long hint)
6507 {
6508
6509 kn->kn_fflags |= kn->kn_sfflags & hint;
6510
6511 return (kn->kn_fflags != 0);
6512 }
6513
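/*
 * Handler for the vfs.ctl sysctl: copy in a struct vfsidctl, resolve the
 * mount point by fsid, verify the filesystem type name and forward the
 * request to the filesystem via VFS_SYSCTL().
 */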
6514 static int
6515 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
6516 {
6517 struct vfsidctl vc;
6518 int error;
6519 struct mount *mp;
6520
6521 error = SYSCTL_IN(req, &vc, sizeof(vc));
6522 if (error)
6523 return (error);
6524 if (vc.vc_vers != VFS_CTL_VERS1)
6525 return (EINVAL);
6526 mp = vfs_getvfs(&vc.vc_fsid);
6527 if (mp == NULL)
6528 return (ENOENT);
6529 /* ensure that a specific sysctl goes to the right filesystem. */
6530 if (strcmp(vc.vc_fstypename, "*") != 0 &&
6531 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
6532 vfs_rel(mp);
6533 return (EINVAL);
6534 }
6535 VCTLTOREQ(&vc, req);
6536 error = VFS_SYSCTL(mp, vc.vc_op, req);
6537 vfs_rel(mp);
6538 return (error);
6539 }
6540
6541 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR,
6542 NULL, 0, sysctl_vfs_ctl, "",
6543 "Sysctl by fsid");
6544
6545 /*
6546 * Function to initialize a va_filerev field sensibly.
6547 * XXX: Wouldn't a random number make a lot more sense ??
6548 */
6549 u_quad_t
6550 init_va_filerev(void)
6551 {
6552 struct bintime bt;
6553
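/*
 * Pack the uptime into 64 bits: seconds in the upper half and the top
 * 32 bits of the fraction in the lower half, yielding a monotonically
 * increasing value.
 */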
6554 getbinuptime(&bt);
6555 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
6556 }
6557
6558 static int filt_vfsread(struct knote *kn, long hint);
6559 static int filt_vfswrite(struct knote *kn, long hint);
6560 static int filt_vfsvnode(struct knote *kn, long hint);
6561 static void filt_vfsdetach(struct knote *kn);
6562 static const struct filterops vfsread_filtops = {
6563 .f_isfd = 1,
6564 .f_detach = filt_vfsdetach,
6565 .f_event = filt_vfsread
6566 };
6567 static const struct filterops vfswrite_filtops = {
6568 .f_isfd = 1,
6569 .f_detach = filt_vfsdetach,
6570 .f_event = filt_vfswrite
6571 };
6572 static const struct filterops vfsvnode_filtops = {
6573 .f_isfd = 1,
6574 .f_detach = filt_vfsdetach,
6575 .f_event = filt_vfsvnode
6576 };
6577
6578 static void
6579 vfs_knllock(void *arg)
6580 {
6581 struct vnode *vp = arg;
6582
6583 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
6584 }
6585
6586 static void
6587 vfs_knlunlock(void *arg)
6588 {
6589 struct vnode *vp = arg;
6590
6591 VOP_UNLOCK(vp);
6592 }
6593
6594 static void
6595 vfs_knl_assert_lock(void *arg, int what)
6596 {
6597 #ifdef DEBUG_VFS_LOCKS
6598 struct vnode *vp = arg;
6599
6600 if (what == LA_LOCKED)
6601 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked");
6602 else
6603 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked");
6604 #endif
6605 }
6606
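/*
 * Default VOP_KQFILTER() implementation: select the filterops matching
 * the requested filter, then attach the knote to the vnode's pollinfo
 * knlist while holding the vnode for the lifetime of the registration.
 */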
6607 int
6608 vfs_kqfilter(struct vop_kqfilter_args *ap)
6609 {
6610 struct vnode *vp = ap->a_vp;
6611 struct knote *kn = ap->a_kn;
6612 struct knlist *knl;
6613
6614 KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ &&
6615 kn->kn_filter != EVFILT_WRITE),
6616 ("READ/WRITE filter on a FIFO leaked through"));
6617 switch (kn->kn_filter) {
6618 case EVFILT_READ:
6619 kn->kn_fop = &vfsread_filtops;
6620 break;
6621 case EVFILT_WRITE:
6622 kn->kn_fop = &vfswrite_filtops;
6623 break;
6624 case EVFILT_VNODE:
6625 kn->kn_fop = &vfsvnode_filtops;
6626 break;
6627 default:
6628 return (EINVAL);
6629 }
6630
6631 kn->kn_hook = (caddr_t)vp;
6632
6633 v_addpollinfo(vp);
6634 if (vp->v_pollinfo == NULL)
6635 return (ENOMEM);
6636 knl = &vp->v_pollinfo->vpi_selinfo.si_note;
6637 vhold(vp);
6638 knlist_add(knl, kn, 0);
6639
6640 return (0);
6641 }
6642
6643 /*
6644 * Detach knote from vnode
6645 */
6646 static void
6647 filt_vfsdetach(struct knote *kn)
6648 {
6649 struct vnode *vp = (struct vnode *)kn->kn_hook;
6650
6651 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
6652 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
6653 vdrop(vp);
6654 }
6655
6656 /*ARGSUSED*/
6657 static int
6658 filt_vfsread(struct knote *kn, long hint)
6659 {
6660 struct vnode *vp = (struct vnode *)kn->kn_hook;
6661 off_t size;
6662 int res;
6663
6664 /*
6665 * filesystem is gone, so set the EOF flag and schedule
6666 * the knote for deletion.
6667 */
6668 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
6669 VI_LOCK(vp);
6670 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
6671 VI_UNLOCK(vp);
6672 return (1);
6673 }
6674
6675 if (vn_getsize_locked(vp, &size, curthread->td_ucred) != 0)
6676 return (0);
6677
6678 VI_LOCK(vp);
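/* Report the amount of data available past the current file offset. */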
6679 kn->kn_data = size - kn->kn_fp->f_offset;
6680 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0;
6681 VI_UNLOCK(vp);
6682 return (res);
6683 }
6684
6685 /*ARGSUSED*/
6686 static int
6687 filt_vfswrite(struct knote *kn, long hint)
6688 {
6689 struct vnode *vp = (struct vnode *)kn->kn_hook;
6690
6691 VI_LOCK(vp);
6692
6693 /*
6694 * filesystem is gone, so set the EOF flag and schedule
6695 * the knote for deletion.
6696 */
6697 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD))
6698 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
6699
6700 kn->kn_data = 0;
6701 VI_UNLOCK(vp);
6702 return (1);
6703 }
6704
6705 static int
6706 filt_vfsvnode(struct knote *kn, long hint)
6707 {
6708 struct vnode *vp = (struct vnode *)kn->kn_hook;
6709 int res;
6710
6711 VI_LOCK(vp);
6712 if (kn->kn_sfflags & hint)
6713 kn->kn_fflags |= hint;
6714 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
6715 kn->kn_flags |= EV_EOF;
6716 VI_UNLOCK(vp);
6717 return (1);
6718 }
6719 res = (kn->kn_fflags != 0);
6720 VI_UNLOCK(vp);
6721 return (res);
6722 }
6723
6724 int
6725 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
6726 {
6727 int error;
6728
6729 if (dp->d_reclen > ap->a_uio->uio_resid)
6730 return (ENAMETOOLONG);
6731 error = uiomove(dp, dp->d_reclen, ap->a_uio);
6732 if (error) {
6733 if (ap->a_ncookies != NULL) {
6734 if (ap->a_cookies != NULL)
6735 free(ap->a_cookies, M_TEMP);
6736 ap->a_cookies = NULL;
6737 *ap->a_ncookies = 0;
6738 }
6739 return (error);
6740 }
6741 if (ap->a_ncookies == NULL)
6742 return (0);
6743
6744 KASSERT(ap->a_cookies,
6745 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));
6746
6747 *ap->a_cookies = realloc(*ap->a_cookies,
6748 (*ap->a_ncookies + 1) * sizeof(uint64_t), M_TEMP, M_WAITOK | M_ZERO);
6749 (*ap->a_cookies)[*ap->a_ncookies] = off;
6750 *ap->a_ncookies += 1;
6751 return (0);
6752 }
6753
6754 /*
6755 * The purpose of this routine is to remove granularity from accmode_t,
6756 * reducing it to the standard UNIX access bits - VEXEC, VREAD, VWRITE,
6757 * VADMIN and VAPPEND.
6758 *
6759 * If it returns 0, the caller is supposed to continue with the usual
6760 * access checks using 'accmode' as modified by this routine. If it
6761 * returns a nonzero value, the caller is supposed to return that value
6762 * as errno.
6763 *
6764 * Note that after this routine runs, accmode may be zero.
6765 */
6766 int
6767 vfs_unixify_accmode(accmode_t *accmode)
6768 {
6769 /*
6770 * There is no way to specify an explicit "deny" rule using
6771 * file mode or POSIX.1e ACLs.
6772 */
6773 if (*accmode & VEXPLICIT_DENY) {
6774 *accmode = 0;
6775 return (0);
6776 }
6777
6778 /*
6779 * None of these can be translated into the usual access bits.
6780 * Also, the common case for NFSv4 ACLs is to not contain
6781 * either of these bits. Caller should check for VWRITE
6782 * on the containing directory instead.
6783 */
6784 if (*accmode & (VDELETE_CHILD | VDELETE))
6785 return (EPERM);
6786
6787 if (*accmode & VADMIN_PERMS) {
6788 *accmode &= ~VADMIN_PERMS;
6789 *accmode |= VADMIN;
6790 }
6791
6792 /*
6793 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL
6794 * or VSYNCHRONIZE using file mode or POSIX.1e ACL.
6795 */
6796 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE);
6797
6798 return (0);
6799 }
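
/*
 * Illustrative sketch (an assumption, not part of the original code): a
 * VOP_ACCESSX() implementation would typically start with
 *
 *	error = vfs_unixify_accmode(&accmode);
 *	if (error != 0)
 *		return (error);
 *	if (accmode == 0)
 *		return (0);
 *
 * and only then proceed with the usual checks on the reduced accmode.
 */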
6800
6801 /*
6802 * Clear out a doomed vnode (if any) and replace it with a new one as long
6803 * as the fs is not being unmounted. Return the root vnode to the caller.
6804 */
6805 static int __noinline
6806 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp)
6807 {
6808 struct vnode *vp;
6809 int error;
6810
6811 restart:
6812 if (mp->mnt_rootvnode != NULL) {
6813 MNT_ILOCK(mp);
6814 vp = mp->mnt_rootvnode;
6815 if (vp != NULL) {
6816 if (!VN_IS_DOOMED(vp)) {
6817 vrefact(vp);
6818 MNT_IUNLOCK(mp);
6819 error = vn_lock(vp, flags);
6820 if (error == 0) {
6821 *vpp = vp;
6822 return (0);
6823 }
6824 vrele(vp);
6825 goto restart;
6826 }
6827 /*
6828 * Clear the old one.
6829 */
6830 mp->mnt_rootvnode = NULL;
6831 }
6832 MNT_IUNLOCK(mp);
6833 if (vp != NULL) {
6834 vfs_op_barrier_wait(mp);
6835 vrele(vp);
6836 }
6837 }
6838 error = VFS_CACHEDROOT(mp, flags, vpp);
6839 if (error != 0)
6840 return (error);
6841 if (mp->mnt_vfs_ops == 0) {
6842 MNT_ILOCK(mp);
6843 if (mp->mnt_vfs_ops != 0) {
6844 MNT_IUNLOCK(mp);
6845 return (0);
6846 }
6847 if (mp->mnt_rootvnode == NULL) {
6848 vrefact(*vpp);
6849 mp->mnt_rootvnode = *vpp;
6850 } else {
6851 if (mp->mnt_rootvnode != *vpp) {
6852 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) {
6853 panic("%s: mismatch between vnode returned "
6854 " by VFS_CACHEDROOT and the one cached "
6855 " (%p != %p)",
6856 __func__, *vpp, mp->mnt_rootvnode);
6857 }
6858 }
6859 }
6860 MNT_IUNLOCK(mp);
6861 }
6862 return (0);
6863 }
6864
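/*
 * Return a referenced and locked root vnode for the given mount point.
 * The common case is a lock-free read of the cached root inside a
 * vfs_op_thread_enter() section; if there is no usable cached vnode or
 * the fast path cannot be used, fall back to vfs_cache_root_fallback().
 */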
6865 int
6866 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp)
6867 {
6868 struct mount_pcpu *mpcpu;
6869 struct vnode *vp;
6870 int error;
6871
6872 if (!vfs_op_thread_enter(mp, mpcpu))
6873 return (vfs_cache_root_fallback(mp, flags, vpp));
6874 vp = atomic_load_ptr(&mp->mnt_rootvnode);
6875 if (vp == NULL || VN_IS_DOOMED(vp)) {
6876 vfs_op_thread_exit(mp, mpcpu);
6877 return (vfs_cache_root_fallback(mp, flags, vpp));
6878 }
6879 vrefact(vp);
6880 vfs_op_thread_exit(mp, mpcpu);
6881 error = vn_lock(vp, flags);
6882 if (error != 0) {
6883 vrele(vp);
6884 return (vfs_cache_root_fallback(mp, flags, vpp));
6885 }
6886 *vpp = vp;
6887 return (0);
6888 }
6889
6890 struct vnode *
6891 vfs_cache_root_clear(struct mount *mp)
6892 {
6893 struct vnode *vp;
6894
6895 /*
6896 * ops > 0 guarantees there is nobody who can see this vnode
6897 */
6898 MPASS(mp->mnt_vfs_ops > 0);
6899 vp = mp->mnt_rootvnode;
6900 if (vp != NULL)
6901 vn_seqc_write_begin(vp);
6902 mp->mnt_rootvnode = NULL;
6903 return (vp);
6904 }
6905
6906 void
6907 vfs_cache_root_set(struct mount *mp, struct vnode *vp)
6908 {
6909
6910 MPASS(mp->mnt_vfs_ops > 0);
6911 vrefact(vp);
6912 mp->mnt_rootvnode = vp;
6913 }
6914
6915 /*
6916 * These are helper functions for filesystems to traverse all
6917 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h.
6918 *
6919 * This interface replaces MNT_VNODE_FOREACH.
6920 */
6921
6922 struct vnode *
6923 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
6924 {
6925 struct vnode *vp;
6926
6927 maybe_yield();
6928 MNT_ILOCK(mp);
6929 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
6930 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
6931 vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
6932 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
6933 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
6934 continue;
6935 VI_LOCK(vp);
6936 if (VN_IS_DOOMED(vp)) {
6937 VI_UNLOCK(vp);
6938 continue;
6939 }
6940 break;
6941 }
6942 if (vp == NULL) {
6943 __mnt_vnode_markerfree_all(mvp, mp);
6944 /* MNT_IUNLOCK(mp); -- done in above function */
6945 mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
6946 return (NULL);
6947 }
6948 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
6949 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
6950 MNT_IUNLOCK(mp);
6951 return (vp);
6952 }
6953
6954 struct vnode *
6955 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
6956 {
6957 struct vnode *vp;
6958
6959 *mvp = vn_alloc_marker(mp);
6960 MNT_ILOCK(mp);
6961 MNT_REF(mp);
6962
6963 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
6964 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
6965 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
6966 continue;
6967 VI_LOCK(vp);
6968 if (VN_IS_DOOMED(vp)) {
6969 VI_UNLOCK(vp);
6970 continue;
6971 }
6972 break;
6973 }
6974 if (vp == NULL) {
6975 MNT_REL(mp);
6976 MNT_IUNLOCK(mp);
6977 vn_free_marker(*mvp);
6978 *mvp = NULL;
6979 return (NULL);
6980 }
6981 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
6982 MNT_IUNLOCK(mp);
6983 return (vp);
6984 }
6985
6986 void
6987 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
6988 {
6989
6990 if (*mvp == NULL) {
6991 MNT_IUNLOCK(mp);
6992 return;
6993 }
6994
6995 mtx_assert(MNT_MTX(mp), MA_OWNED);
6996
6997 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
6998 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
6999 MNT_REL(mp);
7000 MNT_IUNLOCK(mp);
7001 vn_free_marker(*mvp);
7002 *mvp = NULL;
7003 }
7004
7005 /*
7006 * These are helper functions for filesystems to traverse their
7007 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h
7008 */
7009 static void
7010 mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
7011 {
7012
7013 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
7014
7015 MNT_ILOCK(mp);
7016 MNT_REL(mp);
7017 MNT_IUNLOCK(mp);
7018 vn_free_marker(*mvp);
7019 *mvp = NULL;
7020 }
7021
7022 /*
7023 * Relock the mp mount vnode list lock with the vp vnode interlock in the
7024 * conventional lock order during mnt_vnode_next_lazy iteration.
7025 *
7026 * On entry, the mount vnode list lock is held and the vnode interlock is not.
7027 * The list lock is dropped and reacquired. On success, both locks are held.
7028 * On failure, the mount vnode list lock is held but the vnode interlock is
7029 * not, and the procedure may have yielded.
7030 */
7031 static bool
7032 mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp,
7033 struct vnode *vp)
7034 {
7035
7036 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER &&
7037 TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp,
7038 ("%s: bad marker", __func__));
7039 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp,
7040 ("%s: inappropriate vnode", __func__));
7041 ASSERT_VI_UNLOCKED(vp, __func__);
7042 mtx_assert(&mp->mnt_listmtx, MA_OWNED);
7043
7044 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist);
7045 TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist);
7046
7047 /*
7048 * Note we may be racing against vdrop which transitioned the hold
7049 * count to 0 and now waits for the ->mnt_listmtx lock. This is fine;
7050 * if we are the only user after we get the interlock, we will just
7051 * vdrop.
7052 */
7053 vhold(vp);
7054 mtx_unlock(&mp->mnt_listmtx);
7055 VI_LOCK(vp);
7056 if (VN_IS_DOOMED(vp)) {
7057 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
7058 goto out_lost;
7059 }
7060 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
7061 /*
7062 * There is nothing to do if we are the last user.
7063 */
7064 if (!refcount_release_if_not_last(&vp->v_holdcnt))
7065 goto out_lost;
7066 mtx_lock(&mp->mnt_listmtx);
7067 return (true);
7068 out_lost:
7069 vdropl(vp);
7070 maybe_yield();
7071 mtx_lock(&mp->mnt_listmtx);
7072 return (false);
7073 }
7074
7075 static struct vnode *
7076 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
7077 void *cbarg)
7078 {
7079 struct vnode *vp;
7080
7081 mtx_assert(&mp->mnt_listmtx, MA_OWNED);
7082 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
7083 restart:
7084 vp = TAILQ_NEXT(*mvp, v_lazylist);
7085 while (vp != NULL) {
7086 if (vp->v_type == VMARKER) {
7087 vp = TAILQ_NEXT(vp, v_lazylist);
7088 continue;
7089 }
7090 /*
7091 * See if we want to process the vnode. Note we may encounter a
7092 * long string of vnodes we don't care about and hog the list
7093 * as a result. Check for it and requeue the marker.
7094 */
7095 VNPASS(!VN_IS_DOOMED(vp), vp);
7096 if (!cb(vp, cbarg)) {
7097 if (!should_yield()) {
7098 vp = TAILQ_NEXT(vp, v_lazylist);
7099 continue;
7100 }
7101 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp,
7102 v_lazylist);
7103 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp,
7104 v_lazylist);
7105 mtx_unlock(&mp->mnt_listmtx);
7106 kern_yield(PRI_USER);
7107 mtx_lock(&mp->mnt_listmtx);
7108 goto restart;
7109 }
7110 /*
7111 * Try-lock because this is the wrong lock order.
7112 */
7113 if (!VI_TRYLOCK(vp) &&
7114 !mnt_vnode_next_lazy_relock(*mvp, mp, vp))
7115 goto restart;
7116 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
7117 KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
7118 ("alien vnode on the lazy list %p %p", vp, mp));
7119 VNPASS(vp->v_mount == mp, vp);
7120 VNPASS(!VN_IS_DOOMED(vp), vp);
7121 break;
7122 }
7123 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);
7124
7125 /* Check if we are done */
7126 if (vp == NULL) {
7127 mtx_unlock(&mp->mnt_listmtx);
7128 mnt_vnode_markerfree_lazy(mvp, mp);
7129 return (NULL);
7130 }
7131 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist);
7132 mtx_unlock(&mp->mnt_listmtx);
7133 ASSERT_VI_LOCKED(vp, "lazy iter");
7134 return (vp);
7135 }
7136
7137 struct vnode *
7138 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
7139 void *cbarg)
7140 {
7141
7142 maybe_yield();
7143 mtx_lock(&mp->mnt_listmtx);
7144 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
7145 }
7146
7147 struct vnode *
7148 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
7149 void *cbarg)
7150 {
7151 struct vnode *vp;
7152
7153 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist))
7154 return (NULL);
7155
7156 *mvp = vn_alloc_marker(mp);
7157 MNT_ILOCK(mp);
7158 MNT_REF(mp);
7159 MNT_IUNLOCK(mp);
7160
7161 mtx_lock(&mp->mnt_listmtx);
7162 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist);
7163 if (vp == NULL) {
7164 mtx_unlock(&mp->mnt_listmtx);
7165 mnt_vnode_markerfree_lazy(mvp, mp);
7166 return (NULL);
7167 }
7168 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist);
7169 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
7170 }
7171
7172 void
7173 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
7174 {
7175
7176 if (*mvp == NULL)
7177 return;
7178
7179 mtx_lock(&mp->mnt_listmtx);
7180 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);
7181 mtx_unlock(&mp->mnt_listmtx);
7182 mnt_vnode_markerfree_lazy(mvp, mp);
7183 }
7184
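/*
 * Check that the caller may search (execute) the directory during a
 * lookup, unless the component explicitly asked to skip the check via
 * NOEXECCHECK, which is consumed here.
 */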
7185 int
7186 vn_dir_check_exec(struct vnode *vp, struct componentname *cnp)
7187 {
7188
7189 if ((cnp->cn_flags & NOEXECCHECK) != 0) {
7190 cnp->cn_flags &= ~NOEXECCHECK;
7191 return (0);
7192 }
7193
7194 return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread));
7195 }
7196
7197 /*
7198 * Do not use this variant unless you have means other than the hold count
7199 * to prevent the vnode from getting freed.
7200 */
7201 void
7202 vn_seqc_write_begin_locked(struct vnode *vp)
7203 {
7204
7205 ASSERT_VI_LOCKED(vp, __func__);
7206 VNPASS(vp->v_holdcnt > 0, vp);
7207 VNPASS(vp->v_seqc_users >= 0, vp);
7208 vp->v_seqc_users++;
7209 if (vp->v_seqc_users == 1)
7210 seqc_sleepable_write_begin(&vp->v_seqc);
7211 }
7212
7213 void
7214 vn_seqc_write_begin(struct vnode *vp)
7215 {
7216
7217 VI_LOCK(vp);
7218 vn_seqc_write_begin_locked(vp);
7219 VI_UNLOCK(vp);
7220 }
7221
7222 void
7223 vn_seqc_write_end_locked(struct vnode *vp)
7224 {
7225
7226 ASSERT_VI_LOCKED(vp, __func__);
7227 VNPASS(vp->v_seqc_users > 0, vp);
7228 vp->v_seqc_users--;
7229 if (vp->v_seqc_users == 0)
7230 seqc_sleepable_write_end(&vp->v_seqc);
7231 }
7232
7233 void
7234 vn_seqc_write_end(struct vnode *vp)
7235 {
7236
7237 VI_LOCK(vp);
7238 vn_seqc_write_end_locked(vp);
7239 VI_UNLOCK(vp);
7240 }
7241
7242 /*
7243 * Special case handling for allocating and freeing vnodes.
7244 *
7245 * The counter remains unchanged on free so that a doomed vnode will
7246 * keep testing as in-modify for as long as it is accessible with SMR.
7247 */
7248 static void
7249 vn_seqc_init(struct vnode *vp)
7250 {
7251
7252 vp->v_seqc = 0;
7253 vp->v_seqc_users = 0;
7254 }
7255
7256 static void
7257 vn_seqc_write_end_free(struct vnode *vp)
7258 {
7259
7260 VNPASS(seqc_in_modify(vp->v_seqc), vp);
7261 VNPASS(vp->v_seqc_users == 1, vp);
7262 }
7263
7264 void
7265 vn_irflag_set_locked(struct vnode *vp, short toset)
7266 {
7267 short flags;
7268
7269 ASSERT_VI_LOCKED(vp, __func__);
7270 flags = vn_irflag_read(vp);
7271 VNASSERT((flags & toset) == 0, vp,
7272 ("%s: some of the passed flags already set (have %d, passed %d)\n",
7273 __func__, flags, toset));
7274 atomic_store_short(&vp->v_irflag, flags | toset);
7275 }
7276
7277 void
7278 vn_irflag_set(struct vnode *vp, short toset)
7279 {
7280
7281 VI_LOCK(vp);
7282 vn_irflag_set_locked(vp, toset);
7283 VI_UNLOCK(vp);
7284 }
7285
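/*
 * Like vn_irflag_set_locked(), but does not insist that the flags being
 * set were previously clear.
 */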
7286 void
7287 vn_irflag_set_cond_locked(struct vnode *vp, short toset)
7288 {
7289 short flags;
7290
7291 ASSERT_VI_LOCKED(vp, __func__);
7292 flags = vn_irflag_read(vp);
7293 atomic_store_short(&vp->v_irflag, flags | toset);
7294 }
7295
7296 void
7297 vn_irflag_set_cond(struct vnode *vp, short toset)
7298 {
7299
7300 VI_LOCK(vp);
7301 vn_irflag_set_cond_locked(vp, toset);
7302 VI_UNLOCK(vp);
7303 }
7304
7305 void
7306 vn_irflag_unset_locked(struct vnode *vp, short tounset)
7307 {
7308 short flags;
7309
7310 ASSERT_VI_LOCKED(vp, __func__);
7311 flags = vn_irflag_read(vp);
7312 VNASSERT((flags & tounset) == tounset, vp,
7313 ("%s: some of the passed flags not set (have %d, passed %d)\n",
7314 __func__, flags, tounset));
7315 atomic_store_short(&vp->v_irflag, flags & ~tounset);
7316 }
7317
7318 void
7319 vn_irflag_unset(struct vnode *vp, short tounset)
7320 {
7321
7322 VI_LOCK(vp);
7323 vn_irflag_unset_locked(vp, tounset);
7324 VI_UNLOCK(vp);
7325 }
7326
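/*
 * Fetch the file size via VOP_GETATTR() with the vnode already locked,
 * failing with EFBIG if the size does not fit into off_t.
 */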
7327 int
7328 vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *cred)
7329 {
7330 struct vattr vattr;
7331 int error;
7332
7333 ASSERT_VOP_LOCKED(vp, __func__);
7334 error = VOP_GETATTR(vp, &vattr, cred);
7335 if (__predict_true(error == 0)) {
7336 if (vattr.va_size <= OFF_MAX)
7337 *size = vattr.va_size;
7338 else
7339 error = EFBIG;
7340 }
7341 return (error);
7342 }
7343
7344 int
7345 vn_getsize(struct vnode *vp, off_t *size, struct ucred *cred)
7346 {
7347 int error;
7348
7349 VOP_LOCK(vp, LK_SHARED);
7350 error = vn_getsize_locked(vp, size, cred);
7351 VOP_UNLOCK(vp);
7352 return (error);
7353 }
7354
7355 #ifdef INVARIANTS
7356 void
7357 vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state)
7358 {
7359
7360 switch (vp->v_state) {
7361 case VSTATE_UNINITIALIZED:
7362 switch (state) {
7363 case VSTATE_CONSTRUCTED:
7364 case VSTATE_DESTROYING:
7365 return;
7366 default:
7367 break;
7368 }
7369 break;
7370 case VSTATE_CONSTRUCTED:
7371 ASSERT_VOP_ELOCKED(vp, __func__);
7372 switch (state) {
7373 case VSTATE_DESTROYING:
7374 return;
7375 default:
7376 break;
7377 }
7378 break;
7379 case VSTATE_DESTROYING:
7380 ASSERT_VOP_ELOCKED(vp, __func__);
7381 switch (state) {
7382 case VSTATE_DEAD:
7383 return;
7384 default:
7385 break;
7386 }
7387 break;
7388 case VSTATE_DEAD:
7389 switch (state) {
7390 case VSTATE_UNINITIALIZED:
7391 return;
7392 default:
7393 break;
7394 }
7395 break;
7396 }
7397
7398 vn_printf(vp, "invalid state transition %d -> %d\n", vp->v_state, state);
7399 panic("invalid state transition %d -> %d\n", vp->v_state, state);
7400 }
7401 #endif
7402