1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1993, 1995 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * Poul-Henning Kamp of the FreeBSD Project. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95 35 */ 36 37 #include <sys/cdefs.h> 38 __FBSDID("$FreeBSD$"); 39 40 #include "opt_ddb.h" 41 #include "opt_ktrace.h" 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/capsicum.h> 46 #include <sys/counter.h> 47 #include <sys/filedesc.h> 48 #include <sys/fnv_hash.h> 49 #include <sys/kernel.h> 50 #include <sys/ktr.h> 51 #include <sys/lock.h> 52 #include <sys/malloc.h> 53 #include <sys/fcntl.h> 54 #include <sys/jail.h> 55 #include <sys/mount.h> 56 #include <sys/namei.h> 57 #include <sys/proc.h> 58 #include <sys/seqc.h> 59 #include <sys/sdt.h> 60 #include <sys/smr.h> 61 #include <sys/smp.h> 62 #include <sys/syscallsubr.h> 63 #include <sys/sysctl.h> 64 #include <sys/sysproto.h> 65 #include <sys/vnode.h> 66 #include <ck_queue.h> 67 #ifdef KTRACE 68 #include <sys/ktrace.h> 69 #endif 70 #ifdef INVARIANTS 71 #include <machine/_inttypes.h> 72 #endif 73 74 #include <sys/capsicum.h> 75 76 #include <security/audit/audit.h> 77 #include <security/mac/mac_framework.h> 78 79 #ifdef DDB 80 #include <ddb/ddb.h> 81 #endif 82 83 #include <vm/uma.h> 84 85 /* 86 * High level overview of name caching in the VFS layer. 87 * 88 * Originally caching was implemented as part of UFS, later extracted to allow 89 * use by other filesystems. A decision was made to make it optional and 90 * completely detached from the rest of the kernel, which comes with limitations 91 * outlined near the end of this comment block. 92 * 93 * This fundamental choice needs to be revisited. In the meantime, the current 94 * state is described below. Significance of all notable routines is explained 95 * in comments placed above their implementation. 
 * Scattered throughout the file are TODO comments indicating shortcomings
 * which can be fixed without reworking everything (most of the fixes will
 * likely be reusable). Various details are omitted from this explanation to
 * not clutter the overview, they have to be checked by reading the code and
 * associated commentary.
 *
 * Keep in mind that it's individual path components which are cached, not
 * full paths. That is, for a fully cached path "foo/bar/baz" there are 3
 * entries, one for each name.
 *
 * I. Data organization
 *
 * Entries are described by "struct namecache" objects and stored in a hash
 * table. See cache_get_hash for more information.
 *
 * "struct vnode" contains pointers to source entries (names which can be
 * found when traversing through said vnode), destination entries (names of
 * that vnode; see "Limitations" for a breakdown on the subject) and a pointer
 * to the parent vnode.
 *
 * The (directory vnode; name) tuple reliably determines the target entry if
 * it exists.
 *
 * Since there are no small locks at this time (all are 32 bytes in size on
 * LP64), the code works around the problem by introducing lock arrays to
 * protect hash buckets and vnode lists.
 *
 * II. Filesystem integration
 *
 * Filesystems participating in name caching do the following:
 * - set the vop_lookup routine to vfs_cache_lookup
 * - set vop_cachedlookup to whatever can perform the lookup if the above fails
 * - if they support lockless lookup (see below), vop_fplookup_vexec and
 *   vop_fplookup_symlink are set along with the MNTK_FPLOOKUP flag on the
 *   mount point
 * - call cache_purge or cache_vop_* routines to eliminate stale entries as
 *   applicable
 * - call cache_enter to add entries depending on the MAKEENTRY flag
 *
 * With the above in mind, there are 2 entry points when doing lookups:
 * - ... -> namei -> cache_fplookup -- this is the default
 * - ... -> VOP_LOOKUP -> vfs_cache_lookup -- normally only called by namei
 *   should the above fail
 *
 * Example code flow for how an entry is added:
 * ... -> namei -> cache_fplookup -> cache_fplookup_noentry -> VOP_LOOKUP ->
 * vfs_cache_lookup -> VOP_CACHEDLOOKUP -> ufs_lookup_ino -> cache_enter
 *
 * III. Performance considerations
 *
 * For the lockless case, forward lookup avoids any writes to shared areas
 * apart from the terminal path component. In other words, non-modifying
 * lookups of different files don't suffer any scalability problems in the
 * namecache. Looking up the same file is limited by VFS and goes beyond the
 * scope of this file.
 *
 * At least on amd64 the single-threaded bottleneck for long paths is hashing
 * (see cache_get_hash). There are cases where the code issues an acquire
 * fence multiple times; these can be combined on architectures which suffer
 * from it.
 *
 * For the locked case, each encountered vnode has to be referenced and locked
 * in order to be handed out to the caller (normally that's namei). This
 * introduces a significant single-threaded hit and serialization when
 * multi-threaded.
 *
 * Reverse lookup (e.g., "getcwd") fully scales provided it is fully cached --
 * it avoids any writes to shared areas for any of the components.
 *
 * Unrelated insertions are partially serialized on updating the global entry
 * counter and possibly serialized on colliding bucket or vnode locks.
 *
 * IV. Observability
 *
 * Note that not everything has an explicit dtrace probe, nor should it, thus
 * some of the one-liners below depend on implementation details.
 *
 * Examples:
 *
 * # Check what lookups failed to be handled in a lockless manner. Column 1 is
 * # the line number, column 2 is the status code (see cache_fpl_status)
 * dtrace -n 'vfs:fplookup:lookup:done { @[arg1, arg2] = count(); }'
 *
 * # Lengths of names added by binary name
 * dtrace -n 'fbt::cache_enter_time:entry { @[execname] = quantize(args[2]->cn_namelen); }'
 *
 * # Same as above but only those which exceed 64 characters
 * dtrace -n 'fbt::cache_enter_time:entry /args[2]->cn_namelen > 64/ { @[execname] = quantize(args[2]->cn_namelen); }'
 *
 * # Who is performing lookups with spurious slashes (e.g., "foo//bar") and
 * # what path is it
 * dtrace -n 'fbt::cache_fplookup_skip_slashes:entry { @[execname, stringof(args[0]->cnp->cn_pnbuf)] = count(); }'
 *
 * V. Limitations and implementation defects
 *
 * - since it is possible there is no entry for an open file, tools like
 *   "procstat" may fail to resolve fd -> vnode -> path to anything
 * - even if a filesystem adds an entry, it may get purged (e.g., due to
 *   memory shortage) in which case the above problem applies
 * - hardlinks are not tracked, thus if a vnode is reachable in more than one
 *   way, resolving a name may return a different path than the one used to
 *   open it (even if said path is still valid)
 * - by default entries are not added for newly created files
 * - adding an entry may need to evict a negative entry first, which happens
 *   in 2 distinct places (evicting on lookup, adding in a later VOP) making
 *   it impossible to simply reuse it
 * - there is a simple scheme to evict negative entries as the cache is
 *   approaching its capacity, but it is very unclear if doing so is a good
 *   idea to begin with
 * - vnodes are subject to being recycled even if the target inode is left in
 *   memory, which loses the name cache entries when it perhaps should not.
 *   In case of tmpfs names get duplicated -- kept by the filesystem itself
 *   and the namecache separately
 * - struct namecache has a fixed size and comes in 2 variants, often wasting
 *   space. It is now hard to replace with malloc due to the dependence on SMR.
206 * - lack of better integration with the kernel also turns nullfs into a layered 207 * filesystem instead of something which can take advantage of caching 208 */ 209 210 static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 211 "Name cache"); 212 213 SDT_PROVIDER_DECLARE(vfs); 214 SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *", 215 "struct vnode *"); 216 SDT_PROBE_DEFINE3(vfs, namecache, enter, duplicate, "struct vnode *", "char *", 217 "struct vnode *"); 218 SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *", 219 "char *"); 220 SDT_PROBE_DEFINE2(vfs, namecache, fullpath_smr, hit, "struct vnode *", 221 "const char *"); 222 SDT_PROBE_DEFINE4(vfs, namecache, fullpath_smr, miss, "struct vnode *", 223 "struct namecache *", "int", "int"); 224 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *"); 225 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *", 226 "char *", "struct vnode *"); 227 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *"); 228 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int", 229 "struct vnode *", "char *"); 230 SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *", 231 "struct vnode *"); 232 SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit__negative, 233 "struct vnode *", "char *"); 234 SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *", 235 "char *"); 236 SDT_PROBE_DEFINE2(vfs, namecache, removecnp, hit, "struct vnode *", 237 "struct componentname *"); 238 SDT_PROBE_DEFINE2(vfs, namecache, removecnp, miss, "struct vnode *", 239 "struct componentname *"); 240 SDT_PROBE_DEFINE3(vfs, namecache, purge, done, "struct vnode *", "size_t", "size_t"); 241 SDT_PROBE_DEFINE1(vfs, namecache, purge, batch, "int"); 242 SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *"); 243 SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *"); 244 SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *", 245 "struct vnode *"); 246 SDT_PROBE_DEFINE2(vfs, namecache, zap_negative, done, "struct vnode *", 247 "char *"); 248 SDT_PROBE_DEFINE2(vfs, namecache, evict_negative, done, "struct vnode *", 249 "char *"); 250 SDT_PROBE_DEFINE1(vfs, namecache, symlink, alloc__fail, "size_t"); 251 252 SDT_PROBE_DEFINE3(vfs, fplookup, lookup, done, "struct nameidata", "int", "bool"); 253 SDT_PROBE_DECLARE(vfs, namei, lookup, entry); 254 SDT_PROBE_DECLARE(vfs, namei, lookup, return); 255 256 static char __read_frequently cache_fast_lookup_enabled = true; 257 258 /* 259 * This structure describes the elements in the cache of recent 260 * names looked up by namei. 261 */ 262 struct negstate { 263 u_char neg_flag; 264 u_char neg_hit; 265 }; 266 _Static_assert(sizeof(struct negstate) <= sizeof(struct vnode *), 267 "the state must fit in a union with a pointer without growing it"); 268 269 struct namecache { 270 LIST_ENTRY(namecache) nc_src; /* source vnode list */ 271 TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */ 272 CK_SLIST_ENTRY(namecache) nc_hash;/* hash chain */ 273 struct vnode *nc_dvp; /* vnode of parent of name */ 274 union { 275 struct vnode *nu_vp; /* vnode the name refers to */ 276 struct negstate nu_neg;/* negative entry state */ 277 } n_un; 278 u_char nc_flag; /* flag bits */ 279 u_char nc_nlen; /* length of name */ 280 char nc_name[0]; /* segment name + nul */ 281 }; 282 283 /* 284 * struct namecache_ts repeats struct namecache layout up to the 285 * nc_nlen member. 
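 * As an illustrative sketch (mirroring what cache_out_ts below does): given
 * a "struct namecache *ncp" which is known to carry timestamps (NCF_TS is
 * set), the enclosing object is recovered with:
 *
 *	ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
 *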
286 * struct namecache_ts is used in place of struct namecache when time(s) need 287 * to be stored. The nc_dotdottime field is used when a cache entry is mapping 288 * both a non-dotdot directory name plus dotdot for the directory's 289 * parent. 290 * 291 * See below for alignment requirement. 292 */ 293 struct namecache_ts { 294 struct timespec nc_time; /* timespec provided by fs */ 295 struct timespec nc_dotdottime; /* dotdot timespec provided by fs */ 296 int nc_ticks; /* ticks value when entry was added */ 297 int nc_pad; 298 struct namecache nc_nc; 299 }; 300 301 TAILQ_HEAD(cache_freebatch, namecache); 302 303 /* 304 * At least mips n32 performs 64-bit accesses to timespec as found 305 * in namecache_ts and requires them to be aligned. Since others 306 * may be in the same spot suffer a little bit and enforce the 307 * alignment for everyone. Note this is a nop for 64-bit platforms. 308 */ 309 #define CACHE_ZONE_ALIGNMENT UMA_ALIGNOF(time_t) 310 311 /* 312 * TODO: the initial value of CACHE_PATH_CUTOFF was inherited from the 313 * 4.4 BSD codebase. Later on struct namecache was tweaked to become 314 * smaller and the value was bumped to retain the total size, but it 315 * was never re-evaluated for suitability. A simple test counting 316 * lengths during package building shows that the value of 45 covers 317 * about 86% of all added entries, reaching 99% at 65. 318 * 319 * Regardless of the above, use of dedicated zones instead of malloc may be 320 * inducing additional waste. This may be hard to address as said zones are 321 * tied to VFS SMR. Even if retaining them, the current split should be 322 * re-evaluated. 323 */ 324 #ifdef __LP64__ 325 #define CACHE_PATH_CUTOFF 45 326 #define CACHE_LARGE_PAD 6 327 #else 328 #define CACHE_PATH_CUTOFF 41 329 #define CACHE_LARGE_PAD 2 330 #endif 331 332 #define CACHE_ZONE_SMALL_SIZE (offsetof(struct namecache, nc_name) + CACHE_PATH_CUTOFF + 1) 333 #define CACHE_ZONE_SMALL_TS_SIZE (offsetof(struct namecache_ts, nc_nc) + CACHE_ZONE_SMALL_SIZE) 334 #define CACHE_ZONE_LARGE_SIZE (offsetof(struct namecache, nc_name) + NAME_MAX + 1 + CACHE_LARGE_PAD) 335 #define CACHE_ZONE_LARGE_TS_SIZE (offsetof(struct namecache_ts, nc_nc) + CACHE_ZONE_LARGE_SIZE) 336 337 _Static_assert((CACHE_ZONE_SMALL_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size"); 338 _Static_assert((CACHE_ZONE_SMALL_TS_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size"); 339 _Static_assert((CACHE_ZONE_LARGE_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size"); 340 _Static_assert((CACHE_ZONE_LARGE_TS_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size"); 341 342 #define nc_vp n_un.nu_vp 343 #define nc_neg n_un.nu_neg 344 345 /* 346 * Flags in namecache.nc_flag 347 */ 348 #define NCF_WHITE 0x01 349 #define NCF_ISDOTDOT 0x02 350 #define NCF_TS 0x04 351 #define NCF_DTS 0x08 352 #define NCF_DVDROP 0x10 353 #define NCF_NEGATIVE 0x20 354 #define NCF_INVALID 0x40 355 #define NCF_WIP 0x80 356 357 /* 358 * Flags in negstate.neg_flag 359 */ 360 #define NEG_HOT 0x01 361 362 static bool cache_neg_evict_cond(u_long lnumcache); 363 364 /* 365 * Mark an entry as invalid. 366 * 367 * This is called before it starts getting deconstructed. 368 */ 369 static void 370 cache_ncp_invalidate(struct namecache *ncp) 371 { 372 373 KASSERT((ncp->nc_flag & NCF_INVALID) == 0, 374 ("%s: entry %p already invalid", __func__, ncp)); 375 atomic_store_char(&ncp->nc_flag, ncp->nc_flag | NCF_INVALID); 376 atomic_thread_fence_rel(); 377 } 378 379 /* 380 * Check whether the entry can be safely used. 
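 *
 * A minimal sketch of the intended lockless usage pattern (assuming the
 * caller is inside an SMR section; the lookup step and the copied fields are
 * illustrative only):
 *
 *	vfs_smr_enter();
 *	ncp = <find the entry in the hash chain>;
 *	<copy everything needed out of *ncp>;
 *	if (!cache_ncp_canuse(ncp))
 *		<discard the copied data, fall back to the locked lookup>;
 *	vfs_smr_exit();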
381 * 382 * All places which elide locks are supposed to call this after they are 383 * done with reading from an entry. 384 */ 385 #define cache_ncp_canuse(ncp) ({ \ 386 struct namecache *_ncp = (ncp); \ 387 u_char _nc_flag; \ 388 \ 389 atomic_thread_fence_acq(); \ 390 _nc_flag = atomic_load_char(&_ncp->nc_flag); \ 391 __predict_true((_nc_flag & (NCF_INVALID | NCF_WIP)) == 0); \ 392 }) 393 394 /* 395 * Like the above but also checks NCF_WHITE. 396 */ 397 #define cache_fpl_neg_ncp_canuse(ncp) ({ \ 398 struct namecache *_ncp = (ncp); \ 399 u_char _nc_flag; \ 400 \ 401 atomic_thread_fence_acq(); \ 402 _nc_flag = atomic_load_char(&_ncp->nc_flag); \ 403 __predict_true((_nc_flag & (NCF_INVALID | NCF_WIP | NCF_WHITE)) == 0); \ 404 }) 405 406 VFS_SMR_DECLARE; 407 408 static SYSCTL_NODE(_vfs_cache, OID_AUTO, param, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 409 "Name cache parameters"); 410 411 static u_int __read_mostly ncsize; /* the size as computed on creation or resizing */ 412 SYSCTL_UINT(_vfs_cache_param, OID_AUTO, size, CTLFLAG_RW, &ncsize, 0, 413 "Total namecache capacity"); 414 415 u_int ncsizefactor = 2; 416 SYSCTL_UINT(_vfs_cache_param, OID_AUTO, sizefactor, CTLFLAG_RW, &ncsizefactor, 0, 417 "Size factor for namecache"); 418 419 static u_long __read_mostly ncnegfactor = 5; /* ratio of negative entries */ 420 SYSCTL_ULONG(_vfs_cache_param, OID_AUTO, negfactor, CTLFLAG_RW, &ncnegfactor, 0, 421 "Ratio of negative namecache entries"); 422 423 /* 424 * Negative entry % of namecache capacity above which automatic eviction is allowed. 425 * 426 * Check cache_neg_evict_cond for details. 427 */ 428 static u_int ncnegminpct = 3; 429 430 static u_int __read_mostly neg_min; /* the above recomputed against ncsize */ 431 SYSCTL_UINT(_vfs_cache_param, OID_AUTO, negmin, CTLFLAG_RD, &neg_min, 0, 432 "Negative entry count above which automatic eviction is allowed"); 433 434 /* 435 * Structures associated with name caching. 
436 */ 437 #define NCHHASH(hash) \ 438 (&nchashtbl[(hash) & nchash]) 439 static __read_mostly CK_SLIST_HEAD(nchashhead, namecache) *nchashtbl;/* Hash Table */ 440 static u_long __read_mostly nchash; /* size of hash table */ 441 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, 442 "Size of namecache hash table"); 443 static u_long __exclusive_cache_line numneg; /* number of negative entries allocated */ 444 static u_long __exclusive_cache_line numcache;/* number of cache entries allocated */ 445 446 struct nchstats nchstats; /* cache effectiveness statistics */ 447 448 static bool __read_mostly cache_rename_add = true; 449 SYSCTL_BOOL(_vfs, OID_AUTO, cache_rename_add, CTLFLAG_RW, 450 &cache_rename_add, 0, ""); 451 452 static u_int __exclusive_cache_line neg_cycle; 453 454 #define ncneghash 3 455 #define numneglists (ncneghash + 1) 456 457 struct neglist { 458 struct mtx nl_evict_lock; 459 struct mtx nl_lock __aligned(CACHE_LINE_SIZE); 460 TAILQ_HEAD(, namecache) nl_list; 461 TAILQ_HEAD(, namecache) nl_hotlist; 462 u_long nl_hotnum; 463 } __aligned(CACHE_LINE_SIZE); 464 465 static struct neglist neglists[numneglists]; 466 467 static inline struct neglist * 468 NCP2NEGLIST(struct namecache *ncp) 469 { 470 471 return (&neglists[(((uintptr_t)(ncp) >> 8) & ncneghash)]); 472 } 473 474 static inline struct negstate * 475 NCP2NEGSTATE(struct namecache *ncp) 476 { 477 478 MPASS(atomic_load_char(&ncp->nc_flag) & NCF_NEGATIVE); 479 return (&ncp->nc_neg); 480 } 481 482 #define numbucketlocks (ncbuckethash + 1) 483 static u_int __read_mostly ncbuckethash; 484 static struct mtx_padalign __read_mostly *bucketlocks; 485 #define HASH2BUCKETLOCK(hash) \ 486 ((struct mtx *)(&bucketlocks[((hash) & ncbuckethash)])) 487 488 #define numvnodelocks (ncvnodehash + 1) 489 static u_int __read_mostly ncvnodehash; 490 static struct mtx __read_mostly *vnodelocks; 491 static inline struct mtx * 492 VP2VNODELOCK(struct vnode *vp) 493 { 494 495 return (&vnodelocks[(((uintptr_t)(vp) >> 8) & ncvnodehash)]); 496 } 497 498 static void 499 cache_out_ts(struct namecache *ncp, struct timespec *tsp, int *ticksp) 500 { 501 struct namecache_ts *ncp_ts; 502 503 KASSERT((ncp->nc_flag & NCF_TS) != 0 || 504 (tsp == NULL && ticksp == NULL), 505 ("No NCF_TS")); 506 507 if (tsp == NULL) 508 return; 509 510 ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc); 511 *tsp = ncp_ts->nc_time; 512 *ticksp = ncp_ts->nc_ticks; 513 } 514 515 #ifdef DEBUG_CACHE 516 static int __read_mostly doingcache = 1; /* 1 => enable the cache */ 517 SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, 518 "VFS namecache enabled"); 519 #endif 520 521 /* Export size information to userland */ 522 SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, 523 sizeof(struct namecache), "sizeof(struct namecache)"); 524 525 /* 526 * The new name cache statistics 527 */ 528 static SYSCTL_NODE(_vfs_cache, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 529 "Name cache statistics"); 530 531 #define STATNODE_ULONG(name, varname, descr) \ 532 SYSCTL_ULONG(_vfs_cache_stats, OID_AUTO, name, CTLFLAG_RD, &varname, 0, descr); 533 #define STATNODE_COUNTER(name, varname, descr) \ 534 static COUNTER_U64_DEFINE_EARLY(varname); \ 535 SYSCTL_COUNTER_U64(_vfs_cache_stats, OID_AUTO, name, CTLFLAG_RD, &varname, \ 536 descr); 537 STATNODE_ULONG(neg, numneg, "Number of negative cache entries"); 538 STATNODE_ULONG(count, numcache, "Number of cache entries"); 539 STATNODE_COUNTER(heldvnodes, numcachehv, "Number of namecache entries with 
vnodes held"); 540 STATNODE_COUNTER(drops, numdrops, "Number of dropped entries due to reaching the limit"); 541 STATNODE_COUNTER(dothits, dothits, "Number of '.' hits"); 542 STATNODE_COUNTER(dotdothis, dotdothits, "Number of '..' hits"); 543 STATNODE_COUNTER(miss, nummiss, "Number of cache misses"); 544 STATNODE_COUNTER(misszap, nummisszap, "Number of cache misses we do not want to cache"); 545 STATNODE_COUNTER(posszaps, numposzaps, 546 "Number of cache hits (positive) we do not want to cache"); 547 STATNODE_COUNTER(poshits, numposhits, "Number of cache hits (positive)"); 548 STATNODE_COUNTER(negzaps, numnegzaps, 549 "Number of cache hits (negative) we do not want to cache"); 550 STATNODE_COUNTER(neghits, numneghits, "Number of cache hits (negative)"); 551 /* These count for vn_getcwd(), too. */ 552 STATNODE_COUNTER(fullpathcalls, numfullpathcalls, "Number of fullpath search calls"); 553 STATNODE_COUNTER(fullpathfail1, numfullpathfail1, "Number of fullpath search errors (ENOTDIR)"); 554 STATNODE_COUNTER(fullpathfail2, numfullpathfail2, 555 "Number of fullpath search errors (VOP_VPTOCNP failures)"); 556 STATNODE_COUNTER(fullpathfail4, numfullpathfail4, "Number of fullpath search errors (ENOMEM)"); 557 STATNODE_COUNTER(fullpathfound, numfullpathfound, "Number of successful fullpath calls"); 558 STATNODE_COUNTER(symlinktoobig, symlinktoobig, "Number of times symlink did not fit the cache"); 559 560 /* 561 * Debug or developer statistics. 562 */ 563 static SYSCTL_NODE(_vfs_cache, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 564 "Name cache debugging"); 565 #define DEBUGNODE_ULONG(name, varname, descr) \ 566 SYSCTL_ULONG(_vfs_cache_debug, OID_AUTO, name, CTLFLAG_RD, &varname, 0, descr); 567 #define DEBUGNODE_COUNTER(name, varname, descr) \ 568 static COUNTER_U64_DEFINE_EARLY(varname); \ 569 SYSCTL_COUNTER_U64(_vfs_cache_debug, OID_AUTO, name, CTLFLAG_RD, &varname, \ 570 descr); 571 DEBUGNODE_COUNTER(zap_bucket_relock_success, zap_bucket_relock_success, 572 "Number of successful removals after relocking"); 573 static long zap_bucket_fail; 574 DEBUGNODE_ULONG(zap_bucket_fail, zap_bucket_fail, ""); 575 static long zap_bucket_fail2; 576 DEBUGNODE_ULONG(zap_bucket_fail2, zap_bucket_fail2, ""); 577 static long cache_lock_vnodes_cel_3_failures; 578 DEBUGNODE_ULONG(vnodes_cel_3_failures, cache_lock_vnodes_cel_3_failures, 579 "Number of times 3-way vnode locking failed"); 580 581 static void cache_zap_locked(struct namecache *ncp); 582 static int vn_fullpath_any_smr(struct vnode *vp, struct vnode *rdir, char *buf, 583 char **retbuf, size_t *buflen, size_t addend); 584 static int vn_fullpath_any(struct vnode *vp, struct vnode *rdir, char *buf, 585 char **retbuf, size_t *buflen); 586 static int vn_fullpath_dir(struct vnode *vp, struct vnode *rdir, char *buf, 587 char **retbuf, size_t *len, size_t addend); 588 589 static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries"); 590 591 static inline void 592 cache_assert_vlp_locked(struct mtx *vlp) 593 { 594 595 if (vlp != NULL) 596 mtx_assert(vlp, MA_OWNED); 597 } 598 599 static inline void 600 cache_assert_vnode_locked(struct vnode *vp) 601 { 602 struct mtx *vlp; 603 604 vlp = VP2VNODELOCK(vp); 605 cache_assert_vlp_locked(vlp); 606 } 607 608 /* 609 * Directory vnodes with entries are held for two reasons: 610 * 1. make them less of a target for reclamation in vnlru 611 * 2. 
suffer smaller performance penalty in locked lookup as requeieing is avoided 612 * 613 * It will be feasible to stop doing it altogether if all filesystems start 614 * supporting lockless lookup. 615 */ 616 static void 617 cache_hold_vnode(struct vnode *vp) 618 { 619 620 cache_assert_vnode_locked(vp); 621 VNPASS(LIST_EMPTY(&vp->v_cache_src), vp); 622 vhold(vp); 623 counter_u64_add(numcachehv, 1); 624 } 625 626 static void 627 cache_drop_vnode(struct vnode *vp) 628 { 629 630 /* 631 * Called after all locks are dropped, meaning we can't assert 632 * on the state of v_cache_src. 633 */ 634 vdrop(vp); 635 counter_u64_add(numcachehv, -1); 636 } 637 638 /* 639 * UMA zones. 640 */ 641 static uma_zone_t __read_mostly cache_zone_small; 642 static uma_zone_t __read_mostly cache_zone_small_ts; 643 static uma_zone_t __read_mostly cache_zone_large; 644 static uma_zone_t __read_mostly cache_zone_large_ts; 645 646 char * 647 cache_symlink_alloc(size_t size, int flags) 648 { 649 650 if (size < CACHE_ZONE_SMALL_SIZE) { 651 return (uma_zalloc_smr(cache_zone_small, flags)); 652 } 653 if (size < CACHE_ZONE_LARGE_SIZE) { 654 return (uma_zalloc_smr(cache_zone_large, flags)); 655 } 656 counter_u64_add(symlinktoobig, 1); 657 SDT_PROBE1(vfs, namecache, symlink, alloc__fail, size); 658 return (NULL); 659 } 660 661 void 662 cache_symlink_free(char *string, size_t size) 663 { 664 665 MPASS(string != NULL); 666 KASSERT(size < CACHE_ZONE_LARGE_SIZE, 667 ("%s: size %zu too big", __func__, size)); 668 669 if (size < CACHE_ZONE_SMALL_SIZE) { 670 uma_zfree_smr(cache_zone_small, string); 671 return; 672 } 673 if (size < CACHE_ZONE_LARGE_SIZE) { 674 uma_zfree_smr(cache_zone_large, string); 675 return; 676 } 677 __assert_unreachable(); 678 } 679 680 static struct namecache * 681 cache_alloc_uma(int len, bool ts) 682 { 683 struct namecache_ts *ncp_ts; 684 struct namecache *ncp; 685 686 if (__predict_false(ts)) { 687 if (len <= CACHE_PATH_CUTOFF) 688 ncp_ts = uma_zalloc_smr(cache_zone_small_ts, M_WAITOK); 689 else 690 ncp_ts = uma_zalloc_smr(cache_zone_large_ts, M_WAITOK); 691 ncp = &ncp_ts->nc_nc; 692 } else { 693 if (len <= CACHE_PATH_CUTOFF) 694 ncp = uma_zalloc_smr(cache_zone_small, M_WAITOK); 695 else 696 ncp = uma_zalloc_smr(cache_zone_large, M_WAITOK); 697 } 698 return (ncp); 699 } 700 701 static void 702 cache_free_uma(struct namecache *ncp) 703 { 704 struct namecache_ts *ncp_ts; 705 706 if (__predict_false(ncp->nc_flag & NCF_TS)) { 707 ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc); 708 if (ncp->nc_nlen <= CACHE_PATH_CUTOFF) 709 uma_zfree_smr(cache_zone_small_ts, ncp_ts); 710 else 711 uma_zfree_smr(cache_zone_large_ts, ncp_ts); 712 } else { 713 if (ncp->nc_nlen <= CACHE_PATH_CUTOFF) 714 uma_zfree_smr(cache_zone_small, ncp); 715 else 716 uma_zfree_smr(cache_zone_large, ncp); 717 } 718 } 719 720 static struct namecache * 721 cache_alloc(int len, bool ts) 722 { 723 u_long lnumcache; 724 725 /* 726 * Avoid blowout in namecache entries. 727 * 728 * Bugs: 729 * 1. filesystems may end up trying to add an already existing entry 730 * (for example this can happen after a cache miss during concurrent 731 * lookup), in which case we will call cache_neg_evict despite not 732 * adding anything. 733 * 2. the routine may fail to free anything and no provisions are made 734 * to make it try harder (see the inside for failure modes) 735 * 3. it only ever looks at negative entries. 
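 *
 * As a hypothetical illustration of bug 1: two threads miss on the same
 * name, both go through VOP_LOOKUP and both call cache_enter. Each of them
 * passes through this routine (possibly evicting a negative entry), yet
 * only one of the allocations ends up inserted -- the other is detected as
 * a duplicate and freed.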
736 */ 737 lnumcache = atomic_fetchadd_long(&numcache, 1) + 1; 738 if (cache_neg_evict_cond(lnumcache)) { 739 lnumcache = atomic_load_long(&numcache); 740 } 741 if (__predict_false(lnumcache >= ncsize)) { 742 atomic_subtract_long(&numcache, 1); 743 counter_u64_add(numdrops, 1); 744 return (NULL); 745 } 746 return (cache_alloc_uma(len, ts)); 747 } 748 749 static void 750 cache_free(struct namecache *ncp) 751 { 752 753 MPASS(ncp != NULL); 754 if ((ncp->nc_flag & NCF_DVDROP) != 0) { 755 cache_drop_vnode(ncp->nc_dvp); 756 } 757 cache_free_uma(ncp); 758 atomic_subtract_long(&numcache, 1); 759 } 760 761 static void 762 cache_free_batch(struct cache_freebatch *batch) 763 { 764 struct namecache *ncp, *nnp; 765 int i; 766 767 i = 0; 768 if (TAILQ_EMPTY(batch)) 769 goto out; 770 TAILQ_FOREACH_SAFE(ncp, batch, nc_dst, nnp) { 771 if ((ncp->nc_flag & NCF_DVDROP) != 0) { 772 cache_drop_vnode(ncp->nc_dvp); 773 } 774 cache_free_uma(ncp); 775 i++; 776 } 777 atomic_subtract_long(&numcache, i); 778 out: 779 SDT_PROBE1(vfs, namecache, purge, batch, i); 780 } 781 782 /* 783 * Hashing. 784 * 785 * The code was made to use FNV in 2001 and this choice needs to be revisited. 786 * 787 * Short summary of the difficulty: 788 * The longest name which can be inserted is NAME_MAX characters in length (or 789 * 255 at the time of writing this comment), while majority of names used in 790 * practice are significantly shorter (mostly below 10). More importantly 791 * majority of lookups performed find names are even shorter than that. 792 * 793 * This poses a problem where hashes which do better than FNV past word size 794 * (or so) tend to come with additional overhead when finalizing the result, 795 * making them noticeably slower for the most commonly used range. 796 * 797 * Consider a path like: /usr/obj/usr/src/sys/amd64/GENERIC/vnode_if.c 798 * 799 * When looking it up the most time consuming part by a large margin (at least 800 * on amd64) is hashing. Replacing FNV with something which pessimizes short 801 * input would make the slowest part stand out even more. 802 */ 803 804 /* 805 * TODO: With the value stored we can do better than computing the hash based 806 * on the address. 
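 *
 * Separately, for reference: the incremental helpers defined below
 * (cache_get_hash_iter_start/iter/finish) compose to the same value as a
 * plain cache_get_hash() call over the full name. An illustrative
 * (hypothetical) use:
 *
 *	hash = cache_get_hash_iter_start(dvp);
 *	for (i = 0; i < len; i++)
 *		hash = cache_get_hash_iter(name[i], hash);
 *	hash = cache_get_hash_iter_finish(hash);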
807 */ 808 static void 809 cache_prehash(struct vnode *vp) 810 { 811 812 vp->v_nchash = fnv_32_buf(&vp, sizeof(vp), FNV1_32_INIT); 813 } 814 815 static uint32_t 816 cache_get_hash(char *name, u_char len, struct vnode *dvp) 817 { 818 819 return (fnv_32_buf(name, len, dvp->v_nchash)); 820 } 821 822 static uint32_t 823 cache_get_hash_iter_start(struct vnode *dvp) 824 { 825 826 return (dvp->v_nchash); 827 } 828 829 static uint32_t 830 cache_get_hash_iter(char c, uint32_t hash) 831 { 832 833 return (fnv_32_buf(&c, 1, hash)); 834 } 835 836 static uint32_t 837 cache_get_hash_iter_finish(uint32_t hash) 838 { 839 840 return (hash); 841 } 842 843 static inline struct nchashhead * 844 NCP2BUCKET(struct namecache *ncp) 845 { 846 uint32_t hash; 847 848 hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, ncp->nc_dvp); 849 return (NCHHASH(hash)); 850 } 851 852 static inline struct mtx * 853 NCP2BUCKETLOCK(struct namecache *ncp) 854 { 855 uint32_t hash; 856 857 hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, ncp->nc_dvp); 858 return (HASH2BUCKETLOCK(hash)); 859 } 860 861 #ifdef INVARIANTS 862 static void 863 cache_assert_bucket_locked(struct namecache *ncp) 864 { 865 struct mtx *blp; 866 867 blp = NCP2BUCKETLOCK(ncp); 868 mtx_assert(blp, MA_OWNED); 869 } 870 871 static void 872 cache_assert_bucket_unlocked(struct namecache *ncp) 873 { 874 struct mtx *blp; 875 876 blp = NCP2BUCKETLOCK(ncp); 877 mtx_assert(blp, MA_NOTOWNED); 878 } 879 #else 880 #define cache_assert_bucket_locked(x) do { } while (0) 881 #define cache_assert_bucket_unlocked(x) do { } while (0) 882 #endif 883 884 #define cache_sort_vnodes(x, y) _cache_sort_vnodes((void **)(x), (void **)(y)) 885 static void 886 _cache_sort_vnodes(void **p1, void **p2) 887 { 888 void *tmp; 889 890 MPASS(*p1 != NULL || *p2 != NULL); 891 892 if (*p1 > *p2) { 893 tmp = *p2; 894 *p2 = *p1; 895 *p1 = tmp; 896 } 897 } 898 899 static void 900 cache_lock_all_buckets(void) 901 { 902 u_int i; 903 904 for (i = 0; i < numbucketlocks; i++) 905 mtx_lock(&bucketlocks[i]); 906 } 907 908 static void 909 cache_unlock_all_buckets(void) 910 { 911 u_int i; 912 913 for (i = 0; i < numbucketlocks; i++) 914 mtx_unlock(&bucketlocks[i]); 915 } 916 917 static void 918 cache_lock_all_vnodes(void) 919 { 920 u_int i; 921 922 for (i = 0; i < numvnodelocks; i++) 923 mtx_lock(&vnodelocks[i]); 924 } 925 926 static void 927 cache_unlock_all_vnodes(void) 928 { 929 u_int i; 930 931 for (i = 0; i < numvnodelocks; i++) 932 mtx_unlock(&vnodelocks[i]); 933 } 934 935 static int 936 cache_trylock_vnodes(struct mtx *vlp1, struct mtx *vlp2) 937 { 938 939 cache_sort_vnodes(&vlp1, &vlp2); 940 941 if (vlp1 != NULL) { 942 if (!mtx_trylock(vlp1)) 943 return (EAGAIN); 944 } 945 if (!mtx_trylock(vlp2)) { 946 if (vlp1 != NULL) 947 mtx_unlock(vlp1); 948 return (EAGAIN); 949 } 950 951 return (0); 952 } 953 954 static void 955 cache_lock_vnodes(struct mtx *vlp1, struct mtx *vlp2) 956 { 957 958 MPASS(vlp1 != NULL || vlp2 != NULL); 959 MPASS(vlp1 <= vlp2); 960 961 if (vlp1 != NULL) 962 mtx_lock(vlp1); 963 if (vlp2 != NULL) 964 mtx_lock(vlp2); 965 } 966 967 static void 968 cache_unlock_vnodes(struct mtx *vlp1, struct mtx *vlp2) 969 { 970 971 MPASS(vlp1 != NULL || vlp2 != NULL); 972 973 if (vlp1 != NULL) 974 mtx_unlock(vlp1); 975 if (vlp2 != NULL) 976 mtx_unlock(vlp2); 977 } 978 979 static int 980 sysctl_nchstats(SYSCTL_HANDLER_ARGS) 981 { 982 struct nchstats snap; 983 984 if (req->oldptr == NULL) 985 return (SYSCTL_OUT(req, 0, sizeof(snap))); 986 987 snap = nchstats; 988 snap.ncs_goodhits = 
counter_u64_fetch(numposhits); 989 snap.ncs_neghits = counter_u64_fetch(numneghits); 990 snap.ncs_badhits = counter_u64_fetch(numposzaps) + 991 counter_u64_fetch(numnegzaps); 992 snap.ncs_miss = counter_u64_fetch(nummisszap) + 993 counter_u64_fetch(nummiss); 994 995 return (SYSCTL_OUT(req, &snap, sizeof(snap))); 996 } 997 SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE | CTLFLAG_RD | 998 CTLFLAG_MPSAFE, 0, 0, sysctl_nchstats, "LU", 999 "VFS cache effectiveness statistics"); 1000 1001 static void 1002 cache_recalc_neg_min(u_int val) 1003 { 1004 1005 neg_min = (ncsize * val) / 100; 1006 } 1007 1008 static int 1009 sysctl_negminpct(SYSCTL_HANDLER_ARGS) 1010 { 1011 u_int val; 1012 int error; 1013 1014 val = ncnegminpct; 1015 error = sysctl_handle_int(oidp, &val, 0, req); 1016 if (error != 0 || req->newptr == NULL) 1017 return (error); 1018 1019 if (val == ncnegminpct) 1020 return (0); 1021 if (val < 0 || val > 99) 1022 return (EINVAL); 1023 ncnegminpct = val; 1024 cache_recalc_neg_min(val); 1025 return (0); 1026 } 1027 1028 SYSCTL_PROC(_vfs_cache_param, OID_AUTO, negminpct, 1029 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_negminpct, 1030 "I", "Negative entry \% of namecache capacity above which automatic eviction is allowed"); 1031 1032 #ifdef DIAGNOSTIC 1033 /* 1034 * Grab an atomic snapshot of the name cache hash chain lengths 1035 */ 1036 static SYSCTL_NODE(_debug, OID_AUTO, hashstat, 1037 CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 1038 "hash table stats"); 1039 1040 static int 1041 sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS) 1042 { 1043 struct nchashhead *ncpp; 1044 struct namecache *ncp; 1045 int i, error, n_nchash, *cntbuf; 1046 1047 retry: 1048 n_nchash = nchash + 1; /* nchash is max index, not count */ 1049 if (req->oldptr == NULL) 1050 return SYSCTL_OUT(req, 0, n_nchash * sizeof(int)); 1051 cntbuf = malloc(n_nchash * sizeof(int), M_TEMP, M_ZERO | M_WAITOK); 1052 cache_lock_all_buckets(); 1053 if (n_nchash != nchash + 1) { 1054 cache_unlock_all_buckets(); 1055 free(cntbuf, M_TEMP); 1056 goto retry; 1057 } 1058 /* Scan hash tables counting entries */ 1059 for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++) 1060 CK_SLIST_FOREACH(ncp, ncpp, nc_hash) 1061 cntbuf[i]++; 1062 cache_unlock_all_buckets(); 1063 for (error = 0, i = 0; i < n_nchash; i++) 1064 if ((error = SYSCTL_OUT(req, &cntbuf[i], sizeof(int))) != 0) 1065 break; 1066 free(cntbuf, M_TEMP); 1067 return (error); 1068 } 1069 SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD| 1070 CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int", 1071 "nchash chain lengths"); 1072 1073 static int 1074 sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS) 1075 { 1076 int error; 1077 struct nchashhead *ncpp; 1078 struct namecache *ncp; 1079 int n_nchash; 1080 int count, maxlength, used, pct; 1081 1082 if (!req->oldptr) 1083 return SYSCTL_OUT(req, 0, 4 * sizeof(int)); 1084 1085 cache_lock_all_buckets(); 1086 n_nchash = nchash + 1; /* nchash is max index, not count */ 1087 used = 0; 1088 maxlength = 0; 1089 1090 /* Scan hash tables for applicable entries */ 1091 for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) { 1092 count = 0; 1093 CK_SLIST_FOREACH(ncp, ncpp, nc_hash) { 1094 count++; 1095 } 1096 if (count) 1097 used++; 1098 if (maxlength < count) 1099 maxlength = count; 1100 } 1101 n_nchash = nchash + 1; 1102 cache_unlock_all_buckets(); 1103 pct = (used * 100) / (n_nchash / 100); 1104 error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash)); 1105 if (error) 1106 return (error); 1107 
error = SYSCTL_OUT(req, &used, sizeof(used)); 1108 if (error) 1109 return (error); 1110 error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength)); 1111 if (error) 1112 return (error); 1113 error = SYSCTL_OUT(req, &pct, sizeof(pct)); 1114 if (error) 1115 return (error); 1116 return (0); 1117 } 1118 SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD| 1119 CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I", 1120 "nchash statistics (number of total/used buckets, maximum chain length, usage percentage)"); 1121 #endif 1122 1123 /* 1124 * Negative entries management 1125 * 1126 * Various workloads create plenty of negative entries and barely use them 1127 * afterwards. Moreover malicious users can keep performing bogus lookups 1128 * adding even more entries. For example "make tinderbox" as of writing this 1129 * comment ends up with 2.6M namecache entries in total, 1.2M of which are 1130 * negative. 1131 * 1132 * As such, a rather aggressive eviction method is needed. The currently 1133 * employed method is a placeholder. 1134 * 1135 * Entries are split over numneglists separate lists, each of which is further 1136 * split into hot and cold entries. Entries get promoted after getting a hit. 1137 * Eviction happens on addition of new entry. 1138 */ 1139 static SYSCTL_NODE(_vfs_cache, OID_AUTO, neg, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1140 "Name cache negative entry statistics"); 1141 1142 SYSCTL_ULONG(_vfs_cache_neg, OID_AUTO, count, CTLFLAG_RD, &numneg, 0, 1143 "Number of negative cache entries"); 1144 1145 static COUNTER_U64_DEFINE_EARLY(neg_created); 1146 SYSCTL_COUNTER_U64(_vfs_cache_neg, OID_AUTO, created, CTLFLAG_RD, &neg_created, 1147 "Number of created negative entries"); 1148 1149 static COUNTER_U64_DEFINE_EARLY(neg_evicted); 1150 SYSCTL_COUNTER_U64(_vfs_cache_neg, OID_AUTO, evicted, CTLFLAG_RD, &neg_evicted, 1151 "Number of evicted negative entries"); 1152 1153 static COUNTER_U64_DEFINE_EARLY(neg_evict_skipped_empty); 1154 SYSCTL_COUNTER_U64(_vfs_cache_neg, OID_AUTO, evict_skipped_empty, CTLFLAG_RD, 1155 &neg_evict_skipped_empty, 1156 "Number of times evicting failed due to lack of entries"); 1157 1158 static COUNTER_U64_DEFINE_EARLY(neg_evict_skipped_missed); 1159 SYSCTL_COUNTER_U64(_vfs_cache_neg, OID_AUTO, evict_skipped_missed, CTLFLAG_RD, 1160 &neg_evict_skipped_missed, 1161 "Number of times evicting failed due to target entry disappearing"); 1162 1163 static COUNTER_U64_DEFINE_EARLY(neg_evict_skipped_contended); 1164 SYSCTL_COUNTER_U64(_vfs_cache_neg, OID_AUTO, evict_skipped_contended, CTLFLAG_RD, 1165 &neg_evict_skipped_contended, 1166 "Number of times evicting failed due to contention"); 1167 1168 SYSCTL_COUNTER_U64(_vfs_cache_neg, OID_AUTO, hits, CTLFLAG_RD, &numneghits, 1169 "Number of cache hits (negative)"); 1170 1171 static int 1172 sysctl_neg_hot(SYSCTL_HANDLER_ARGS) 1173 { 1174 int i, out; 1175 1176 out = 0; 1177 for (i = 0; i < numneglists; i++) 1178 out += neglists[i].nl_hotnum; 1179 1180 return (SYSCTL_OUT(req, &out, sizeof(out))); 1181 } 1182 SYSCTL_PROC(_vfs_cache_neg, OID_AUTO, hot, CTLTYPE_INT | CTLFLAG_RD | 1183 CTLFLAG_MPSAFE, 0, 0, sysctl_neg_hot, "I", 1184 "Number of hot negative entries"); 1185 1186 static void 1187 cache_neg_init(struct namecache *ncp) 1188 { 1189 struct negstate *ns; 1190 1191 ncp->nc_flag |= NCF_NEGATIVE; 1192 ns = NCP2NEGSTATE(ncp); 1193 ns->neg_flag = 0; 1194 ns->neg_hit = 0; 1195 counter_u64_add(neg_created, 1); 1196 } 1197 1198 #define CACHE_NEG_PROMOTION_THRESH 2 1199 1200 static bool 1201 cache_neg_hit_prep(struct 
namecache *ncp) 1202 { 1203 struct negstate *ns; 1204 u_char n; 1205 1206 ns = NCP2NEGSTATE(ncp); 1207 n = atomic_load_char(&ns->neg_hit); 1208 for (;;) { 1209 if (n >= CACHE_NEG_PROMOTION_THRESH) 1210 return (false); 1211 if (atomic_fcmpset_8(&ns->neg_hit, &n, n + 1)) 1212 break; 1213 } 1214 return (n + 1 == CACHE_NEG_PROMOTION_THRESH); 1215 } 1216 1217 /* 1218 * Nothing to do here but it is provided for completeness as some 1219 * cache_neg_hit_prep callers may end up returning without even 1220 * trying to promote. 1221 */ 1222 #define cache_neg_hit_abort(ncp) do { } while (0) 1223 1224 static void 1225 cache_neg_hit_finish(struct namecache *ncp) 1226 { 1227 1228 SDT_PROBE2(vfs, namecache, lookup, hit__negative, ncp->nc_dvp, ncp->nc_name); 1229 counter_u64_add(numneghits, 1); 1230 } 1231 1232 /* 1233 * Move a negative entry to the hot list. 1234 */ 1235 static void 1236 cache_neg_promote_locked(struct namecache *ncp) 1237 { 1238 struct neglist *nl; 1239 struct negstate *ns; 1240 1241 ns = NCP2NEGSTATE(ncp); 1242 nl = NCP2NEGLIST(ncp); 1243 mtx_assert(&nl->nl_lock, MA_OWNED); 1244 if ((ns->neg_flag & NEG_HOT) == 0) { 1245 TAILQ_REMOVE(&nl->nl_list, ncp, nc_dst); 1246 TAILQ_INSERT_TAIL(&nl->nl_hotlist, ncp, nc_dst); 1247 nl->nl_hotnum++; 1248 ns->neg_flag |= NEG_HOT; 1249 } 1250 } 1251 1252 /* 1253 * Move a hot negative entry to the cold list. 1254 */ 1255 static void 1256 cache_neg_demote_locked(struct namecache *ncp) 1257 { 1258 struct neglist *nl; 1259 struct negstate *ns; 1260 1261 ns = NCP2NEGSTATE(ncp); 1262 nl = NCP2NEGLIST(ncp); 1263 mtx_assert(&nl->nl_lock, MA_OWNED); 1264 MPASS(ns->neg_flag & NEG_HOT); 1265 TAILQ_REMOVE(&nl->nl_hotlist, ncp, nc_dst); 1266 TAILQ_INSERT_TAIL(&nl->nl_list, ncp, nc_dst); 1267 nl->nl_hotnum--; 1268 ns->neg_flag &= ~NEG_HOT; 1269 atomic_store_char(&ns->neg_hit, 0); 1270 } 1271 1272 /* 1273 * Move a negative entry to the hot list if it matches the lookup. 1274 * 1275 * We have to take locks, but they may be contended and in the worst 1276 * case we may need to go off CPU. We don't want to spin within the 1277 * smr section and we can't block with it. Exiting the section means 1278 * the found entry could have been evicted. We are going to look it 1279 * up again. 1280 */ 1281 static bool 1282 cache_neg_promote_cond(struct vnode *dvp, struct componentname *cnp, 1283 struct namecache *oncp, uint32_t hash) 1284 { 1285 struct namecache *ncp; 1286 struct neglist *nl; 1287 u_char nc_flag; 1288 1289 nl = NCP2NEGLIST(oncp); 1290 1291 mtx_lock(&nl->nl_lock); 1292 /* 1293 * For hash iteration. 1294 */ 1295 vfs_smr_enter(); 1296 1297 /* 1298 * Avoid all surprises by only succeeding if we got the same entry and 1299 * bailing completely otherwise. 1300 * XXX There are no provisions to keep the vnode around, meaning we may 1301 * end up promoting a negative entry for a *new* vnode and returning 1302 * ENOENT on its account. This is the error we want to return anyway 1303 * and promotion is harmless. 1304 * 1305 * In particular at this point there can be a new ncp which matches the 1306 * search but hashes to a different neglist. 1307 */ 1308 CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { 1309 if (ncp == oncp) 1310 break; 1311 } 1312 1313 /* 1314 * No match to begin with. 1315 */ 1316 if (__predict_false(ncp == NULL)) { 1317 goto out_abort; 1318 } 1319 1320 /* 1321 * The newly found entry may be something different... 
1322 */ 1323 if (!(ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen && 1324 !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))) { 1325 goto out_abort; 1326 } 1327 1328 /* 1329 * ... and not even negative. 1330 */ 1331 nc_flag = atomic_load_char(&ncp->nc_flag); 1332 if ((nc_flag & NCF_NEGATIVE) == 0) { 1333 goto out_abort; 1334 } 1335 1336 if (!cache_ncp_canuse(ncp)) { 1337 goto out_abort; 1338 } 1339 1340 cache_neg_promote_locked(ncp); 1341 cache_neg_hit_finish(ncp); 1342 vfs_smr_exit(); 1343 mtx_unlock(&nl->nl_lock); 1344 return (true); 1345 out_abort: 1346 vfs_smr_exit(); 1347 mtx_unlock(&nl->nl_lock); 1348 return (false); 1349 } 1350 1351 static void 1352 cache_neg_promote(struct namecache *ncp) 1353 { 1354 struct neglist *nl; 1355 1356 nl = NCP2NEGLIST(ncp); 1357 mtx_lock(&nl->nl_lock); 1358 cache_neg_promote_locked(ncp); 1359 mtx_unlock(&nl->nl_lock); 1360 } 1361 1362 static void 1363 cache_neg_insert(struct namecache *ncp) 1364 { 1365 struct neglist *nl; 1366 1367 MPASS(ncp->nc_flag & NCF_NEGATIVE); 1368 cache_assert_bucket_locked(ncp); 1369 nl = NCP2NEGLIST(ncp); 1370 mtx_lock(&nl->nl_lock); 1371 TAILQ_INSERT_TAIL(&nl->nl_list, ncp, nc_dst); 1372 mtx_unlock(&nl->nl_lock); 1373 atomic_add_long(&numneg, 1); 1374 } 1375 1376 static void 1377 cache_neg_remove(struct namecache *ncp) 1378 { 1379 struct neglist *nl; 1380 struct negstate *ns; 1381 1382 cache_assert_bucket_locked(ncp); 1383 nl = NCP2NEGLIST(ncp); 1384 ns = NCP2NEGSTATE(ncp); 1385 mtx_lock(&nl->nl_lock); 1386 if ((ns->neg_flag & NEG_HOT) != 0) { 1387 TAILQ_REMOVE(&nl->nl_hotlist, ncp, nc_dst); 1388 nl->nl_hotnum--; 1389 } else { 1390 TAILQ_REMOVE(&nl->nl_list, ncp, nc_dst); 1391 } 1392 mtx_unlock(&nl->nl_lock); 1393 atomic_subtract_long(&numneg, 1); 1394 } 1395 1396 static struct neglist * 1397 cache_neg_evict_select_list(void) 1398 { 1399 struct neglist *nl; 1400 u_int c; 1401 1402 c = atomic_fetchadd_int(&neg_cycle, 1) + 1; 1403 nl = &neglists[c % numneglists]; 1404 if (!mtx_trylock(&nl->nl_evict_lock)) { 1405 counter_u64_add(neg_evict_skipped_contended, 1); 1406 return (NULL); 1407 } 1408 return (nl); 1409 } 1410 1411 static struct namecache * 1412 cache_neg_evict_select_entry(struct neglist *nl) 1413 { 1414 struct namecache *ncp, *lncp; 1415 struct negstate *ns, *lns; 1416 int i; 1417 1418 mtx_assert(&nl->nl_evict_lock, MA_OWNED); 1419 mtx_assert(&nl->nl_lock, MA_OWNED); 1420 ncp = TAILQ_FIRST(&nl->nl_list); 1421 if (ncp == NULL) 1422 return (NULL); 1423 lncp = ncp; 1424 lns = NCP2NEGSTATE(lncp); 1425 for (i = 1; i < 4; i++) { 1426 ncp = TAILQ_NEXT(ncp, nc_dst); 1427 if (ncp == NULL) 1428 break; 1429 ns = NCP2NEGSTATE(ncp); 1430 if (ns->neg_hit < lns->neg_hit) { 1431 lncp = ncp; 1432 lns = ns; 1433 } 1434 } 1435 return (lncp); 1436 } 1437 1438 static bool 1439 cache_neg_evict(void) 1440 { 1441 struct namecache *ncp, *ncp2; 1442 struct neglist *nl; 1443 struct vnode *dvp; 1444 struct mtx *dvlp; 1445 struct mtx *blp; 1446 uint32_t hash; 1447 u_char nlen; 1448 bool evicted; 1449 1450 nl = cache_neg_evict_select_list(); 1451 if (nl == NULL) { 1452 return (false); 1453 } 1454 1455 mtx_lock(&nl->nl_lock); 1456 ncp = TAILQ_FIRST(&nl->nl_hotlist); 1457 if (ncp != NULL) { 1458 cache_neg_demote_locked(ncp); 1459 } 1460 ncp = cache_neg_evict_select_entry(nl); 1461 if (ncp == NULL) { 1462 counter_u64_add(neg_evict_skipped_empty, 1); 1463 mtx_unlock(&nl->nl_lock); 1464 mtx_unlock(&nl->nl_evict_lock); 1465 return (false); 1466 } 1467 nlen = ncp->nc_nlen; 1468 dvp = ncp->nc_dvp; 1469 hash = cache_get_hash(ncp->nc_name, nlen, dvp); 
1470 dvlp = VP2VNODELOCK(dvp); 1471 blp = HASH2BUCKETLOCK(hash); 1472 mtx_unlock(&nl->nl_lock); 1473 mtx_unlock(&nl->nl_evict_lock); 1474 mtx_lock(dvlp); 1475 mtx_lock(blp); 1476 /* 1477 * Note that since all locks were dropped above, the entry may be 1478 * gone or reallocated to be something else. 1479 */ 1480 CK_SLIST_FOREACH(ncp2, (NCHHASH(hash)), nc_hash) { 1481 if (ncp2 == ncp && ncp2->nc_dvp == dvp && 1482 ncp2->nc_nlen == nlen && (ncp2->nc_flag & NCF_NEGATIVE) != 0) 1483 break; 1484 } 1485 if (ncp2 == NULL) { 1486 counter_u64_add(neg_evict_skipped_missed, 1); 1487 ncp = NULL; 1488 evicted = false; 1489 } else { 1490 MPASS(dvlp == VP2VNODELOCK(ncp->nc_dvp)); 1491 MPASS(blp == NCP2BUCKETLOCK(ncp)); 1492 SDT_PROBE2(vfs, namecache, evict_negative, done, ncp->nc_dvp, 1493 ncp->nc_name); 1494 cache_zap_locked(ncp); 1495 counter_u64_add(neg_evicted, 1); 1496 evicted = true; 1497 } 1498 mtx_unlock(blp); 1499 mtx_unlock(dvlp); 1500 if (ncp != NULL) 1501 cache_free(ncp); 1502 return (evicted); 1503 } 1504 1505 /* 1506 * Maybe evict a negative entry to create more room. 1507 * 1508 * The ncnegfactor parameter limits what fraction of the total count 1509 * can comprise of negative entries. However, if the cache is just 1510 * warming up this leads to excessive evictions. As such, ncnegminpct 1511 * (recomputed to neg_min) dictates whether the above should be 1512 * applied. 1513 * 1514 * Try evicting if the cache is close to full capacity regardless of 1515 * other considerations. 1516 */ 1517 static bool 1518 cache_neg_evict_cond(u_long lnumcache) 1519 { 1520 u_long lnumneg; 1521 1522 if (ncsize - 1000 < lnumcache) 1523 goto out_evict; 1524 lnumneg = atomic_load_long(&numneg); 1525 if (lnumneg < neg_min) 1526 return (false); 1527 if (lnumneg * ncnegfactor < lnumcache) 1528 return (false); 1529 out_evict: 1530 return (cache_neg_evict()); 1531 } 1532 1533 /* 1534 * cache_zap_locked(): 1535 * 1536 * Removes a namecache entry from cache, whether it contains an actual 1537 * pointer to a vnode or if it is just a negative cache entry. 
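 *
 *	Lock-wise the caller is expected to hold (a summary of the
 *	assertions at the top of the function):
 *
 *		VP2VNODELOCK(ncp->nc_dvp)	always
 *		VP2VNODELOCK(ncp->nc_vp)	for positive entries only
 *		NCP2BUCKETLOCK(ncp)		always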
1538 */ 1539 static void 1540 cache_zap_locked(struct namecache *ncp) 1541 { 1542 struct nchashhead *ncpp; 1543 struct vnode *dvp, *vp; 1544 1545 dvp = ncp->nc_dvp; 1546 vp = ncp->nc_vp; 1547 1548 if (!(ncp->nc_flag & NCF_NEGATIVE)) 1549 cache_assert_vnode_locked(vp); 1550 cache_assert_vnode_locked(dvp); 1551 cache_assert_bucket_locked(ncp); 1552 1553 cache_ncp_invalidate(ncp); 1554 1555 ncpp = NCP2BUCKET(ncp); 1556 CK_SLIST_REMOVE(ncpp, ncp, namecache, nc_hash); 1557 if (!(ncp->nc_flag & NCF_NEGATIVE)) { 1558 SDT_PROBE3(vfs, namecache, zap, done, dvp, ncp->nc_name, vp); 1559 TAILQ_REMOVE(&vp->v_cache_dst, ncp, nc_dst); 1560 if (ncp == vp->v_cache_dd) { 1561 atomic_store_ptr(&vp->v_cache_dd, NULL); 1562 } 1563 } else { 1564 SDT_PROBE2(vfs, namecache, zap_negative, done, dvp, ncp->nc_name); 1565 cache_neg_remove(ncp); 1566 } 1567 if (ncp->nc_flag & NCF_ISDOTDOT) { 1568 if (ncp == dvp->v_cache_dd) { 1569 atomic_store_ptr(&dvp->v_cache_dd, NULL); 1570 } 1571 } else { 1572 LIST_REMOVE(ncp, nc_src); 1573 if (LIST_EMPTY(&dvp->v_cache_src)) { 1574 ncp->nc_flag |= NCF_DVDROP; 1575 } 1576 } 1577 } 1578 1579 static void 1580 cache_zap_negative_locked_vnode_kl(struct namecache *ncp, struct vnode *vp) 1581 { 1582 struct mtx *blp; 1583 1584 MPASS(ncp->nc_dvp == vp); 1585 MPASS(ncp->nc_flag & NCF_NEGATIVE); 1586 cache_assert_vnode_locked(vp); 1587 1588 blp = NCP2BUCKETLOCK(ncp); 1589 mtx_lock(blp); 1590 cache_zap_locked(ncp); 1591 mtx_unlock(blp); 1592 } 1593 1594 static bool 1595 cache_zap_locked_vnode_kl2(struct namecache *ncp, struct vnode *vp, 1596 struct mtx **vlpp) 1597 { 1598 struct mtx *pvlp, *vlp1, *vlp2, *to_unlock; 1599 struct mtx *blp; 1600 1601 MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp); 1602 cache_assert_vnode_locked(vp); 1603 1604 if (ncp->nc_flag & NCF_NEGATIVE) { 1605 if (*vlpp != NULL) { 1606 mtx_unlock(*vlpp); 1607 *vlpp = NULL; 1608 } 1609 cache_zap_negative_locked_vnode_kl(ncp, vp); 1610 return (true); 1611 } 1612 1613 pvlp = VP2VNODELOCK(vp); 1614 blp = NCP2BUCKETLOCK(ncp); 1615 vlp1 = VP2VNODELOCK(ncp->nc_dvp); 1616 vlp2 = VP2VNODELOCK(ncp->nc_vp); 1617 1618 if (*vlpp == vlp1 || *vlpp == vlp2) { 1619 to_unlock = *vlpp; 1620 *vlpp = NULL; 1621 } else { 1622 if (*vlpp != NULL) { 1623 mtx_unlock(*vlpp); 1624 *vlpp = NULL; 1625 } 1626 cache_sort_vnodes(&vlp1, &vlp2); 1627 if (vlp1 == pvlp) { 1628 mtx_lock(vlp2); 1629 to_unlock = vlp2; 1630 } else { 1631 if (!mtx_trylock(vlp1)) 1632 goto out_relock; 1633 to_unlock = vlp1; 1634 } 1635 } 1636 mtx_lock(blp); 1637 cache_zap_locked(ncp); 1638 mtx_unlock(blp); 1639 if (to_unlock != NULL) 1640 mtx_unlock(to_unlock); 1641 return (true); 1642 1643 out_relock: 1644 mtx_unlock(vlp2); 1645 mtx_lock(vlp1); 1646 mtx_lock(vlp2); 1647 MPASS(*vlpp == NULL); 1648 *vlpp = vlp1; 1649 return (false); 1650 } 1651 1652 /* 1653 * If trylocking failed we can get here. We know enough to take all needed locks 1654 * in the right order and re-lookup the entry. 
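 *
 * The order used below is (sketch): sort the two vnode locks by address with
 * cache_sort_vnodes(), take them with cache_lock_vnodes(), then take the
 * bucket lock and re-find the entry in the hash chain before zapping it.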
1655 */ 1656 static int 1657 cache_zap_unlocked_bucket(struct namecache *ncp, struct componentname *cnp, 1658 struct vnode *dvp, struct mtx *dvlp, struct mtx *vlp, uint32_t hash, 1659 struct mtx *blp) 1660 { 1661 struct namecache *rncp; 1662 1663 cache_assert_bucket_unlocked(ncp); 1664 1665 cache_sort_vnodes(&dvlp, &vlp); 1666 cache_lock_vnodes(dvlp, vlp); 1667 mtx_lock(blp); 1668 CK_SLIST_FOREACH(rncp, (NCHHASH(hash)), nc_hash) { 1669 if (rncp == ncp && rncp->nc_dvp == dvp && 1670 rncp->nc_nlen == cnp->cn_namelen && 1671 !bcmp(rncp->nc_name, cnp->cn_nameptr, rncp->nc_nlen)) 1672 break; 1673 } 1674 if (rncp != NULL) { 1675 cache_zap_locked(rncp); 1676 mtx_unlock(blp); 1677 cache_unlock_vnodes(dvlp, vlp); 1678 counter_u64_add(zap_bucket_relock_success, 1); 1679 return (0); 1680 } 1681 1682 mtx_unlock(blp); 1683 cache_unlock_vnodes(dvlp, vlp); 1684 return (EAGAIN); 1685 } 1686 1687 static int __noinline 1688 cache_zap_locked_bucket(struct namecache *ncp, struct componentname *cnp, 1689 uint32_t hash, struct mtx *blp) 1690 { 1691 struct mtx *dvlp, *vlp; 1692 struct vnode *dvp; 1693 1694 cache_assert_bucket_locked(ncp); 1695 1696 dvlp = VP2VNODELOCK(ncp->nc_dvp); 1697 vlp = NULL; 1698 if (!(ncp->nc_flag & NCF_NEGATIVE)) 1699 vlp = VP2VNODELOCK(ncp->nc_vp); 1700 if (cache_trylock_vnodes(dvlp, vlp) == 0) { 1701 cache_zap_locked(ncp); 1702 mtx_unlock(blp); 1703 cache_unlock_vnodes(dvlp, vlp); 1704 return (0); 1705 } 1706 1707 dvp = ncp->nc_dvp; 1708 mtx_unlock(blp); 1709 return (cache_zap_unlocked_bucket(ncp, cnp, dvp, dvlp, vlp, hash, blp)); 1710 } 1711 1712 static __noinline int 1713 cache_remove_cnp(struct vnode *dvp, struct componentname *cnp) 1714 { 1715 struct namecache *ncp; 1716 struct mtx *blp; 1717 struct mtx *dvlp, *dvlp2; 1718 uint32_t hash; 1719 int error; 1720 1721 if (cnp->cn_namelen == 2 && 1722 cnp->cn_nameptr[0] == '.' 
&& cnp->cn_nameptr[1] == '.') { 1723 dvlp = VP2VNODELOCK(dvp); 1724 dvlp2 = NULL; 1725 mtx_lock(dvlp); 1726 retry_dotdot: 1727 ncp = dvp->v_cache_dd; 1728 if (ncp == NULL) { 1729 mtx_unlock(dvlp); 1730 if (dvlp2 != NULL) 1731 mtx_unlock(dvlp2); 1732 SDT_PROBE2(vfs, namecache, removecnp, miss, dvp, cnp); 1733 return (0); 1734 } 1735 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) { 1736 if (!cache_zap_locked_vnode_kl2(ncp, dvp, &dvlp2)) 1737 goto retry_dotdot; 1738 MPASS(dvp->v_cache_dd == NULL); 1739 mtx_unlock(dvlp); 1740 if (dvlp2 != NULL) 1741 mtx_unlock(dvlp2); 1742 cache_free(ncp); 1743 } else { 1744 atomic_store_ptr(&dvp->v_cache_dd, NULL); 1745 mtx_unlock(dvlp); 1746 if (dvlp2 != NULL) 1747 mtx_unlock(dvlp2); 1748 } 1749 SDT_PROBE2(vfs, namecache, removecnp, hit, dvp, cnp); 1750 return (1); 1751 } 1752 1753 hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp); 1754 blp = HASH2BUCKETLOCK(hash); 1755 retry: 1756 if (CK_SLIST_EMPTY(NCHHASH(hash))) 1757 goto out_no_entry; 1758 1759 mtx_lock(blp); 1760 1761 CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { 1762 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen && 1763 !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen)) 1764 break; 1765 } 1766 1767 if (ncp == NULL) { 1768 mtx_unlock(blp); 1769 goto out_no_entry; 1770 } 1771 1772 error = cache_zap_locked_bucket(ncp, cnp, hash, blp); 1773 if (__predict_false(error != 0)) { 1774 zap_bucket_fail++; 1775 goto retry; 1776 } 1777 counter_u64_add(numposzaps, 1); 1778 SDT_PROBE2(vfs, namecache, removecnp, hit, dvp, cnp); 1779 cache_free(ncp); 1780 return (1); 1781 out_no_entry: 1782 counter_u64_add(nummisszap, 1); 1783 SDT_PROBE2(vfs, namecache, removecnp, miss, dvp, cnp); 1784 return (0); 1785 } 1786 1787 static int __noinline 1788 cache_lookup_dot(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, 1789 struct timespec *tsp, int *ticksp) 1790 { 1791 int ltype; 1792 1793 *vpp = dvp; 1794 counter_u64_add(dothits, 1); 1795 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", *vpp); 1796 if (tsp != NULL) 1797 timespecclear(tsp); 1798 if (ticksp != NULL) 1799 *ticksp = ticks; 1800 vrefact(*vpp); 1801 /* 1802 * When we lookup "." we still can be asked to lock it 1803 * differently... 
1804 */ 1805 ltype = cnp->cn_lkflags & LK_TYPE_MASK; 1806 if (ltype != VOP_ISLOCKED(*vpp)) { 1807 if (ltype == LK_EXCLUSIVE) { 1808 vn_lock(*vpp, LK_UPGRADE | LK_RETRY); 1809 if (VN_IS_DOOMED((*vpp))) { 1810 /* forced unmount */ 1811 vrele(*vpp); 1812 *vpp = NULL; 1813 return (ENOENT); 1814 } 1815 } else 1816 vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY); 1817 } 1818 return (-1); 1819 } 1820 1821 static int __noinline 1822 cache_lookup_dotdot(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, 1823 struct timespec *tsp, int *ticksp) 1824 { 1825 struct namecache_ts *ncp_ts; 1826 struct namecache *ncp; 1827 struct mtx *dvlp; 1828 enum vgetstate vs; 1829 int error, ltype; 1830 bool whiteout; 1831 1832 MPASS((cnp->cn_flags & ISDOTDOT) != 0); 1833 1834 if ((cnp->cn_flags & MAKEENTRY) == 0) { 1835 cache_remove_cnp(dvp, cnp); 1836 return (0); 1837 } 1838 1839 counter_u64_add(dotdothits, 1); 1840 retry: 1841 dvlp = VP2VNODELOCK(dvp); 1842 mtx_lock(dvlp); 1843 ncp = dvp->v_cache_dd; 1844 if (ncp == NULL) { 1845 SDT_PROBE2(vfs, namecache, lookup, miss, dvp, ".."); 1846 mtx_unlock(dvlp); 1847 return (0); 1848 } 1849 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) { 1850 if (ncp->nc_flag & NCF_NEGATIVE) 1851 *vpp = NULL; 1852 else 1853 *vpp = ncp->nc_vp; 1854 } else 1855 *vpp = ncp->nc_dvp; 1856 if (*vpp == NULL) 1857 goto negative_success; 1858 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, "..", *vpp); 1859 cache_out_ts(ncp, tsp, ticksp); 1860 if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) == 1861 NCF_DTS && tsp != NULL) { 1862 ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc); 1863 *tsp = ncp_ts->nc_dotdottime; 1864 } 1865 1866 MPASS(dvp != *vpp); 1867 ltype = VOP_ISLOCKED(dvp); 1868 VOP_UNLOCK(dvp); 1869 vs = vget_prep(*vpp); 1870 mtx_unlock(dvlp); 1871 error = vget_finish(*vpp, cnp->cn_lkflags, vs); 1872 vn_lock(dvp, ltype | LK_RETRY); 1873 if (VN_IS_DOOMED(dvp)) { 1874 if (error == 0) 1875 vput(*vpp); 1876 *vpp = NULL; 1877 return (ENOENT); 1878 } 1879 if (error) { 1880 *vpp = NULL; 1881 goto retry; 1882 } 1883 return (-1); 1884 negative_success: 1885 if (__predict_false(cnp->cn_nameiop == CREATE)) { 1886 if (cnp->cn_flags & ISLASTCN) { 1887 counter_u64_add(numnegzaps, 1); 1888 cache_zap_negative_locked_vnode_kl(ncp, dvp); 1889 mtx_unlock(dvlp); 1890 cache_free(ncp); 1891 return (0); 1892 } 1893 } 1894 1895 whiteout = (ncp->nc_flag & NCF_WHITE); 1896 cache_out_ts(ncp, tsp, ticksp); 1897 if (cache_neg_hit_prep(ncp)) 1898 cache_neg_promote(ncp); 1899 else 1900 cache_neg_hit_finish(ncp); 1901 mtx_unlock(dvlp); 1902 if (whiteout) 1903 cnp->cn_flags |= ISWHITEOUT; 1904 return (ENOENT); 1905 } 1906 1907 /** 1908 * Lookup a name in the name cache 1909 * 1910 * # Arguments 1911 * 1912 * - dvp: Parent directory in which to search. 1913 * - vpp: Return argument. Will contain desired vnode on cache hit. 1914 * - cnp: Parameters of the name search. The most interesting bits of 1915 * the cn_flags field have the following meanings: 1916 * - MAKEENTRY: If clear, free an entry from the cache rather than look 1917 * it up. 1918 * - ISDOTDOT: Must be set if and only if cn_nameptr == ".." 1919 * - tsp: Return storage for cache timestamp. On a successful (positive 1920 * or negative) lookup, tsp will be filled with any timespec that 1921 * was stored when this cache entry was created. However, it will 1922 * be clear for "." entries. 1923 * - ticks: Return storage for alternate cache timestamp. 
On a successful 1924 * (positive or negative) lookup, it will contain the ticks value 1925 * that was current when the cache entry was created, unless cnp 1926 * was ".". 1927 * 1928 * Either both tsp and ticks have to be provided or neither of them. 1929 * 1930 * # Returns 1931 * 1932 * - -1: A positive cache hit. vpp will contain the desired vnode. 1933 * - ENOENT: A negative cache hit, or dvp was recycled out from under us due 1934 * to a forced unmount. vpp will not be modified. If the entry 1935 * is a whiteout, then the ISWHITEOUT flag will be set in 1936 * cnp->cn_flags. 1937 * - 0: A cache miss. vpp will not be modified. 1938 * 1939 * # Locking 1940 * 1941 * On a cache hit, vpp will be returned locked and ref'd. If we're looking up 1942 * .., dvp is unlocked. If we're looking up . an extra ref is taken, but the 1943 * lock is not recursively acquired. 1944 */ 1945 static int __noinline 1946 cache_lookup_fallback(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, 1947 struct timespec *tsp, int *ticksp) 1948 { 1949 struct namecache *ncp; 1950 struct mtx *blp; 1951 uint32_t hash; 1952 enum vgetstate vs; 1953 int error; 1954 bool whiteout; 1955 1956 MPASS((cnp->cn_flags & ISDOTDOT) == 0); 1957 MPASS((cnp->cn_flags & (MAKEENTRY | NC_KEEPPOSENTRY)) != 0); 1958 1959 retry: 1960 hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp); 1961 blp = HASH2BUCKETLOCK(hash); 1962 mtx_lock(blp); 1963 1964 CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { 1965 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen && 1966 !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen)) 1967 break; 1968 } 1969 1970 if (__predict_false(ncp == NULL)) { 1971 mtx_unlock(blp); 1972 SDT_PROBE2(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr); 1973 counter_u64_add(nummiss, 1); 1974 return (0); 1975 } 1976 1977 if (ncp->nc_flag & NCF_NEGATIVE) 1978 goto negative_success; 1979 1980 counter_u64_add(numposhits, 1); 1981 *vpp = ncp->nc_vp; 1982 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name, *vpp); 1983 cache_out_ts(ncp, tsp, ticksp); 1984 MPASS(dvp != *vpp); 1985 vs = vget_prep(*vpp); 1986 mtx_unlock(blp); 1987 error = vget_finish(*vpp, cnp->cn_lkflags, vs); 1988 if (error) { 1989 *vpp = NULL; 1990 goto retry; 1991 } 1992 return (-1); 1993 negative_success: 1994 /* 1995 * We don't get here with regular lookup apart from corner cases. 
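 * One known corner case: the lockless cache_lookup() further down punts
 * negative hits with cn_nameiop == CREATE on the last component to this
 * fallback, so that the stale negative entry can be zapped under the bucket
 * lock before the filesystem goes on to create the name.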
1996 */ 1997 if (__predict_true(cnp->cn_nameiop == CREATE)) { 1998 if (cnp->cn_flags & ISLASTCN) { 1999 counter_u64_add(numnegzaps, 1); 2000 error = cache_zap_locked_bucket(ncp, cnp, hash, blp); 2001 if (__predict_false(error != 0)) { 2002 zap_bucket_fail2++; 2003 goto retry; 2004 } 2005 cache_free(ncp); 2006 return (0); 2007 } 2008 } 2009 2010 whiteout = (ncp->nc_flag & NCF_WHITE); 2011 cache_out_ts(ncp, tsp, ticksp); 2012 if (cache_neg_hit_prep(ncp)) 2013 cache_neg_promote(ncp); 2014 else 2015 cache_neg_hit_finish(ncp); 2016 mtx_unlock(blp); 2017 if (whiteout) 2018 cnp->cn_flags |= ISWHITEOUT; 2019 return (ENOENT); 2020 } 2021 2022 int 2023 cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, 2024 struct timespec *tsp, int *ticksp) 2025 { 2026 struct namecache *ncp; 2027 uint32_t hash; 2028 enum vgetstate vs; 2029 int error; 2030 bool whiteout, neg_promote; 2031 u_short nc_flag; 2032 2033 MPASS((tsp == NULL && ticksp == NULL) || (tsp != NULL && ticksp != NULL)); 2034 2035 #ifdef DEBUG_CACHE 2036 if (__predict_false(!doingcache)) { 2037 cnp->cn_flags &= ~MAKEENTRY; 2038 return (0); 2039 } 2040 #endif 2041 2042 if (__predict_false(cnp->cn_nameptr[0] == '.')) { 2043 if (cnp->cn_namelen == 1) 2044 return (cache_lookup_dot(dvp, vpp, cnp, tsp, ticksp)); 2045 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') 2046 return (cache_lookup_dotdot(dvp, vpp, cnp, tsp, ticksp)); 2047 } 2048 2049 MPASS((cnp->cn_flags & ISDOTDOT) == 0); 2050 2051 if ((cnp->cn_flags & (MAKEENTRY | NC_KEEPPOSENTRY)) == 0) { 2052 cache_remove_cnp(dvp, cnp); 2053 return (0); 2054 } 2055 2056 hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp); 2057 vfs_smr_enter(); 2058 2059 CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { 2060 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen && 2061 !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen)) 2062 break; 2063 } 2064 2065 if (__predict_false(ncp == NULL)) { 2066 vfs_smr_exit(); 2067 SDT_PROBE2(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr); 2068 counter_u64_add(nummiss, 1); 2069 return (0); 2070 } 2071 2072 nc_flag = atomic_load_char(&ncp->nc_flag); 2073 if (nc_flag & NCF_NEGATIVE) 2074 goto negative_success; 2075 2076 counter_u64_add(numposhits, 1); 2077 *vpp = ncp->nc_vp; 2078 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name, *vpp); 2079 cache_out_ts(ncp, tsp, ticksp); 2080 MPASS(dvp != *vpp); 2081 if (!cache_ncp_canuse(ncp)) { 2082 vfs_smr_exit(); 2083 *vpp = NULL; 2084 goto out_fallback; 2085 } 2086 vs = vget_prep_smr(*vpp); 2087 vfs_smr_exit(); 2088 if (__predict_false(vs == VGET_NONE)) { 2089 *vpp = NULL; 2090 goto out_fallback; 2091 } 2092 error = vget_finish(*vpp, cnp->cn_lkflags, vs); 2093 if (error) { 2094 *vpp = NULL; 2095 goto out_fallback; 2096 } 2097 return (-1); 2098 negative_success: 2099 if (cnp->cn_nameiop == CREATE) { 2100 if (cnp->cn_flags & ISLASTCN) { 2101 vfs_smr_exit(); 2102 goto out_fallback; 2103 } 2104 } 2105 2106 cache_out_ts(ncp, tsp, ticksp); 2107 whiteout = (atomic_load_char(&ncp->nc_flag) & NCF_WHITE); 2108 neg_promote = cache_neg_hit_prep(ncp); 2109 if (!cache_ncp_canuse(ncp)) { 2110 cache_neg_hit_abort(ncp); 2111 vfs_smr_exit(); 2112 goto out_fallback; 2113 } 2114 if (neg_promote) { 2115 vfs_smr_exit(); 2116 if (!cache_neg_promote_cond(dvp, cnp, ncp, hash)) 2117 goto out_fallback; 2118 } else { 2119 cache_neg_hit_finish(ncp); 2120 vfs_smr_exit(); 2121 } 2122 if (whiteout) 2123 cnp->cn_flags |= ISWHITEOUT; 2124 return (ENOENT); 2125 out_fallback: 2126 return (cache_lookup_fallback(dvp, 
vpp, cnp, tsp, ticksp)); 2127 } 2128 2129 struct celockstate { 2130 struct mtx *vlp[3]; 2131 struct mtx *blp[2]; 2132 }; 2133 CTASSERT((nitems(((struct celockstate *)0)->vlp) == 3)); 2134 CTASSERT((nitems(((struct celockstate *)0)->blp) == 2)); 2135 2136 static inline void 2137 cache_celockstate_init(struct celockstate *cel) 2138 { 2139 2140 bzero(cel, sizeof(*cel)); 2141 } 2142 2143 static void 2144 cache_lock_vnodes_cel(struct celockstate *cel, struct vnode *vp, 2145 struct vnode *dvp) 2146 { 2147 struct mtx *vlp1, *vlp2; 2148 2149 MPASS(cel->vlp[0] == NULL); 2150 MPASS(cel->vlp[1] == NULL); 2151 MPASS(cel->vlp[2] == NULL); 2152 2153 MPASS(vp != NULL || dvp != NULL); 2154 2155 vlp1 = VP2VNODELOCK(vp); 2156 vlp2 = VP2VNODELOCK(dvp); 2157 cache_sort_vnodes(&vlp1, &vlp2); 2158 2159 if (vlp1 != NULL) { 2160 mtx_lock(vlp1); 2161 cel->vlp[0] = vlp1; 2162 } 2163 mtx_lock(vlp2); 2164 cel->vlp[1] = vlp2; 2165 } 2166 2167 static void 2168 cache_unlock_vnodes_cel(struct celockstate *cel) 2169 { 2170 2171 MPASS(cel->vlp[0] != NULL || cel->vlp[1] != NULL); 2172 2173 if (cel->vlp[0] != NULL) 2174 mtx_unlock(cel->vlp[0]); 2175 if (cel->vlp[1] != NULL) 2176 mtx_unlock(cel->vlp[1]); 2177 if (cel->vlp[2] != NULL) 2178 mtx_unlock(cel->vlp[2]); 2179 } 2180 2181 static bool 2182 cache_lock_vnodes_cel_3(struct celockstate *cel, struct vnode *vp) 2183 { 2184 struct mtx *vlp; 2185 bool ret; 2186 2187 cache_assert_vlp_locked(cel->vlp[0]); 2188 cache_assert_vlp_locked(cel->vlp[1]); 2189 MPASS(cel->vlp[2] == NULL); 2190 2191 MPASS(vp != NULL); 2192 vlp = VP2VNODELOCK(vp); 2193 2194 ret = true; 2195 if (vlp >= cel->vlp[1]) { 2196 mtx_lock(vlp); 2197 } else { 2198 if (mtx_trylock(vlp)) 2199 goto out; 2200 cache_lock_vnodes_cel_3_failures++; 2201 cache_unlock_vnodes_cel(cel); 2202 if (vlp < cel->vlp[0]) { 2203 mtx_lock(vlp); 2204 mtx_lock(cel->vlp[0]); 2205 mtx_lock(cel->vlp[1]); 2206 } else { 2207 if (cel->vlp[0] != NULL) 2208 mtx_lock(cel->vlp[0]); 2209 mtx_lock(vlp); 2210 mtx_lock(cel->vlp[1]); 2211 } 2212 ret = false; 2213 } 2214 out: 2215 cel->vlp[2] = vlp; 2216 return (ret); 2217 } 2218 2219 static void 2220 cache_lock_buckets_cel(struct celockstate *cel, struct mtx *blp1, 2221 struct mtx *blp2) 2222 { 2223 2224 MPASS(cel->blp[0] == NULL); 2225 MPASS(cel->blp[1] == NULL); 2226 2227 cache_sort_vnodes(&blp1, &blp2); 2228 2229 if (blp1 != NULL) { 2230 mtx_lock(blp1); 2231 cel->blp[0] = blp1; 2232 } 2233 mtx_lock(blp2); 2234 cel->blp[1] = blp2; 2235 } 2236 2237 static void 2238 cache_unlock_buckets_cel(struct celockstate *cel) 2239 { 2240 2241 if (cel->blp[0] != NULL) 2242 mtx_unlock(cel->blp[0]); 2243 mtx_unlock(cel->blp[1]); 2244 } 2245 2246 /* 2247 * Lock part of the cache affected by the insertion. 2248 * 2249 * This means vnodelocks for dvp, vp and the relevant bucketlock. 2250 * However, insertion can result in removal of an old entry. In this 2251 * case we have an additional vnode and bucketlock pair to lock. 2252 * 2253 * That is, in the worst case we have to lock 3 vnodes and 2 bucketlocks, while 2254 * preserving the locking order (smaller address first). 
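 * As a concrete worst case: adding a name for a directory vp which already
 * has a positive ".." entry means taking the vnode locks for dvp, for vp and
 * for the vnode that old ".." entry points to, plus the bucket lock for the
 * new entry's hash chain and the bucket lock holding the old ".." entry,
 * with both sets taken in sorted order by cache_enter_lock() below.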
2255 */ 2256 static void 2257 cache_enter_lock(struct celockstate *cel, struct vnode *dvp, struct vnode *vp, 2258 uint32_t hash) 2259 { 2260 struct namecache *ncp; 2261 struct mtx *blps[2]; 2262 u_char nc_flag; 2263 2264 blps[0] = HASH2BUCKETLOCK(hash); 2265 for (;;) { 2266 blps[1] = NULL; 2267 cache_lock_vnodes_cel(cel, dvp, vp); 2268 if (vp == NULL || vp->v_type != VDIR) 2269 break; 2270 ncp = atomic_load_consume_ptr(&vp->v_cache_dd); 2271 if (ncp == NULL) 2272 break; 2273 nc_flag = atomic_load_char(&ncp->nc_flag); 2274 if ((nc_flag & NCF_ISDOTDOT) == 0) 2275 break; 2276 MPASS(ncp->nc_dvp == vp); 2277 blps[1] = NCP2BUCKETLOCK(ncp); 2278 if ((nc_flag & NCF_NEGATIVE) != 0) 2279 break; 2280 if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp)) 2281 break; 2282 /* 2283 * All vnodes got re-locked. Re-validate the state and if 2284 * nothing changed we are done. Otherwise restart. 2285 */ 2286 if (ncp == vp->v_cache_dd && 2287 (ncp->nc_flag & NCF_ISDOTDOT) != 0 && 2288 blps[1] == NCP2BUCKETLOCK(ncp) && 2289 VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2]) 2290 break; 2291 cache_unlock_vnodes_cel(cel); 2292 cel->vlp[0] = NULL; 2293 cel->vlp[1] = NULL; 2294 cel->vlp[2] = NULL; 2295 } 2296 cache_lock_buckets_cel(cel, blps[0], blps[1]); 2297 } 2298 2299 static void 2300 cache_enter_lock_dd(struct celockstate *cel, struct vnode *dvp, struct vnode *vp, 2301 uint32_t hash) 2302 { 2303 struct namecache *ncp; 2304 struct mtx *blps[2]; 2305 u_char nc_flag; 2306 2307 blps[0] = HASH2BUCKETLOCK(hash); 2308 for (;;) { 2309 blps[1] = NULL; 2310 cache_lock_vnodes_cel(cel, dvp, vp); 2311 ncp = atomic_load_consume_ptr(&dvp->v_cache_dd); 2312 if (ncp == NULL) 2313 break; 2314 nc_flag = atomic_load_char(&ncp->nc_flag); 2315 if ((nc_flag & NCF_ISDOTDOT) == 0) 2316 break; 2317 MPASS(ncp->nc_dvp == dvp); 2318 blps[1] = NCP2BUCKETLOCK(ncp); 2319 if ((nc_flag & NCF_NEGATIVE) != 0) 2320 break; 2321 if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp)) 2322 break; 2323 if (ncp == dvp->v_cache_dd && 2324 (ncp->nc_flag & NCF_ISDOTDOT) != 0 && 2325 blps[1] == NCP2BUCKETLOCK(ncp) && 2326 VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2]) 2327 break; 2328 cache_unlock_vnodes_cel(cel); 2329 cel->vlp[0] = NULL; 2330 cel->vlp[1] = NULL; 2331 cel->vlp[2] = NULL; 2332 } 2333 cache_lock_buckets_cel(cel, blps[0], blps[1]); 2334 } 2335 2336 static void 2337 cache_enter_unlock(struct celockstate *cel) 2338 { 2339 2340 cache_unlock_buckets_cel(cel); 2341 cache_unlock_vnodes_cel(cel); 2342 } 2343 2344 static void __noinline 2345 cache_enter_dotdot_prep(struct vnode *dvp, struct vnode *vp, 2346 struct componentname *cnp) 2347 { 2348 struct celockstate cel; 2349 struct namecache *ncp; 2350 uint32_t hash; 2351 int len; 2352 2353 if (atomic_load_ptr(&dvp->v_cache_dd) == NULL) 2354 return; 2355 len = cnp->cn_namelen; 2356 cache_celockstate_init(&cel); 2357 hash = cache_get_hash(cnp->cn_nameptr, len, dvp); 2358 cache_enter_lock_dd(&cel, dvp, vp, hash); 2359 ncp = dvp->v_cache_dd; 2360 if (ncp != NULL && (ncp->nc_flag & NCF_ISDOTDOT)) { 2361 KASSERT(ncp->nc_dvp == dvp, ("wrong isdotdot parent")); 2362 cache_zap_locked(ncp); 2363 } else { 2364 ncp = NULL; 2365 } 2366 atomic_store_ptr(&dvp->v_cache_dd, NULL); 2367 cache_enter_unlock(&cel); 2368 if (ncp != NULL) 2369 cache_free(ncp); 2370 } 2371 2372 /* 2373 * Add an entry to the cache. 
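 *
 * A minimal, hypothetical sketch of the usual call site (fs_lookup_impl() is
 * a made-up name; real filesystems reach this through VOP_CACHEDLOOKUP and
 * carry more state and error handling):
 *
 *	error = fs_lookup_impl(dvp, cnp, &vp);
 *	if (error == 0 && (cnp->cn_flags & MAKEENTRY) != 0)
 *		cache_enter(dvp, vp, cnp);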
2374 */ 2375 void 2376 cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, 2377 struct timespec *tsp, struct timespec *dtsp) 2378 { 2379 struct celockstate cel; 2380 struct namecache *ncp, *n2, *ndd; 2381 struct namecache_ts *ncp_ts; 2382 struct nchashhead *ncpp; 2383 uint32_t hash; 2384 int flag; 2385 int len; 2386 2387 KASSERT(cnp->cn_namelen <= NAME_MAX, 2388 ("%s: passed len %ld exceeds NAME_MAX (%d)", __func__, cnp->cn_namelen, 2389 NAME_MAX)); 2390 #ifdef notyet 2391 /* 2392 * Not everything doing this is weeded out yet. 2393 */ 2394 VNPASS(dvp != vp, dvp); 2395 #endif 2396 VNPASS(!VN_IS_DOOMED(dvp), dvp); 2397 VNPASS(dvp->v_type != VNON, dvp); 2398 if (vp != NULL) { 2399 VNPASS(!VN_IS_DOOMED(vp), vp); 2400 VNPASS(vp->v_type != VNON, vp); 2401 } 2402 2403 #ifdef DEBUG_CACHE 2404 if (__predict_false(!doingcache)) 2405 return; 2406 #endif 2407 2408 flag = 0; 2409 if (__predict_false(cnp->cn_nameptr[0] == '.')) { 2410 if (cnp->cn_namelen == 1) 2411 return; 2412 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') { 2413 cache_enter_dotdot_prep(dvp, vp, cnp); 2414 flag = NCF_ISDOTDOT; 2415 } 2416 } 2417 2418 ncp = cache_alloc(cnp->cn_namelen, tsp != NULL); 2419 if (ncp == NULL) 2420 return; 2421 2422 cache_celockstate_init(&cel); 2423 ndd = NULL; 2424 ncp_ts = NULL; 2425 2426 /* 2427 * Calculate the hash key and setup as much of the new 2428 * namecache entry as possible before acquiring the lock. 2429 */ 2430 ncp->nc_flag = flag | NCF_WIP; 2431 ncp->nc_vp = vp; 2432 if (vp == NULL) 2433 cache_neg_init(ncp); 2434 ncp->nc_dvp = dvp; 2435 if (tsp != NULL) { 2436 ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc); 2437 ncp_ts->nc_time = *tsp; 2438 ncp_ts->nc_ticks = ticks; 2439 ncp_ts->nc_nc.nc_flag |= NCF_TS; 2440 if (dtsp != NULL) { 2441 ncp_ts->nc_dotdottime = *dtsp; 2442 ncp_ts->nc_nc.nc_flag |= NCF_DTS; 2443 } 2444 } 2445 len = ncp->nc_nlen = cnp->cn_namelen; 2446 hash = cache_get_hash(cnp->cn_nameptr, len, dvp); 2447 memcpy(ncp->nc_name, cnp->cn_nameptr, len); 2448 ncp->nc_name[len] = '\0'; 2449 cache_enter_lock(&cel, dvp, vp, hash); 2450 2451 /* 2452 * See if this vnode or negative entry is already in the cache 2453 * with this name. This can happen with concurrent lookups of 2454 * the same path name. 2455 */ 2456 ncpp = NCHHASH(hash); 2457 CK_SLIST_FOREACH(n2, ncpp, nc_hash) { 2458 if (n2->nc_dvp == dvp && 2459 n2->nc_nlen == cnp->cn_namelen && 2460 !bcmp(n2->nc_name, cnp->cn_nameptr, n2->nc_nlen)) { 2461 MPASS(cache_ncp_canuse(n2)); 2462 if ((n2->nc_flag & NCF_NEGATIVE) != 0) 2463 KASSERT(vp == NULL, 2464 ("%s: found entry pointing to a different vnode (%p != %p) ; name [%s]", 2465 __func__, NULL, vp, cnp->cn_nameptr)); 2466 else 2467 KASSERT(n2->nc_vp == vp, 2468 ("%s: found entry pointing to a different vnode (%p != %p) ; name [%s]", 2469 __func__, n2->nc_vp, vp, cnp->cn_nameptr)); 2470 /* 2471 * Entries are supposed to be immutable unless in the 2472 * process of getting destroyed. Accommodating for 2473 * changing timestamps is possible but not worth it. 2474 * This should be harmless in terms of correctness, in 2475 * the worst case resulting in an earlier expiration. 2476 * Alternatively, the found entry can be replaced 2477 * altogether. 
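 * The disabled block below sketches what updating the timestamps of the
 * existing entry in place would look like, should that trade-off ever be
 * revisited.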
2478 */ 2479 MPASS((n2->nc_flag & (NCF_TS | NCF_DTS)) == (ncp->nc_flag & (NCF_TS | NCF_DTS))); 2480 #if 0 2481 if (tsp != NULL) { 2482 KASSERT((n2->nc_flag & NCF_TS) != 0, 2483 ("no NCF_TS")); 2484 n2_ts = __containerof(n2, struct namecache_ts, nc_nc); 2485 n2_ts->nc_time = ncp_ts->nc_time; 2486 n2_ts->nc_ticks = ncp_ts->nc_ticks; 2487 if (dtsp != NULL) { 2488 n2_ts->nc_dotdottime = ncp_ts->nc_dotdottime; 2489 n2_ts->nc_nc.nc_flag |= NCF_DTS; 2490 } 2491 } 2492 #endif 2493 SDT_PROBE3(vfs, namecache, enter, duplicate, dvp, ncp->nc_name, 2494 vp); 2495 goto out_unlock_free; 2496 } 2497 } 2498 2499 if (flag == NCF_ISDOTDOT) { 2500 /* 2501 * See if we are trying to add a ".." entry, but some other lookup 2502 * has already populated the v_cache_dd pointer. 2503 */ 2504 if (dvp->v_cache_dd != NULL) 2505 goto out_unlock_free; 2506 KASSERT(vp == NULL || vp->v_type == VDIR, 2507 ("wrong vnode type %p", vp)); 2508 atomic_thread_fence_rel(); 2509 atomic_store_ptr(&dvp->v_cache_dd, ncp); 2510 } 2511 2512 if (vp != NULL) { 2513 if (flag != NCF_ISDOTDOT) { 2514 /* 2515 * For this case, the cache entry maps both the 2516 * directory name in it and the name ".." for the 2517 * directory's parent. 2518 */ 2519 if ((ndd = vp->v_cache_dd) != NULL) { 2520 if ((ndd->nc_flag & NCF_ISDOTDOT) != 0) 2521 cache_zap_locked(ndd); 2522 else 2523 ndd = NULL; 2524 } 2525 atomic_thread_fence_rel(); 2526 atomic_store_ptr(&vp->v_cache_dd, ncp); 2527 } else if (vp->v_type != VDIR) { 2528 if (vp->v_cache_dd != NULL) { 2529 atomic_store_ptr(&vp->v_cache_dd, NULL); 2530 } 2531 } 2532 } 2533 2534 if (flag != NCF_ISDOTDOT) { 2535 if (LIST_EMPTY(&dvp->v_cache_src)) { 2536 cache_hold_vnode(dvp); 2537 } 2538 LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src); 2539 } 2540 2541 /* 2542 * If the entry is "negative", we place it into the 2543 * "negative" cache queue, otherwise, we place it into the 2544 * destination vnode's cache entries queue. 2545 */ 2546 if (vp != NULL) { 2547 TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst); 2548 SDT_PROBE3(vfs, namecache, enter, done, dvp, ncp->nc_name, 2549 vp); 2550 } else { 2551 if (cnp->cn_flags & ISWHITEOUT) 2552 atomic_store_char(&ncp->nc_flag, ncp->nc_flag | NCF_WHITE); 2553 cache_neg_insert(ncp); 2554 SDT_PROBE2(vfs, namecache, enter_negative, done, dvp, 2555 ncp->nc_name); 2556 } 2557 2558 /* 2559 * Insert the new namecache entry into the appropriate chain 2560 * within the cache entries table. 2561 */ 2562 CK_SLIST_INSERT_HEAD(ncpp, ncp, nc_hash); 2563 2564 atomic_thread_fence_rel(); 2565 /* 2566 * Mark the entry as fully constructed. 2567 * It is immutable past this point until its removal. 2568 */ 2569 atomic_store_char(&ncp->nc_flag, ncp->nc_flag & ~NCF_WIP); 2570 2571 cache_enter_unlock(&cel); 2572 if (ndd != NULL) 2573 cache_free(ndd); 2574 return; 2575 out_unlock_free: 2576 cache_enter_unlock(&cel); 2577 cache_free(ncp); 2578 return; 2579 } 2580 2581 /* 2582 * A variant of the above accepting flags. 2583 * 2584 * - VFS_CACHE_DROPOLD -- if a conflicting entry is found, drop it. 2585 * 2586 * TODO: this routine is a hack. It blindly removes the old entry even if it 2587 * happens to match, and it does so in an inefficient manner. It was added 2588 * to accommodate NFS, which runs into a case where the target for a given name 2589 * may change from under it. Note this does nothing to solve the following 2590 * race: two callers of cache_enter_time_flags pass a different target vnode for 2591 * the same [dvp, cnp]. It may be argued that code doing this is broken.
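 *
 * A hedged, illustrative example (the NFS-style scenario is made up, only the
 * KPI call is real): a client which re-validates a name and finds that the
 * server now maps it to a different file can call
 *
 *	cache_enter_time_flags(dvp, newvp, cnp, &ts, NULL, VFS_CACHE_DROPOLD);
 *
 * so that the stale entry is dropped before the new one is inserted.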
2592 */ 2593 void 2594 cache_enter_time_flags(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, 2595 struct timespec *tsp, struct timespec *dtsp, int flags) 2596 { 2597 2598 MPASS((flags & ~(VFS_CACHE_DROPOLD)) == 0); 2599 2600 if (flags & VFS_CACHE_DROPOLD) 2601 cache_remove_cnp(dvp, cnp); 2602 cache_enter_time(dvp, vp, cnp, tsp, dtsp); 2603 } 2604 2605 static u_int 2606 cache_roundup_2(u_int val) 2607 { 2608 u_int res; 2609 2610 for (res = 1; res <= val; res <<= 1) 2611 continue; 2612 2613 return (res); 2614 } 2615 2616 static struct nchashhead * 2617 nchinittbl(u_long elements, u_long *hashmask) 2618 { 2619 struct nchashhead *hashtbl; 2620 u_long hashsize, i; 2621 2622 hashsize = cache_roundup_2(elements) / 2; 2623 2624 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), M_VFSCACHE, M_WAITOK); 2625 for (i = 0; i < hashsize; i++) 2626 CK_SLIST_INIT(&hashtbl[i]); 2627 *hashmask = hashsize - 1; 2628 return (hashtbl); 2629 } 2630 2631 static void 2632 ncfreetbl(struct nchashhead *hashtbl) 2633 { 2634 2635 free(hashtbl, M_VFSCACHE); 2636 } 2637 2638 /* 2639 * Name cache initialization, from vfs_init() when we are booting 2640 */ 2641 static void 2642 nchinit(void *dummy __unused) 2643 { 2644 u_int i; 2645 2646 cache_zone_small = uma_zcreate("S VFS Cache", CACHE_ZONE_SMALL_SIZE, 2647 NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT); 2648 cache_zone_small_ts = uma_zcreate("STS VFS Cache", CACHE_ZONE_SMALL_TS_SIZE, 2649 NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT); 2650 cache_zone_large = uma_zcreate("L VFS Cache", CACHE_ZONE_LARGE_SIZE, 2651 NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT); 2652 cache_zone_large_ts = uma_zcreate("LTS VFS Cache", CACHE_ZONE_LARGE_TS_SIZE, 2653 NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT); 2654 2655 VFS_SMR_ZONE_SET(cache_zone_small); 2656 VFS_SMR_ZONE_SET(cache_zone_small_ts); 2657 VFS_SMR_ZONE_SET(cache_zone_large); 2658 VFS_SMR_ZONE_SET(cache_zone_large_ts); 2659 2660 ncsize = desiredvnodes * ncsizefactor; 2661 cache_recalc_neg_min(ncnegminpct); 2662 nchashtbl = nchinittbl(desiredvnodes * 2, &nchash); 2663 ncbuckethash = cache_roundup_2(mp_ncpus * mp_ncpus) - 1; 2664 if (ncbuckethash < 7) /* arbitrarily chosen to avoid having one lock */ 2665 ncbuckethash = 7; 2666 if (ncbuckethash > nchash) 2667 ncbuckethash = nchash; 2668 bucketlocks = malloc(sizeof(*bucketlocks) * numbucketlocks, M_VFSCACHE, 2669 M_WAITOK | M_ZERO); 2670 for (i = 0; i < numbucketlocks; i++) 2671 mtx_init(&bucketlocks[i], "ncbuc", NULL, MTX_DUPOK | MTX_RECURSE); 2672 ncvnodehash = ncbuckethash; 2673 vnodelocks = malloc(sizeof(*vnodelocks) * numvnodelocks, M_VFSCACHE, 2674 M_WAITOK | M_ZERO); 2675 for (i = 0; i < numvnodelocks; i++) 2676 mtx_init(&vnodelocks[i], "ncvn", NULL, MTX_DUPOK | MTX_RECURSE); 2677 2678 for (i = 0; i < numneglists; i++) { 2679 mtx_init(&neglists[i].nl_evict_lock, "ncnege", NULL, MTX_DEF); 2680 mtx_init(&neglists[i].nl_lock, "ncnegl", NULL, MTX_DEF); 2681 TAILQ_INIT(&neglists[i].nl_list); 2682 TAILQ_INIT(&neglists[i].nl_hotlist); 2683 } 2684 } 2685 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL); 2686 2687 void 2688 cache_vnode_init(struct vnode *vp) 2689 { 2690 2691 LIST_INIT(&vp->v_cache_src); 2692 TAILQ_INIT(&vp->v_cache_dst); 2693 vp->v_cache_dd = NULL; 2694 cache_prehash(vp); 2695 } 2696 2697 /* 2698 * Induce transient cache misses for lockless operation in cache_lookup() by 2699 * using a temporary hash table. 2700 * 2701 * This will force a fs lookup. 
2702 * 2703 * Synchronisation is done in two steps, calling vfs_smr_synchronize each time 2704 * to wait until all CPUs have been observed not performing the lookup. 2705 */ 2706 static void 2707 cache_changesize_set_temp(struct nchashhead *temptbl, u_long temphash) 2708 { 2709 2710 MPASS(temphash < nchash); 2711 /* 2712 * Change the size. The new size is smaller and can safely be used 2713 * against the existing table. All lookups which now hash wrong will 2714 * result in a cache miss, which all callers are supposed to know how 2715 * to handle. 2716 */ 2717 atomic_store_long(&nchash, temphash); 2718 atomic_thread_fence_rel(); 2719 vfs_smr_synchronize(); 2720 /* 2721 * At this point everyone sees the updated hash value, but they still 2722 * see the old table. 2723 */ 2724 atomic_store_ptr(&nchashtbl, temptbl); 2725 atomic_thread_fence_rel(); 2726 vfs_smr_synchronize(); 2727 /* 2728 * At this point everyone sees the updated table pointer and size pair. 2729 */ 2730 } 2731 2732 /* 2733 * Set the new hash table. 2734 * 2735 * Similarly to cache_changesize_set_temp(), this has to synchronize against 2736 * lockless operation in cache_lookup(). 2737 */ 2738 static void 2739 cache_changesize_set_new(struct nchashhead *new_tbl, u_long new_hash) 2740 { 2741 2742 MPASS(nchash < new_hash); 2743 /* 2744 * Change the pointer first. This won't result in out-of-bounds access 2745 * since the temporary table is guaranteed to be smaller. 2746 */ 2747 atomic_store_ptr(&nchashtbl, new_tbl); 2748 atomic_thread_fence_rel(); 2749 vfs_smr_synchronize(); 2750 /* 2751 * At this point everyone sees the updated pointer value, but they 2752 * still see the old size. 2753 */ 2754 atomic_store_long(&nchash, new_hash); 2755 atomic_thread_fence_rel(); 2756 vfs_smr_synchronize(); 2757 /* 2758 * At this point everyone sees the updated table pointer and size pair. 2759 */ 2760 } 2761 2762 void 2763 cache_changesize(u_long newmaxvnodes) 2764 { 2765 struct nchashhead *new_nchashtbl, *old_nchashtbl, *temptbl; 2766 u_long new_nchash, old_nchash, temphash; 2767 struct namecache *ncp; 2768 uint32_t hash; 2769 u_long newncsize; 2770 int i; 2771 2772 newncsize = newmaxvnodes * ncsizefactor; 2773 newmaxvnodes = cache_roundup_2(newmaxvnodes * 2); 2774 if (newmaxvnodes < numbucketlocks) 2775 newmaxvnodes = numbucketlocks; 2776 2777 new_nchashtbl = nchinittbl(newmaxvnodes, &new_nchash); 2778 /* If same hash table size, nothing to do */ 2779 if (nchash == new_nchash) { 2780 ncfreetbl(new_nchashtbl); 2781 return; 2782 } 2783 2784 temptbl = nchinittbl(1, &temphash); 2785 2786 /* 2787 * Move everything from the old hash table to the new table. 2788 * None of the namecache entries can be removed out from under us, since 2789 * doing so means taking them off the hash table and all the relevant locks are held below.
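 * While the copy below runs, lockless readers see the one-bucket temporary
 * table installed by cache_changesize_set_temp(), so at worst they take a
 * transient miss and fall back to the filesystem. Once everything has been
 * rehashed against the new mask, the real table is published with
 * cache_changesize_set_new().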
2790 */ 2791 cache_lock_all_vnodes(); 2792 cache_lock_all_buckets(); 2793 old_nchashtbl = nchashtbl; 2794 old_nchash = nchash; 2795 cache_changesize_set_temp(temptbl, temphash); 2796 for (i = 0; i <= old_nchash; i++) { 2797 while ((ncp = CK_SLIST_FIRST(&old_nchashtbl[i])) != NULL) { 2798 hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, 2799 ncp->nc_dvp); 2800 CK_SLIST_REMOVE(&old_nchashtbl[i], ncp, namecache, nc_hash); 2801 CK_SLIST_INSERT_HEAD(&new_nchashtbl[hash & new_nchash], ncp, nc_hash); 2802 } 2803 } 2804 ncsize = newncsize; 2805 cache_recalc_neg_min(ncnegminpct); 2806 cache_changesize_set_new(new_nchashtbl, new_nchash); 2807 cache_unlock_all_buckets(); 2808 cache_unlock_all_vnodes(); 2809 ncfreetbl(old_nchashtbl); 2810 ncfreetbl(temptbl); 2811 } 2812 2813 /* 2814 * Remove all entries from and to a particular vnode. 2815 */ 2816 static void 2817 cache_purge_impl(struct vnode *vp) 2818 { 2819 struct cache_freebatch batch; 2820 struct namecache *ncp; 2821 struct mtx *vlp, *vlp2; 2822 2823 TAILQ_INIT(&batch); 2824 vlp = VP2VNODELOCK(vp); 2825 vlp2 = NULL; 2826 mtx_lock(vlp); 2827 retry: 2828 while (!LIST_EMPTY(&vp->v_cache_src)) { 2829 ncp = LIST_FIRST(&vp->v_cache_src); 2830 if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2)) 2831 goto retry; 2832 TAILQ_INSERT_TAIL(&batch, ncp, nc_dst); 2833 } 2834 while (!TAILQ_EMPTY(&vp->v_cache_dst)) { 2835 ncp = TAILQ_FIRST(&vp->v_cache_dst); 2836 if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2)) 2837 goto retry; 2838 TAILQ_INSERT_TAIL(&batch, ncp, nc_dst); 2839 } 2840 ncp = vp->v_cache_dd; 2841 if (ncp != NULL) { 2842 KASSERT(ncp->nc_flag & NCF_ISDOTDOT, 2843 ("lost dotdot link")); 2844 if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2)) 2845 goto retry; 2846 TAILQ_INSERT_TAIL(&batch, ncp, nc_dst); 2847 } 2848 KASSERT(vp->v_cache_dd == NULL, ("incomplete purge")); 2849 mtx_unlock(vlp); 2850 if (vlp2 != NULL) 2851 mtx_unlock(vlp2); 2852 cache_free_batch(&batch); 2853 } 2854 2855 /* 2856 * Opportunistic check to see if there is anything to do. 2857 */ 2858 static bool 2859 cache_has_entries(struct vnode *vp) 2860 { 2861 2862 if (LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) && 2863 atomic_load_ptr(&vp->v_cache_dd) == NULL) 2864 return (false); 2865 return (true); 2866 } 2867 2868 void 2869 cache_purge(struct vnode *vp) 2870 { 2871 2872 SDT_PROBE1(vfs, namecache, purge, done, vp); 2873 if (!cache_has_entries(vp)) 2874 return; 2875 cache_purge_impl(vp); 2876 } 2877 2878 /* 2879 * Only to be used by vgone. 2880 */ 2881 void 2882 cache_purge_vgone(struct vnode *vp) 2883 { 2884 struct mtx *vlp; 2885 2886 VNPASS(VN_IS_DOOMED(vp), vp); 2887 if (cache_has_entries(vp)) { 2888 cache_purge_impl(vp); 2889 return; 2890 } 2891 2892 /* 2893 * Serialize against a potential thread doing cache_purge. 2894 */ 2895 vlp = VP2VNODELOCK(vp); 2896 mtx_wait_unlocked(vlp); 2897 if (cache_has_entries(vp)) { 2898 cache_purge_impl(vp); 2899 return; 2900 } 2901 return; 2902 } 2903 2904 /* 2905 * Remove all negative entries for a particular directory vnode. 
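 * One plausible (hypothetical) use is a filesystem which just added or
 * renamed entries in a directory and wants to drop every cached "no such
 * name" answer for it in one go:
 *
 *	cache_purge_negative(dvp);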
2906 */ 2907 void 2908 cache_purge_negative(struct vnode *vp) 2909 { 2910 struct cache_freebatch batch; 2911 struct namecache *ncp, *nnp; 2912 struct mtx *vlp; 2913 2914 SDT_PROBE1(vfs, namecache, purge_negative, done, vp); 2915 if (LIST_EMPTY(&vp->v_cache_src)) 2916 return; 2917 TAILQ_INIT(&batch); 2918 vlp = VP2VNODELOCK(vp); 2919 mtx_lock(vlp); 2920 LIST_FOREACH_SAFE(ncp, &vp->v_cache_src, nc_src, nnp) { 2921 if (!(ncp->nc_flag & NCF_NEGATIVE)) 2922 continue; 2923 cache_zap_negative_locked_vnode_kl(ncp, vp); 2924 TAILQ_INSERT_TAIL(&batch, ncp, nc_dst); 2925 } 2926 mtx_unlock(vlp); 2927 cache_free_batch(&batch); 2928 } 2929 2930 /* 2931 * Entry points for modifying VOP operations. 2932 */ 2933 void 2934 cache_vop_rename(struct vnode *fdvp, struct vnode *fvp, struct vnode *tdvp, 2935 struct vnode *tvp, struct componentname *fcnp, struct componentname *tcnp) 2936 { 2937 2938 ASSERT_VOP_IN_SEQC(fdvp); 2939 ASSERT_VOP_IN_SEQC(fvp); 2940 ASSERT_VOP_IN_SEQC(tdvp); 2941 if (tvp != NULL) 2942 ASSERT_VOP_IN_SEQC(tvp); 2943 2944 cache_purge(fvp); 2945 if (tvp != NULL) { 2946 cache_purge(tvp); 2947 KASSERT(!cache_remove_cnp(tdvp, tcnp), 2948 ("%s: lingering negative entry", __func__)); 2949 } else { 2950 cache_remove_cnp(tdvp, tcnp); 2951 } 2952 2953 /* 2954 * TODO 2955 * 2956 * Historically, renaming always purged all relevant entries, 2957 * but that's quite wasteful. In particular, it turns out that in many cases 2958 * the target file is immediately accessed after rename, inducing a cache 2959 * miss. 2960 * 2961 * Recode this to reduce relocking and reuse the existing entry (if any) 2962 * instead of just removing it above and allocating a new one here. 2963 */ 2964 if (cache_rename_add) { 2965 cache_enter(tdvp, fvp, tcnp); 2966 } 2967 } 2968 2969 void 2970 cache_vop_rmdir(struct vnode *dvp, struct vnode *vp) 2971 { 2972 2973 ASSERT_VOP_IN_SEQC(dvp); 2974 ASSERT_VOP_IN_SEQC(vp); 2975 cache_purge(vp); 2976 } 2977 2978 #ifdef INVARIANTS 2979 /* 2980 * Validate that, if an entry exists, it matches. 2981 */ 2982 void 2983 cache_validate(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) 2984 { 2985 struct namecache *ncp; 2986 struct mtx *blp; 2987 uint32_t hash; 2988 2989 hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp); 2990 if (CK_SLIST_EMPTY(NCHHASH(hash))) 2991 return; 2992 blp = HASH2BUCKETLOCK(hash); 2993 mtx_lock(blp); 2994 CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { 2995 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen && 2996 !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen)) { 2997 if (ncp->nc_vp != vp) 2998 panic("%s: mismatch (%p != %p); ncp %p [%s] dvp %p\n", 2999 __func__, vp, ncp->nc_vp, ncp, ncp->nc_name, ncp->nc_dvp); 3000 } 3001 } 3002 mtx_unlock(blp); 3003 } 3004 #endif 3005 3006 /* 3007 * Flush all entries referencing a particular filesystem. 3008 */ 3009 void 3010 cache_purgevfs(struct mount *mp) 3011 { 3012 struct vnode *vp, *mvp; 3013 size_t visited, purged; 3014 3015 visited = purged = 0; 3016 /* 3017 * Somewhat wasteful iteration over all vnodes. Would be better to 3018 * support filtering and avoid the interlock to begin with.
3019 */ 3020 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3021 visited++; 3022 if (!cache_has_entries(vp)) { 3023 VI_UNLOCK(vp); 3024 continue; 3025 } 3026 vholdl(vp); 3027 VI_UNLOCK(vp); 3028 cache_purge(vp); 3029 purged++; 3030 vdrop(vp); 3031 } 3032 3033 SDT_PROBE3(vfs, namecache, purgevfs, done, mp, visited, purged); 3034 } 3035 3036 /* 3037 * Perform canonical checks and cache lookup and pass on to filesystem 3038 * through the vop_cachedlookup only if needed. 3039 */ 3040 3041 int 3042 vfs_cache_lookup(struct vop_lookup_args *ap) 3043 { 3044 struct vnode *dvp; 3045 int error; 3046 struct vnode **vpp = ap->a_vpp; 3047 struct componentname *cnp = ap->a_cnp; 3048 int flags = cnp->cn_flags; 3049 3050 *vpp = NULL; 3051 dvp = ap->a_dvp; 3052 3053 if (dvp->v_type != VDIR) 3054 return (ENOTDIR); 3055 3056 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && 3057 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) 3058 return (EROFS); 3059 3060 error = vn_dir_check_exec(dvp, cnp); 3061 if (error != 0) 3062 return (error); 3063 3064 error = cache_lookup(dvp, vpp, cnp, NULL, NULL); 3065 if (error == 0) 3066 return (VOP_CACHEDLOOKUP(dvp, vpp, cnp)); 3067 if (error == -1) 3068 return (0); 3069 return (error); 3070 } 3071 3072 /* Implementation of the getcwd syscall. */ 3073 int 3074 sys___getcwd(struct thread *td, struct __getcwd_args *uap) 3075 { 3076 char *buf, *retbuf; 3077 size_t buflen; 3078 int error; 3079 3080 buflen = uap->buflen; 3081 if (__predict_false(buflen < 2)) 3082 return (EINVAL); 3083 if (buflen > MAXPATHLEN) 3084 buflen = MAXPATHLEN; 3085 3086 buf = uma_zalloc(namei_zone, M_WAITOK); 3087 error = vn_getcwd(buf, &retbuf, &buflen); 3088 if (error == 0) 3089 error = copyout(retbuf, uap->buf, buflen); 3090 uma_zfree(namei_zone, buf); 3091 return (error); 3092 } 3093 3094 int 3095 vn_getcwd(char *buf, char **retbuf, size_t *buflen) 3096 { 3097 struct pwd *pwd; 3098 int error; 3099 3100 vfs_smr_enter(); 3101 pwd = pwd_get_smr(); 3102 error = vn_fullpath_any_smr(pwd->pwd_cdir, pwd->pwd_rdir, buf, retbuf, 3103 buflen, 0); 3104 VFS_SMR_ASSERT_NOT_ENTERED(); 3105 if (error < 0) { 3106 pwd = pwd_hold(curthread); 3107 error = vn_fullpath_any(pwd->pwd_cdir, pwd->pwd_rdir, buf, 3108 retbuf, buflen); 3109 pwd_drop(pwd); 3110 } 3111 3112 #ifdef KTRACE 3113 if (KTRPOINT(curthread, KTR_NAMEI) && error == 0) 3114 ktrnamei(*retbuf); 3115 #endif 3116 return (error); 3117 } 3118 3119 static int 3120 kern___realpathat(struct thread *td, int fd, const char *path, char *buf, 3121 size_t size, int flags, enum uio_seg pathseg) 3122 { 3123 struct nameidata nd; 3124 char *retbuf, *freebuf; 3125 int error; 3126 3127 if (flags != 0) 3128 return (EINVAL); 3129 NDINIT_ATRIGHTS(&nd, LOOKUP, FOLLOW | SAVENAME | WANTPARENT | AUDITVNODE1, 3130 pathseg, path, fd, &cap_fstat_rights); 3131 if ((error = namei(&nd)) != 0) 3132 return (error); 3133 error = vn_fullpath_hardlink(nd.ni_vp, nd.ni_dvp, nd.ni_cnd.cn_nameptr, 3134 nd.ni_cnd.cn_namelen, &retbuf, &freebuf, &size); 3135 if (error == 0) { 3136 error = copyout(retbuf, buf, size); 3137 free(freebuf, M_TEMP); 3138 } 3139 NDFREE(&nd, 0); 3140 return (error); 3141 } 3142 3143 int 3144 sys___realpathat(struct thread *td, struct __realpathat_args *uap) 3145 { 3146 3147 return (kern___realpathat(td, uap->fd, uap->path, uap->buf, uap->size, 3148 uap->flags, UIO_USERSPACE)); 3149 } 3150 3151 /* 3152 * Retrieve the full filesystem path that correspond to a vnode from the name 3153 * cache (if available) 3154 */ 3155 int 3156 vn_fullpath(struct vnode *vp, char 
**retbuf, char **freebuf) 3157 { 3158 struct pwd *pwd; 3159 char *buf; 3160 size_t buflen; 3161 int error; 3162 3163 if (__predict_false(vp == NULL)) 3164 return (EINVAL); 3165 3166 buflen = MAXPATHLEN; 3167 buf = malloc(buflen, M_TEMP, M_WAITOK); 3168 vfs_smr_enter(); 3169 pwd = pwd_get_smr(); 3170 error = vn_fullpath_any_smr(vp, pwd->pwd_rdir, buf, retbuf, &buflen, 0); 3171 VFS_SMR_ASSERT_NOT_ENTERED(); 3172 if (error < 0) { 3173 pwd = pwd_hold(curthread); 3174 error = vn_fullpath_any(vp, pwd->pwd_rdir, buf, retbuf, &buflen); 3175 pwd_drop(pwd); 3176 } 3177 if (error == 0) 3178 *freebuf = buf; 3179 else 3180 free(buf, M_TEMP); 3181 return (error); 3182 } 3183 3184 /* 3185 * This function is similar to vn_fullpath, but it attempts to lookup the 3186 * pathname relative to the global root mount point. This is required for the 3187 * auditing sub-system, as audited pathnames must be absolute, relative to the 3188 * global root mount point. 3189 */ 3190 int 3191 vn_fullpath_global(struct vnode *vp, char **retbuf, char **freebuf) 3192 { 3193 char *buf; 3194 size_t buflen; 3195 int error; 3196 3197 if (__predict_false(vp == NULL)) 3198 return (EINVAL); 3199 buflen = MAXPATHLEN; 3200 buf = malloc(buflen, M_TEMP, M_WAITOK); 3201 vfs_smr_enter(); 3202 error = vn_fullpath_any_smr(vp, rootvnode, buf, retbuf, &buflen, 0); 3203 VFS_SMR_ASSERT_NOT_ENTERED(); 3204 if (error < 0) { 3205 error = vn_fullpath_any(vp, rootvnode, buf, retbuf, &buflen); 3206 } 3207 if (error == 0) 3208 *freebuf = buf; 3209 else 3210 free(buf, M_TEMP); 3211 return (error); 3212 } 3213 3214 static struct namecache * 3215 vn_dd_from_dst(struct vnode *vp) 3216 { 3217 struct namecache *ncp; 3218 3219 cache_assert_vnode_locked(vp); 3220 TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst) { 3221 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 3222 return (ncp); 3223 } 3224 return (NULL); 3225 } 3226 3227 int 3228 vn_vptocnp(struct vnode **vp, char *buf, size_t *buflen) 3229 { 3230 struct vnode *dvp; 3231 struct namecache *ncp; 3232 struct mtx *vlp; 3233 int error; 3234 3235 vlp = VP2VNODELOCK(*vp); 3236 mtx_lock(vlp); 3237 ncp = (*vp)->v_cache_dd; 3238 if (ncp != NULL && (ncp->nc_flag & NCF_ISDOTDOT) == 0) { 3239 KASSERT(ncp == vn_dd_from_dst(*vp), 3240 ("%s: mismatch for dd entry (%p != %p)", __func__, 3241 ncp, vn_dd_from_dst(*vp))); 3242 } else { 3243 ncp = vn_dd_from_dst(*vp); 3244 } 3245 if (ncp != NULL) { 3246 if (*buflen < ncp->nc_nlen) { 3247 mtx_unlock(vlp); 3248 vrele(*vp); 3249 counter_u64_add(numfullpathfail4, 1); 3250 error = ENOMEM; 3251 SDT_PROBE3(vfs, namecache, fullpath, return, error, 3252 vp, NULL); 3253 return (error); 3254 } 3255 *buflen -= ncp->nc_nlen; 3256 memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen); 3257 SDT_PROBE3(vfs, namecache, fullpath, hit, ncp->nc_dvp, 3258 ncp->nc_name, vp); 3259 dvp = *vp; 3260 *vp = ncp->nc_dvp; 3261 vref(*vp); 3262 mtx_unlock(vlp); 3263 vrele(dvp); 3264 return (0); 3265 } 3266 SDT_PROBE1(vfs, namecache, fullpath, miss, vp); 3267 3268 mtx_unlock(vlp); 3269 vn_lock(*vp, LK_SHARED | LK_RETRY); 3270 error = VOP_VPTOCNP(*vp, &dvp, buf, buflen); 3271 vput(*vp); 3272 if (error) { 3273 counter_u64_add(numfullpathfail2, 1); 3274 SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL); 3275 return (error); 3276 } 3277 3278 *vp = dvp; 3279 if (VN_IS_DOOMED(dvp)) { 3280 /* forced unmount */ 3281 vrele(dvp); 3282 error = ENOENT; 3283 SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL); 3284 return (error); 3285 } 3286 /* 3287 * *vp has its use count incremented still. 
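 * To recap the contract on success: the name is written at the tail end of
 * buf (the caller passes the amount of free space in *buflen and gets back
 * how much of it is left), the reference on the original vnode is dropped
 * and *vp now points to the referenced parent.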
3288 */ 3289 3290 return (0); 3291 } 3292 3293 /* 3294 * Resolve a directory to a pathname. 3295 * 3296 * The name of the directory can always be found in the namecache or fetched 3297 * from the filesystem. There is also guaranteed to be only one parent, meaning 3298 * we can just follow vnodes up until we find the root. 3299 * 3300 * The vnode must be referenced. 3301 */ 3302 static int 3303 vn_fullpath_dir(struct vnode *vp, struct vnode *rdir, char *buf, char **retbuf, 3304 size_t *len, size_t addend) 3305 { 3306 #ifdef KDTRACE_HOOKS 3307 struct vnode *startvp = vp; 3308 #endif 3309 struct vnode *vp1; 3310 size_t buflen; 3311 int error; 3312 bool slash_prefixed; 3313 3314 VNPASS(vp->v_type == VDIR || VN_IS_DOOMED(vp), vp); 3315 VNPASS(vp->v_usecount > 0, vp); 3316 3317 buflen = *len; 3318 3319 slash_prefixed = true; 3320 if (addend == 0) { 3321 MPASS(*len >= 2); 3322 buflen--; 3323 buf[buflen] = '\0'; 3324 slash_prefixed = false; 3325 } 3326 3327 error = 0; 3328 3329 SDT_PROBE1(vfs, namecache, fullpath, entry, vp); 3330 counter_u64_add(numfullpathcalls, 1); 3331 while (vp != rdir && vp != rootvnode) { 3332 /* 3333 * The vp vnode must be already fully constructed, 3334 * since it is either found in namecache or obtained 3335 * from VOP_VPTOCNP(). We may test for VV_ROOT safely 3336 * without obtaining the vnode lock. 3337 */ 3338 if ((vp->v_vflag & VV_ROOT) != 0) { 3339 vn_lock(vp, LK_RETRY | LK_SHARED); 3340 3341 /* 3342 * With the vnode locked, check for races with 3343 * unmount, forced or not. Note that we 3344 * already verified that vp is not equal to 3345 * the root vnode, which means that 3346 * mnt_vnodecovered can be NULL only for the 3347 * case of unmount. 3348 */ 3349 if (VN_IS_DOOMED(vp) || 3350 (vp1 = vp->v_mount->mnt_vnodecovered) == NULL || 3351 vp1->v_mountedhere != vp->v_mount) { 3352 vput(vp); 3353 error = ENOENT; 3354 SDT_PROBE3(vfs, namecache, fullpath, return, 3355 error, vp, NULL); 3356 break; 3357 } 3358 3359 vref(vp1); 3360 vput(vp); 3361 vp = vp1; 3362 continue; 3363 } 3364 if (vp->v_type != VDIR) { 3365 vrele(vp); 3366 counter_u64_add(numfullpathfail1, 1); 3367 error = ENOTDIR; 3368 SDT_PROBE3(vfs, namecache, fullpath, return, 3369 error, vp, NULL); 3370 break; 3371 } 3372 error = vn_vptocnp(&vp, buf, &buflen); 3373 if (error) 3374 break; 3375 if (buflen == 0) { 3376 vrele(vp); 3377 error = ENOMEM; 3378 SDT_PROBE3(vfs, namecache, fullpath, return, error, 3379 startvp, NULL); 3380 break; 3381 } 3382 buf[--buflen] = '/'; 3383 slash_prefixed = true; 3384 } 3385 if (error) 3386 return (error); 3387 if (!slash_prefixed) { 3388 if (buflen == 0) { 3389 vrele(vp); 3390 counter_u64_add(numfullpathfail4, 1); 3391 SDT_PROBE3(vfs, namecache, fullpath, return, ENOMEM, 3392 startvp, NULL); 3393 return (ENOMEM); 3394 } 3395 buf[--buflen] = '/'; 3396 } 3397 counter_u64_add(numfullpathfound, 1); 3398 vrele(vp); 3399 3400 *retbuf = buf + buflen; 3401 SDT_PROBE3(vfs, namecache, fullpath, return, 0, startvp, *retbuf); 3402 *len -= buflen; 3403 *len += addend; 3404 return (0); 3405 } 3406 3407 /* 3408 * Resolve an arbitrary vnode to a pathname. 
3409 * 3410 * Note 2 caveats: 3411 * - hardlinks are not tracked, thus if the vnode is not a directory this can 3412 * resolve to a different path than the one used to find it 3413 * - namecache is not mandatory, meaning names are not guaranteed to be added 3414 * (in which case resolving fails) 3415 */ 3416 static void __inline 3417 cache_rev_failed_impl(int *reason, int line) 3418 { 3419 3420 *reason = line; 3421 } 3422 #define cache_rev_failed(var) cache_rev_failed_impl((var), __LINE__) 3423 3424 static int 3425 vn_fullpath_any_smr(struct vnode *vp, struct vnode *rdir, char *buf, 3426 char **retbuf, size_t *buflen, size_t addend) 3427 { 3428 #ifdef KDTRACE_HOOKS 3429 struct vnode *startvp = vp; 3430 #endif 3431 struct vnode *tvp; 3432 struct mount *mp; 3433 struct namecache *ncp; 3434 size_t orig_buflen; 3435 int reason; 3436 int error; 3437 #ifdef KDTRACE_HOOKS 3438 int i; 3439 #endif 3440 seqc_t vp_seqc, tvp_seqc; 3441 u_char nc_flag; 3442 3443 VFS_SMR_ASSERT_ENTERED(); 3444 3445 if (!atomic_load_char(&cache_fast_lookup_enabled)) { 3446 vfs_smr_exit(); 3447 return (-1); 3448 } 3449 3450 orig_buflen = *buflen; 3451 3452 if (addend == 0) { 3453 MPASS(*buflen >= 2); 3454 *buflen -= 1; 3455 buf[*buflen] = '\0'; 3456 } 3457 3458 if (vp == rdir || vp == rootvnode) { 3459 if (addend == 0) { 3460 *buflen -= 1; 3461 buf[*buflen] = '/'; 3462 } 3463 goto out_ok; 3464 } 3465 3466 #ifdef KDTRACE_HOOKS 3467 i = 0; 3468 #endif 3469 error = -1; 3470 ncp = NULL; /* for sdt probe down below */ 3471 vp_seqc = vn_seqc_read_any(vp); 3472 if (seqc_in_modify(vp_seqc)) { 3473 cache_rev_failed(&reason); 3474 goto out_abort; 3475 } 3476 3477 for (;;) { 3478 #ifdef KDTRACE_HOOKS 3479 i++; 3480 #endif 3481 if ((vp->v_vflag & VV_ROOT) != 0) { 3482 mp = atomic_load_ptr(&vp->v_mount); 3483 if (mp == NULL) { 3484 cache_rev_failed(&reason); 3485 goto out_abort; 3486 } 3487 tvp = atomic_load_ptr(&mp->mnt_vnodecovered); 3488 tvp_seqc = vn_seqc_read_any(tvp); 3489 if (seqc_in_modify(tvp_seqc)) { 3490 cache_rev_failed(&reason); 3491 goto out_abort; 3492 } 3493 if (!vn_seqc_consistent(vp, vp_seqc)) { 3494 cache_rev_failed(&reason); 3495 goto out_abort; 3496 } 3497 vp = tvp; 3498 vp_seqc = tvp_seqc; 3499 continue; 3500 } 3501 ncp = atomic_load_consume_ptr(&vp->v_cache_dd); 3502 if (ncp == NULL) { 3503 cache_rev_failed(&reason); 3504 goto out_abort; 3505 } 3506 nc_flag = atomic_load_char(&ncp->nc_flag); 3507 if ((nc_flag & NCF_ISDOTDOT) != 0) { 3508 cache_rev_failed(&reason); 3509 goto out_abort; 3510 } 3511 if (ncp->nc_nlen >= *buflen) { 3512 cache_rev_failed(&reason); 3513 error = ENOMEM; 3514 goto out_abort; 3515 } 3516 *buflen -= ncp->nc_nlen; 3517 memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen); 3518 *buflen -= 1; 3519 buf[*buflen] = '/'; 3520 tvp = ncp->nc_dvp; 3521 tvp_seqc = vn_seqc_read_any(tvp); 3522 if (seqc_in_modify(tvp_seqc)) { 3523 cache_rev_failed(&reason); 3524 goto out_abort; 3525 } 3526 if (!vn_seqc_consistent(vp, vp_seqc)) { 3527 cache_rev_failed(&reason); 3528 goto out_abort; 3529 } 3530 /* 3531 * Acquire fence provided by vn_seqc_read_any above. 
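 * Together with the vn_seqc_consistent() checks, re-validating v_cache_dd and
 * calling cache_ncp_canuse() below is what lets a concurrent rename or
 * removal be detected instead of silently producing a stale path: the walk
 * aborts and the callers fall back to the locked vn_fullpath_any() or
 * vn_fullpath_dir() variants.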
3532 */ 3533 if (__predict_false(atomic_load_ptr(&vp->v_cache_dd) != ncp)) { 3534 cache_rev_failed(&reason); 3535 goto out_abort; 3536 } 3537 if (!cache_ncp_canuse(ncp)) { 3538 cache_rev_failed(&reason); 3539 goto out_abort; 3540 } 3541 vp = tvp; 3542 vp_seqc = tvp_seqc; 3543 if (vp == rdir || vp == rootvnode) 3544 break; 3545 } 3546 out_ok: 3547 vfs_smr_exit(); 3548 *retbuf = buf + *buflen; 3549 *buflen = orig_buflen - *buflen + addend; 3550 SDT_PROBE2(vfs, namecache, fullpath_smr, hit, startvp, *retbuf); 3551 return (0); 3552 3553 out_abort: 3554 *buflen = orig_buflen; 3555 SDT_PROBE4(vfs, namecache, fullpath_smr, miss, startvp, ncp, reason, i); 3556 vfs_smr_exit(); 3557 return (error); 3558 } 3559 3560 static int 3561 vn_fullpath_any(struct vnode *vp, struct vnode *rdir, char *buf, char **retbuf, 3562 size_t *buflen) 3563 { 3564 size_t orig_buflen, addend; 3565 int error; 3566 3567 if (*buflen < 2) 3568 return (EINVAL); 3569 3570 orig_buflen = *buflen; 3571 3572 vref(vp); 3573 addend = 0; 3574 if (vp->v_type != VDIR) { 3575 *buflen -= 1; 3576 buf[*buflen] = '\0'; 3577 error = vn_vptocnp(&vp, buf, buflen); 3578 if (error) 3579 return (error); 3580 if (*buflen == 0) { 3581 vrele(vp); 3582 return (ENOMEM); 3583 } 3584 *buflen -= 1; 3585 buf[*buflen] = '/'; 3586 addend = orig_buflen - *buflen; 3587 } 3588 3589 return (vn_fullpath_dir(vp, rdir, buf, retbuf, buflen, addend)); 3590 } 3591 3592 /* 3593 * Resolve an arbitrary vnode to a pathname (taking care of hardlinks). 3594 * 3595 * Since the namecache does not track hardlinks, the caller is 3596 * expected to first look up the target vnode with SAVENAME | 3597 * WANTPARENT flags passed to namei to get dvp and vp. 3598 * 3599 * Then we have 2 cases: 3600 * - if the found vnode is a directory, the path can be constructed just by 3601 * following names up the chain 3602 * - otherwise we populate the buffer with the saved name and start resolving 3603 * from the parent 3604 */ 3605 int 3606 vn_fullpath_hardlink(struct vnode *vp, struct vnode *dvp, 3607 const char *hrdl_name, size_t hrdl_name_length, 3608 char **retbuf, char **freebuf, size_t *buflen) 3609 { 3610 char *buf, *tmpbuf; 3611 struct pwd *pwd; 3612 size_t addend; 3613 int error; 3614 enum vtype type; 3615 3616 if (*buflen < 2) 3617 return (EINVAL); 3618 if (*buflen > MAXPATHLEN) 3619 *buflen = MAXPATHLEN; 3620 3621 buf = malloc(*buflen, M_TEMP, M_WAITOK); 3622 3623 addend = 0; 3624 3625 /* 3626 * Check for VBAD to work around the vp_crossmp bug in lookup(). 3627 * 3628 * For example consider tmpfs on /tmp and realpath /tmp. ni_vp will be 3629 * set to mount point's root vnode while ni_dvp will be vp_crossmp. 3630 * If the type is VDIR (like in this very case) we can skip looking 3631 * at ni_dvp in the first place. However, since vnodes get passed here 3632 * unlocked the target may transition to doomed state (type == VBAD) 3633 * before we get to evaluate the condition. If this happens, we will 3634 * populate part of the buffer and descend to vn_fullpath_dir with 3635 * vp == vp_crossmp. Prevent the problem by checking for VBAD. 3636 * 3637 * This should be atomic_load(&vp->v_type) but it is illegal to take 3638 * an address of a bit field, even if said field is sized to char. 3639 * Work around the problem by reading the value into a full-sized enum 3640 * and then re-reading it with atomic_load which will still prevent 3641 * the compiler from re-reading down the road. 
3642 */ 3643 type = vp->v_type; 3644 type = atomic_load_int(&type); 3645 if (type == VBAD) { 3646 error = ENOENT; 3647 goto out_bad; 3648 } 3649 if (type != VDIR) { 3650 addend = hrdl_name_length + 2; 3651 if (*buflen < addend) { 3652 error = ENOMEM; 3653 goto out_bad; 3654 } 3655 *buflen -= addend; 3656 tmpbuf = buf + *buflen; 3657 tmpbuf[0] = '/'; 3658 memcpy(&tmpbuf[1], hrdl_name, hrdl_name_length); 3659 tmpbuf[addend - 1] = '\0'; 3660 vp = dvp; 3661 } 3662 3663 vfs_smr_enter(); 3664 pwd = pwd_get_smr(); 3665 error = vn_fullpath_any_smr(vp, pwd->pwd_rdir, buf, retbuf, buflen, 3666 addend); 3667 VFS_SMR_ASSERT_NOT_ENTERED(); 3668 if (error < 0) { 3669 pwd = pwd_hold(curthread); 3670 vref(vp); 3671 error = vn_fullpath_dir(vp, pwd->pwd_rdir, buf, retbuf, buflen, 3672 addend); 3673 pwd_drop(pwd); 3674 } 3675 if (error != 0) 3676 goto out_bad; 3677 3678 *freebuf = buf; 3679 3680 return (0); 3681 out_bad: 3682 free(buf, M_TEMP); 3683 return (error); 3684 } 3685 3686 struct vnode * 3687 vn_dir_dd_ino(struct vnode *vp) 3688 { 3689 struct namecache *ncp; 3690 struct vnode *ddvp; 3691 struct mtx *vlp; 3692 enum vgetstate vs; 3693 3694 ASSERT_VOP_LOCKED(vp, "vn_dir_dd_ino"); 3695 vlp = VP2VNODELOCK(vp); 3696 mtx_lock(vlp); 3697 TAILQ_FOREACH(ncp, &(vp->v_cache_dst), nc_dst) { 3698 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) 3699 continue; 3700 ddvp = ncp->nc_dvp; 3701 vs = vget_prep(ddvp); 3702 mtx_unlock(vlp); 3703 if (vget_finish(ddvp, LK_SHARED | LK_NOWAIT, vs)) 3704 return (NULL); 3705 return (ddvp); 3706 } 3707 mtx_unlock(vlp); 3708 return (NULL); 3709 } 3710 3711 int 3712 vn_commname(struct vnode *vp, char *buf, u_int buflen) 3713 { 3714 struct namecache *ncp; 3715 struct mtx *vlp; 3716 int l; 3717 3718 vlp = VP2VNODELOCK(vp); 3719 mtx_lock(vlp); 3720 TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst) 3721 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 3722 break; 3723 if (ncp == NULL) { 3724 mtx_unlock(vlp); 3725 return (ENOENT); 3726 } 3727 l = min(ncp->nc_nlen, buflen - 1); 3728 memcpy(buf, ncp->nc_name, l); 3729 mtx_unlock(vlp); 3730 buf[l] = '\0'; 3731 return (0); 3732 } 3733 3734 /* 3735 * This function updates path string to vnode's full global path 3736 * and checks the size of the new path string against the pathlen argument. 3737 * 3738 * Requires a locked, referenced vnode. 3739 * Vnode is re-locked on success or ENODEV, otherwise unlocked. 3740 * 3741 * If vp is a directory, the call to vn_fullpath_global() always succeeds 3742 * because it falls back to the ".." lookup if the namecache lookup fails. 3743 */ 3744 int 3745 vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path, 3746 u_int pathlen) 3747 { 3748 struct nameidata nd; 3749 struct vnode *vp1; 3750 char *rpath, *fbuf; 3751 int error; 3752 3753 ASSERT_VOP_ELOCKED(vp, __func__); 3754 3755 /* Construct global filesystem path from vp. */ 3756 VOP_UNLOCK(vp); 3757 error = vn_fullpath_global(vp, &rpath, &fbuf); 3758 3759 if (error != 0) { 3760 vrele(vp); 3761 return (error); 3762 } 3763 3764 if (strlen(rpath) >= pathlen) { 3765 vrele(vp); 3766 error = ENAMETOOLONG; 3767 goto out; 3768 } 3769 3770 /* 3771 * Re-lookup the vnode by path to detect a possible rename. 3772 * As a side effect, the vnode is relocked. 3773 * If vnode was renamed, return ENOENT. 
3774 */ 3775 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1, UIO_SYSSPACE, path); 3776 error = namei(&nd); 3777 if (error != 0) { 3778 vrele(vp); 3779 goto out; 3780 } 3781 NDFREE(&nd, NDF_ONLY_PNBUF); 3782 vp1 = nd.ni_vp; 3783 vrele(vp); 3784 if (vp1 == vp) 3785 strcpy(path, rpath); 3786 else { 3787 vput(vp1); 3788 error = ENOENT; 3789 } 3790 3791 out: 3792 free(fbuf, M_TEMP); 3793 return (error); 3794 } 3795 3796 #ifdef DDB 3797 static void 3798 db_print_vpath(struct vnode *vp) 3799 { 3800 3801 while (vp != NULL) { 3802 db_printf("%p: ", vp); 3803 if (vp == rootvnode) { 3804 db_printf("/"); 3805 vp = NULL; 3806 } else { 3807 if (vp->v_vflag & VV_ROOT) { 3808 db_printf("<mount point>"); 3809 vp = vp->v_mount->mnt_vnodecovered; 3810 } else { 3811 struct namecache *ncp; 3812 char *ncn; 3813 int i; 3814 3815 ncp = TAILQ_FIRST(&vp->v_cache_dst); 3816 if (ncp != NULL) { 3817 ncn = ncp->nc_name; 3818 for (i = 0; i < ncp->nc_nlen; i++) 3819 db_printf("%c", *ncn++); 3820 vp = ncp->nc_dvp; 3821 } else { 3822 vp = NULL; 3823 } 3824 } 3825 } 3826 db_printf("\n"); 3827 } 3828 3829 return; 3830 } 3831 3832 DB_SHOW_COMMAND(vpath, db_show_vpath) 3833 { 3834 struct vnode *vp; 3835 3836 if (!have_addr) { 3837 db_printf("usage: show vpath <struct vnode *>\n"); 3838 return; 3839 } 3840 3841 vp = (struct vnode *)addr; 3842 db_print_vpath(vp); 3843 } 3844 3845 #endif 3846 3847 static int cache_fast_lookup = 1; 3848 3849 #define CACHE_FPL_FAILED -2020 3850 3851 void 3852 cache_fast_lookup_enabled_recalc(void) 3853 { 3854 int lookup_flag; 3855 int mac_on; 3856 3857 #ifdef MAC 3858 mac_on = mac_vnode_check_lookup_enabled(); 3859 mac_on |= mac_vnode_check_readlink_enabled(); 3860 #else 3861 mac_on = 0; 3862 #endif 3863 3864 lookup_flag = atomic_load_int(&cache_fast_lookup); 3865 if (lookup_flag && !mac_on) { 3866 atomic_store_char(&cache_fast_lookup_enabled, true); 3867 } else { 3868 atomic_store_char(&cache_fast_lookup_enabled, false); 3869 } 3870 } 3871 3872 static int 3873 syscal_vfs_cache_fast_lookup(SYSCTL_HANDLER_ARGS) 3874 { 3875 int error, old; 3876 3877 old = atomic_load_int(&cache_fast_lookup); 3878 error = sysctl_handle_int(oidp, arg1, arg2, req); 3879 if (error == 0 && req->newptr && old != atomic_load_int(&cache_fast_lookup)) 3880 cache_fast_lookup_enabled_recalc(); 3881 return (error); 3882 } 3883 SYSCTL_PROC(_vfs, OID_AUTO, cache_fast_lookup, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_MPSAFE, 3884 &cache_fast_lookup, 0, syscal_vfs_cache_fast_lookup, "IU", ""); 3885 3886 /* 3887 * Components of nameidata (or objects it can point to) which may 3888 * need restoring in case fast path lookup fails. 
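 * The split below mirrors how the data is used: nameidata_outer holds what
 * cache_fpl_restore_abort() genuinely puts back into the nameidata
 * (ni_pathlen and cn_flags), while nameidata_saved carries INVARIANTS-only
 * copies used for internal consistency checking.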
3889 */ 3890 struct nameidata_outer { 3891 size_t ni_pathlen; 3892 int cn_flags; 3893 }; 3894 3895 struct nameidata_saved { 3896 #ifdef INVARIANTS 3897 char *cn_nameptr; 3898 size_t ni_pathlen; 3899 #endif 3900 }; 3901 3902 #ifdef INVARIANTS 3903 struct cache_fpl_debug { 3904 size_t ni_pathlen; 3905 }; 3906 #endif 3907 3908 struct cache_fpl { 3909 struct nameidata *ndp; 3910 struct componentname *cnp; 3911 char *nulchar; 3912 struct vnode *dvp; 3913 struct vnode *tvp; 3914 seqc_t dvp_seqc; 3915 seqc_t tvp_seqc; 3916 uint32_t hash; 3917 struct nameidata_saved snd; 3918 struct nameidata_outer snd_outer; 3919 int line; 3920 enum cache_fpl_status status:8; 3921 bool in_smr; 3922 bool fsearch; 3923 bool savename; 3924 struct pwd **pwd; 3925 #ifdef INVARIANTS 3926 struct cache_fpl_debug debug; 3927 #endif 3928 }; 3929 3930 static bool cache_fplookup_mp_supported(struct mount *mp); 3931 static bool cache_fplookup_is_mp(struct cache_fpl *fpl); 3932 static int cache_fplookup_cross_mount(struct cache_fpl *fpl); 3933 static int cache_fplookup_partial_setup(struct cache_fpl *fpl); 3934 static int cache_fplookup_skip_slashes(struct cache_fpl *fpl); 3935 static int cache_fplookup_trailingslash(struct cache_fpl *fpl); 3936 static void cache_fpl_pathlen_dec(struct cache_fpl *fpl); 3937 static void cache_fpl_pathlen_inc(struct cache_fpl *fpl); 3938 static void cache_fpl_pathlen_add(struct cache_fpl *fpl, size_t n); 3939 static void cache_fpl_pathlen_sub(struct cache_fpl *fpl, size_t n); 3940 3941 static void 3942 cache_fpl_cleanup_cnp(struct componentname *cnp) 3943 { 3944 3945 uma_zfree(namei_zone, cnp->cn_pnbuf); 3946 #ifdef DIAGNOSTIC 3947 cnp->cn_pnbuf = NULL; 3948 cnp->cn_nameptr = NULL; 3949 #endif 3950 } 3951 3952 static struct vnode * 3953 cache_fpl_handle_root(struct cache_fpl *fpl) 3954 { 3955 struct nameidata *ndp; 3956 struct componentname *cnp; 3957 3958 ndp = fpl->ndp; 3959 cnp = fpl->cnp; 3960 3961 MPASS(*(cnp->cn_nameptr) == '/'); 3962 cnp->cn_nameptr++; 3963 cache_fpl_pathlen_dec(fpl); 3964 3965 if (__predict_false(*(cnp->cn_nameptr) == '/')) { 3966 do { 3967 cnp->cn_nameptr++; 3968 cache_fpl_pathlen_dec(fpl); 3969 } while (*(cnp->cn_nameptr) == '/'); 3970 } 3971 3972 return (ndp->ni_rootdir); 3973 } 3974 3975 static void 3976 cache_fpl_checkpoint_outer(struct cache_fpl *fpl) 3977 { 3978 3979 fpl->snd_outer.ni_pathlen = fpl->ndp->ni_pathlen; 3980 fpl->snd_outer.cn_flags = fpl->ndp->ni_cnd.cn_flags; 3981 } 3982 3983 static void 3984 cache_fpl_checkpoint(struct cache_fpl *fpl) 3985 { 3986 3987 #ifdef INVARIANTS 3988 fpl->snd.cn_nameptr = fpl->ndp->ni_cnd.cn_nameptr; 3989 fpl->snd.ni_pathlen = fpl->debug.ni_pathlen; 3990 #endif 3991 } 3992 3993 static void 3994 cache_fpl_restore_partial(struct cache_fpl *fpl) 3995 { 3996 3997 fpl->ndp->ni_cnd.cn_flags = fpl->snd_outer.cn_flags; 3998 #ifdef INVARIANTS 3999 fpl->debug.ni_pathlen = fpl->snd.ni_pathlen; 4000 #endif 4001 } 4002 4003 static void 4004 cache_fpl_restore_abort(struct cache_fpl *fpl) 4005 { 4006 4007 cache_fpl_restore_partial(fpl); 4008 /* 4009 * It is 0 on entry by API contract. 
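 * (namei() hands the request over with ni_resflags cleared, see the MPASS
 * in cache_fplookup(); resetting it here discards anything the aborted fast
 * path may have set, e.g. NIRES_EMPTYPATH.)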
4010 */ 4011 fpl->ndp->ni_resflags = 0; 4012 fpl->ndp->ni_cnd.cn_nameptr = fpl->ndp->ni_cnd.cn_pnbuf; 4013 fpl->ndp->ni_pathlen = fpl->snd_outer.ni_pathlen; 4014 } 4015 4016 #ifdef INVARIANTS 4017 #define cache_fpl_smr_assert_entered(fpl) ({ \ 4018 struct cache_fpl *_fpl = (fpl); \ 4019 MPASS(_fpl->in_smr == true); \ 4020 VFS_SMR_ASSERT_ENTERED(); \ 4021 }) 4022 #define cache_fpl_smr_assert_not_entered(fpl) ({ \ 4023 struct cache_fpl *_fpl = (fpl); \ 4024 MPASS(_fpl->in_smr == false); \ 4025 VFS_SMR_ASSERT_NOT_ENTERED(); \ 4026 }) 4027 static void 4028 cache_fpl_assert_status(struct cache_fpl *fpl) 4029 { 4030 4031 switch (fpl->status) { 4032 case CACHE_FPL_STATUS_UNSET: 4033 __assert_unreachable(); 4034 break; 4035 case CACHE_FPL_STATUS_DESTROYED: 4036 case CACHE_FPL_STATUS_ABORTED: 4037 case CACHE_FPL_STATUS_PARTIAL: 4038 case CACHE_FPL_STATUS_HANDLED: 4039 break; 4040 } 4041 } 4042 #else 4043 #define cache_fpl_smr_assert_entered(fpl) do { } while (0) 4044 #define cache_fpl_smr_assert_not_entered(fpl) do { } while (0) 4045 #define cache_fpl_assert_status(fpl) do { } while (0) 4046 #endif 4047 4048 #define cache_fpl_smr_enter_initial(fpl) ({ \ 4049 struct cache_fpl *_fpl = (fpl); \ 4050 vfs_smr_enter(); \ 4051 _fpl->in_smr = true; \ 4052 }) 4053 4054 #define cache_fpl_smr_enter(fpl) ({ \ 4055 struct cache_fpl *_fpl = (fpl); \ 4056 MPASS(_fpl->in_smr == false); \ 4057 vfs_smr_enter(); \ 4058 _fpl->in_smr = true; \ 4059 }) 4060 4061 #define cache_fpl_smr_exit(fpl) ({ \ 4062 struct cache_fpl *_fpl = (fpl); \ 4063 MPASS(_fpl->in_smr == true); \ 4064 vfs_smr_exit(); \ 4065 _fpl->in_smr = false; \ 4066 }) 4067 4068 static int 4069 cache_fpl_aborted_early_impl(struct cache_fpl *fpl, int line) 4070 { 4071 4072 if (fpl->status != CACHE_FPL_STATUS_UNSET) { 4073 KASSERT(fpl->status == CACHE_FPL_STATUS_PARTIAL, 4074 ("%s: converting to abort from %d at %d, set at %d\n", 4075 __func__, fpl->status, line, fpl->line)); 4076 } 4077 cache_fpl_smr_assert_not_entered(fpl); 4078 fpl->status = CACHE_FPL_STATUS_ABORTED; 4079 fpl->line = line; 4080 return (CACHE_FPL_FAILED); 4081 } 4082 4083 #define cache_fpl_aborted_early(x) cache_fpl_aborted_early_impl((x), __LINE__) 4084 4085 static int __noinline 4086 cache_fpl_aborted_impl(struct cache_fpl *fpl, int line) 4087 { 4088 struct nameidata *ndp; 4089 struct componentname *cnp; 4090 4091 ndp = fpl->ndp; 4092 cnp = fpl->cnp; 4093 4094 if (fpl->status != CACHE_FPL_STATUS_UNSET) { 4095 KASSERT(fpl->status == CACHE_FPL_STATUS_PARTIAL, 4096 ("%s: converting to abort from %d at %d, set at %d\n", 4097 __func__, fpl->status, line, fpl->line)); 4098 } 4099 fpl->status = CACHE_FPL_STATUS_ABORTED; 4100 fpl->line = line; 4101 if (fpl->in_smr) 4102 cache_fpl_smr_exit(fpl); 4103 cache_fpl_restore_abort(fpl); 4104 /* 4105 * Resolving symlinks overwrites data passed by the caller. 4106 * Let namei know. 
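 * (ni_loopcnt > 0 means cache_symlink_resolve() already rewrote cn_pnbuf,
 * so the original path cannot be restored; the DESTROYED status together
 * with freeing the buffer tells namei() that a transparent retry with the
 * locked lookup is no longer possible.)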
4107 */ 4108 if (ndp->ni_loopcnt > 0) { 4109 fpl->status = CACHE_FPL_STATUS_DESTROYED; 4110 cache_fpl_cleanup_cnp(cnp); 4111 } 4112 return (CACHE_FPL_FAILED); 4113 } 4114 4115 #define cache_fpl_aborted(x) cache_fpl_aborted_impl((x), __LINE__) 4116 4117 static int __noinline 4118 cache_fpl_partial_impl(struct cache_fpl *fpl, int line) 4119 { 4120 4121 KASSERT(fpl->status == CACHE_FPL_STATUS_UNSET, 4122 ("%s: setting to partial at %d, but already set to %d at %d\n", 4123 __func__, line, fpl->status, fpl->line)); 4124 cache_fpl_smr_assert_entered(fpl); 4125 fpl->status = CACHE_FPL_STATUS_PARTIAL; 4126 fpl->line = line; 4127 return (cache_fplookup_partial_setup(fpl)); 4128 } 4129 4130 #define cache_fpl_partial(x) cache_fpl_partial_impl((x), __LINE__) 4131 4132 static int 4133 cache_fpl_handled_impl(struct cache_fpl *fpl, int line) 4134 { 4135 4136 KASSERT(fpl->status == CACHE_FPL_STATUS_UNSET, 4137 ("%s: setting to handled at %d, but already set to %d at %d\n", 4138 __func__, line, fpl->status, fpl->line)); 4139 cache_fpl_smr_assert_not_entered(fpl); 4140 fpl->status = CACHE_FPL_STATUS_HANDLED; 4141 fpl->line = line; 4142 return (0); 4143 } 4144 4145 #define cache_fpl_handled(x) cache_fpl_handled_impl((x), __LINE__) 4146 4147 static int 4148 cache_fpl_handled_error_impl(struct cache_fpl *fpl, int error, int line) 4149 { 4150 4151 KASSERT(fpl->status == CACHE_FPL_STATUS_UNSET, 4152 ("%s: setting to handled at %d, but already set to %d at %d\n", 4153 __func__, line, fpl->status, fpl->line)); 4154 MPASS(error != 0); 4155 MPASS(error != CACHE_FPL_FAILED); 4156 cache_fpl_smr_assert_not_entered(fpl); 4157 fpl->status = CACHE_FPL_STATUS_HANDLED; 4158 fpl->line = line; 4159 fpl->dvp = NULL; 4160 fpl->tvp = NULL; 4161 fpl->savename = false; 4162 return (error); 4163 } 4164 4165 #define cache_fpl_handled_error(x, e) cache_fpl_handled_error_impl((x), (e), __LINE__) 4166 4167 static bool 4168 cache_fpl_terminated(struct cache_fpl *fpl) 4169 { 4170 4171 return (fpl->status != CACHE_FPL_STATUS_UNSET); 4172 } 4173 4174 #define CACHE_FPL_SUPPORTED_CN_FLAGS \ 4175 (NC_NOMAKEENTRY | NC_KEEPPOSENTRY | LOCKLEAF | LOCKPARENT | WANTPARENT | \ 4176 FAILIFEXISTS | FOLLOW | EMPTYPATH | LOCKSHARED | SAVENAME | SAVESTART | \ 4177 WILLBEDIR | ISOPEN | NOMACCHECK | AUDITVNODE1 | AUDITVNODE2 | NOCAPCHECK | \ 4178 OPENREAD | OPENWRITE) 4179 4180 #define CACHE_FPL_INTERNAL_CN_FLAGS \ 4181 (ISDOTDOT | MAKEENTRY | ISLASTCN) 4182 4183 _Static_assert((CACHE_FPL_SUPPORTED_CN_FLAGS & CACHE_FPL_INTERNAL_CN_FLAGS) == 0, 4184 "supported and internal flags overlap"); 4185 4186 static bool 4187 cache_fpl_islastcn(struct nameidata *ndp) 4188 { 4189 4190 return (*ndp->ni_next == 0); 4191 } 4192 4193 static bool 4194 cache_fpl_istrailingslash(struct cache_fpl *fpl) 4195 { 4196 4197 MPASS(fpl->nulchar > fpl->cnp->cn_pnbuf); 4198 return (*(fpl->nulchar - 1) == '/'); 4199 } 4200 4201 static bool 4202 cache_fpl_isdotdot(struct componentname *cnp) 4203 { 4204 4205 if (cnp->cn_namelen == 2 && 4206 cnp->cn_nameptr[1] == '.' 
&& cnp->cn_nameptr[0] == '.') 4207 return (true); 4208 return (false); 4209 } 4210 4211 static bool 4212 cache_can_fplookup(struct cache_fpl *fpl) 4213 { 4214 struct nameidata *ndp; 4215 struct componentname *cnp; 4216 struct thread *td; 4217 4218 ndp = fpl->ndp; 4219 cnp = fpl->cnp; 4220 td = curthread; 4221 4222 if (!atomic_load_char(&cache_fast_lookup_enabled)) { 4223 cache_fpl_aborted_early(fpl); 4224 return (false); 4225 } 4226 if ((cnp->cn_flags & ~CACHE_FPL_SUPPORTED_CN_FLAGS) != 0) { 4227 cache_fpl_aborted_early(fpl); 4228 return (false); 4229 } 4230 if (IN_CAPABILITY_MODE(td)) { 4231 cache_fpl_aborted_early(fpl); 4232 return (false); 4233 } 4234 if (AUDITING_TD(td)) { 4235 cache_fpl_aborted_early(fpl); 4236 return (false); 4237 } 4238 if (ndp->ni_startdir != NULL) { 4239 cache_fpl_aborted_early(fpl); 4240 return (false); 4241 } 4242 return (true); 4243 } 4244 4245 static int __noinline 4246 cache_fplookup_dirfd(struct cache_fpl *fpl, struct vnode **vpp) 4247 { 4248 struct nameidata *ndp; 4249 struct componentname *cnp; 4250 int error; 4251 bool fsearch; 4252 4253 ndp = fpl->ndp; 4254 cnp = fpl->cnp; 4255 4256 error = fgetvp_lookup_smr(ndp->ni_dirfd, ndp, vpp, &fsearch); 4257 if (__predict_false(error != 0)) { 4258 return (cache_fpl_aborted(fpl)); 4259 } 4260 fpl->fsearch = fsearch; 4261 if ((*vpp)->v_type != VDIR) { 4262 if (!((cnp->cn_flags & EMPTYPATH) != 0 && cnp->cn_pnbuf[0] == '\0')) { 4263 cache_fpl_smr_exit(fpl); 4264 return (cache_fpl_handled_error(fpl, ENOTDIR)); 4265 } 4266 } 4267 return (0); 4268 } 4269 4270 static int __noinline 4271 cache_fplookup_negative_promote(struct cache_fpl *fpl, struct namecache *oncp, 4272 uint32_t hash) 4273 { 4274 struct componentname *cnp; 4275 struct vnode *dvp; 4276 4277 cnp = fpl->cnp; 4278 dvp = fpl->dvp; 4279 4280 cache_fpl_smr_exit(fpl); 4281 if (cache_neg_promote_cond(dvp, cnp, oncp, hash)) 4282 return (cache_fpl_handled_error(fpl, ENOENT)); 4283 else 4284 return (cache_fpl_aborted(fpl)); 4285 } 4286 4287 /* 4288 * The target vnode is not supported, prepare for the slow path to take over. 4289 */ 4290 static int __noinline 4291 cache_fplookup_partial_setup(struct cache_fpl *fpl) 4292 { 4293 struct nameidata *ndp; 4294 struct componentname *cnp; 4295 enum vgetstate dvs; 4296 struct vnode *dvp; 4297 struct pwd *pwd; 4298 seqc_t dvp_seqc; 4299 4300 ndp = fpl->ndp; 4301 cnp = fpl->cnp; 4302 pwd = *(fpl->pwd); 4303 dvp = fpl->dvp; 4304 dvp_seqc = fpl->dvp_seqc; 4305 4306 if (!pwd_hold_smr(pwd)) { 4307 return (cache_fpl_aborted(fpl)); 4308 } 4309 4310 /* 4311 * Note that seqc is checked before the vnode is locked, so by 4312 * the time regular lookup gets to it it may have moved. 4313 * 4314 * Ultimately this does not affect correctness, any lookup errors 4315 * are userspace racing with itself. It is guaranteed that any 4316 * path which ultimately gets found could also have been found 4317 * by regular lookup going all the way in absence of concurrent 4318 * modifications. 
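 * For instance, should a concurrent rename move the directory between the
 * seqc check and the moment the locked lookup locks it, the lookup simply
 * continues at the directory's new location, an outcome which a fully
 * locked walk racing with the same rename could also have produced.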
4319 */ 4320 dvs = vget_prep_smr(dvp); 4321 cache_fpl_smr_exit(fpl); 4322 if (__predict_false(dvs == VGET_NONE)) { 4323 pwd_drop(pwd); 4324 return (cache_fpl_aborted(fpl)); 4325 } 4326 4327 vget_finish_ref(dvp, dvs); 4328 if (!vn_seqc_consistent(dvp, dvp_seqc)) { 4329 vrele(dvp); 4330 pwd_drop(pwd); 4331 return (cache_fpl_aborted(fpl)); 4332 } 4333 4334 cache_fpl_restore_partial(fpl); 4335 #ifdef INVARIANTS 4336 if (cnp->cn_nameptr != fpl->snd.cn_nameptr) { 4337 panic("%s: cn_nameptr mismatch (%p != %p) full [%s]\n", __func__, 4338 cnp->cn_nameptr, fpl->snd.cn_nameptr, cnp->cn_pnbuf); 4339 } 4340 #endif 4341 4342 ndp->ni_startdir = dvp; 4343 cnp->cn_flags |= MAKEENTRY; 4344 if (cache_fpl_islastcn(ndp)) 4345 cnp->cn_flags |= ISLASTCN; 4346 if (cache_fpl_isdotdot(cnp)) 4347 cnp->cn_flags |= ISDOTDOT; 4348 4349 /* 4350 * Skip potential extra slashes parsing did not take care of. 4351 * cache_fplookup_skip_slashes explains the mechanism. 4352 */ 4353 if (__predict_false(*(cnp->cn_nameptr) == '/')) { 4354 do { 4355 cnp->cn_nameptr++; 4356 cache_fpl_pathlen_dec(fpl); 4357 } while (*(cnp->cn_nameptr) == '/'); 4358 } 4359 4360 ndp->ni_pathlen = fpl->nulchar - cnp->cn_nameptr + 1; 4361 #ifdef INVARIANTS 4362 if (ndp->ni_pathlen != fpl->debug.ni_pathlen) { 4363 panic("%s: mismatch (%zu != %zu) nulchar %p nameptr %p [%s] ; full string [%s]\n", 4364 __func__, ndp->ni_pathlen, fpl->debug.ni_pathlen, fpl->nulchar, 4365 cnp->cn_nameptr, cnp->cn_nameptr, cnp->cn_pnbuf); 4366 } 4367 #endif 4368 return (0); 4369 } 4370 4371 static int 4372 cache_fplookup_final_child(struct cache_fpl *fpl, enum vgetstate tvs) 4373 { 4374 struct componentname *cnp; 4375 struct vnode *tvp; 4376 seqc_t tvp_seqc; 4377 int error, lkflags; 4378 4379 cnp = fpl->cnp; 4380 tvp = fpl->tvp; 4381 tvp_seqc = fpl->tvp_seqc; 4382 4383 if ((cnp->cn_flags & LOCKLEAF) != 0) { 4384 lkflags = LK_SHARED; 4385 if ((cnp->cn_flags & LOCKSHARED) == 0) 4386 lkflags = LK_EXCLUSIVE; 4387 error = vget_finish(tvp, lkflags, tvs); 4388 if (__predict_false(error != 0)) { 4389 return (cache_fpl_aborted(fpl)); 4390 } 4391 } else { 4392 vget_finish_ref(tvp, tvs); 4393 } 4394 4395 if (!vn_seqc_consistent(tvp, tvp_seqc)) { 4396 if ((cnp->cn_flags & LOCKLEAF) != 0) 4397 vput(tvp); 4398 else 4399 vrele(tvp); 4400 return (cache_fpl_aborted(fpl)); 4401 } 4402 4403 return (cache_fpl_handled(fpl)); 4404 } 4405 4406 /* 4407 * They want to possibly modify the state of the namecache. 4408 */ 4409 static int __noinline 4410 cache_fplookup_final_modifying(struct cache_fpl *fpl) 4411 { 4412 struct nameidata *ndp; 4413 struct componentname *cnp; 4414 enum vgetstate dvs; 4415 struct vnode *dvp, *tvp; 4416 struct mount *mp; 4417 seqc_t dvp_seqc; 4418 int error; 4419 bool docache; 4420 4421 ndp = fpl->ndp; 4422 cnp = fpl->cnp; 4423 dvp = fpl->dvp; 4424 dvp_seqc = fpl->dvp_seqc; 4425 4426 MPASS(*(cnp->cn_nameptr) != '/'); 4427 MPASS(cache_fpl_islastcn(ndp)); 4428 if ((cnp->cn_flags & LOCKPARENT) == 0) 4429 MPASS((cnp->cn_flags & WANTPARENT) != 0); 4430 MPASS((cnp->cn_flags & TRAILINGSLASH) == 0); 4431 MPASS(cnp->cn_nameiop == CREATE || cnp->cn_nameiop == DELETE || 4432 cnp->cn_nameiop == RENAME); 4433 MPASS((cnp->cn_flags & MAKEENTRY) == 0); 4434 MPASS((cnp->cn_flags & ISDOTDOT) == 0); 4435 4436 docache = (cnp->cn_flags & NOCACHE) ^ NOCACHE; 4437 if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME) 4438 docache = false; 4439 4440 /* 4441 * Regular lookup nulifies the slash, which we don't do here. 
4442 * Don't take chances with filesystem routines seeing it for 4443 * the last entry. 4444 */ 4445 if (cache_fpl_istrailingslash(fpl)) { 4446 return (cache_fpl_partial(fpl)); 4447 } 4448 4449 mp = atomic_load_ptr(&dvp->v_mount); 4450 if (__predict_false(mp == NULL)) { 4451 return (cache_fpl_aborted(fpl)); 4452 } 4453 4454 if (__predict_false(mp->mnt_flag & MNT_RDONLY)) { 4455 cache_fpl_smr_exit(fpl); 4456 /* 4457 * Original code keeps not checking for CREATE which 4458 * might be a bug. For now let the old lookup decide. 4459 */ 4460 if (cnp->cn_nameiop == CREATE) { 4461 return (cache_fpl_aborted(fpl)); 4462 } 4463 return (cache_fpl_handled_error(fpl, EROFS)); 4464 } 4465 4466 if (fpl->tvp != NULL && (cnp->cn_flags & FAILIFEXISTS) != 0) { 4467 cache_fpl_smr_exit(fpl); 4468 return (cache_fpl_handled_error(fpl, EEXIST)); 4469 } 4470 4471 /* 4472 * Secure access to dvp; check cache_fplookup_partial_setup for 4473 * reasoning. 4474 * 4475 * XXX At least UFS requires its lookup routine to be called for 4476 * the last path component, which leads to some level of complication 4477 * and inefficiency: 4478 * - the target routine always locks the target vnode, but our caller 4479 * may not need it locked 4480 * - some of the VOP machinery asserts that the parent is locked, which 4481 * once more may be not required 4482 * 4483 * TODO: add a flag for filesystems which don't need this. 4484 */ 4485 dvs = vget_prep_smr(dvp); 4486 cache_fpl_smr_exit(fpl); 4487 if (__predict_false(dvs == VGET_NONE)) { 4488 return (cache_fpl_aborted(fpl)); 4489 } 4490 4491 vget_finish_ref(dvp, dvs); 4492 if (!vn_seqc_consistent(dvp, dvp_seqc)) { 4493 vrele(dvp); 4494 return (cache_fpl_aborted(fpl)); 4495 } 4496 4497 error = vn_lock(dvp, LK_EXCLUSIVE); 4498 if (__predict_false(error != 0)) { 4499 vrele(dvp); 4500 return (cache_fpl_aborted(fpl)); 4501 } 4502 4503 tvp = NULL; 4504 cnp->cn_flags |= ISLASTCN; 4505 if (docache) 4506 cnp->cn_flags |= MAKEENTRY; 4507 if (cache_fpl_isdotdot(cnp)) 4508 cnp->cn_flags |= ISDOTDOT; 4509 cnp->cn_lkflags = LK_EXCLUSIVE; 4510 error = VOP_LOOKUP(dvp, &tvp, cnp); 4511 switch (error) { 4512 case EJUSTRETURN: 4513 case 0: 4514 break; 4515 case ENOTDIR: 4516 case ENOENT: 4517 vput(dvp); 4518 return (cache_fpl_handled_error(fpl, error)); 4519 default: 4520 vput(dvp); 4521 return (cache_fpl_aborted(fpl)); 4522 } 4523 4524 fpl->tvp = tvp; 4525 fpl->savename = (cnp->cn_flags & SAVENAME) != 0; 4526 4527 if (tvp == NULL) { 4528 if ((cnp->cn_flags & SAVESTART) != 0) { 4529 ndp->ni_startdir = dvp; 4530 vrefact(ndp->ni_startdir); 4531 cnp->cn_flags |= SAVENAME; 4532 fpl->savename = true; 4533 } 4534 MPASS(error == EJUSTRETURN); 4535 if ((cnp->cn_flags & LOCKPARENT) == 0) { 4536 VOP_UNLOCK(dvp); 4537 } 4538 return (cache_fpl_handled(fpl)); 4539 } 4540 4541 /* 4542 * There are very hairy corner cases concerning various flag combinations 4543 * and locking state. In particular here we only hold one lock instead of 4544 * two. 4545 * 4546 * Skip the complexity as it is of no significance for normal workloads. 4547 */ 4548 if (__predict_false(tvp == dvp)) { 4549 vput(dvp); 4550 vrele(tvp); 4551 return (cache_fpl_aborted(fpl)); 4552 } 4553 4554 /* 4555 * If they want the symlink itself we are fine, but if they want to 4556 * follow it regular lookup has to be engaged. 
4557 */ 4558 if (tvp->v_type == VLNK) { 4559 if ((cnp->cn_flags & FOLLOW) != 0) { 4560 vput(dvp); 4561 vput(tvp); 4562 return (cache_fpl_aborted(fpl)); 4563 } 4564 } 4565 4566 /* 4567 * Since we expect this to be the terminal vnode it should almost never 4568 * be a mount point. 4569 */ 4570 if (__predict_false(cache_fplookup_is_mp(fpl))) { 4571 vput(dvp); 4572 vput(tvp); 4573 return (cache_fpl_aborted(fpl)); 4574 } 4575 4576 if ((cnp->cn_flags & FAILIFEXISTS) != 0) { 4577 vput(dvp); 4578 vput(tvp); 4579 return (cache_fpl_handled_error(fpl, EEXIST)); 4580 } 4581 4582 if ((cnp->cn_flags & LOCKLEAF) == 0) { 4583 VOP_UNLOCK(tvp); 4584 } 4585 4586 if ((cnp->cn_flags & LOCKPARENT) == 0) { 4587 VOP_UNLOCK(dvp); 4588 } 4589 4590 if ((cnp->cn_flags & SAVESTART) != 0) { 4591 ndp->ni_startdir = dvp; 4592 vrefact(ndp->ni_startdir); 4593 cnp->cn_flags |= SAVENAME; 4594 fpl->savename = true; 4595 } 4596 4597 return (cache_fpl_handled(fpl)); 4598 } 4599 4600 static int __noinline 4601 cache_fplookup_modifying(struct cache_fpl *fpl) 4602 { 4603 struct nameidata *ndp; 4604 4605 ndp = fpl->ndp; 4606 4607 if (!cache_fpl_islastcn(ndp)) { 4608 return (cache_fpl_partial(fpl)); 4609 } 4610 return (cache_fplookup_final_modifying(fpl)); 4611 } 4612 4613 static int __noinline 4614 cache_fplookup_final_withparent(struct cache_fpl *fpl) 4615 { 4616 struct componentname *cnp; 4617 enum vgetstate dvs, tvs; 4618 struct vnode *dvp, *tvp; 4619 seqc_t dvp_seqc; 4620 int error; 4621 4622 cnp = fpl->cnp; 4623 dvp = fpl->dvp; 4624 dvp_seqc = fpl->dvp_seqc; 4625 tvp = fpl->tvp; 4626 4627 MPASS((cnp->cn_flags & (LOCKPARENT|WANTPARENT)) != 0); 4628 4629 /* 4630 * This is less efficient than it can be for simplicity. 4631 */ 4632 dvs = vget_prep_smr(dvp); 4633 if (__predict_false(dvs == VGET_NONE)) { 4634 return (cache_fpl_aborted(fpl)); 4635 } 4636 tvs = vget_prep_smr(tvp); 4637 if (__predict_false(tvs == VGET_NONE)) { 4638 cache_fpl_smr_exit(fpl); 4639 vget_abort(dvp, dvs); 4640 return (cache_fpl_aborted(fpl)); 4641 } 4642 4643 cache_fpl_smr_exit(fpl); 4644 4645 if ((cnp->cn_flags & LOCKPARENT) != 0) { 4646 error = vget_finish(dvp, LK_EXCLUSIVE, dvs); 4647 if (__predict_false(error != 0)) { 4648 vget_abort(tvp, tvs); 4649 return (cache_fpl_aborted(fpl)); 4650 } 4651 } else { 4652 vget_finish_ref(dvp, dvs); 4653 } 4654 4655 if (!vn_seqc_consistent(dvp, dvp_seqc)) { 4656 vget_abort(tvp, tvs); 4657 if ((cnp->cn_flags & LOCKPARENT) != 0) 4658 vput(dvp); 4659 else 4660 vrele(dvp); 4661 return (cache_fpl_aborted(fpl)); 4662 } 4663 4664 error = cache_fplookup_final_child(fpl, tvs); 4665 if (__predict_false(error != 0)) { 4666 MPASS(fpl->status == CACHE_FPL_STATUS_ABORTED || 4667 fpl->status == CACHE_FPL_STATUS_DESTROYED); 4668 if ((cnp->cn_flags & LOCKPARENT) != 0) 4669 vput(dvp); 4670 else 4671 vrele(dvp); 4672 return (error); 4673 } 4674 4675 MPASS(fpl->status == CACHE_FPL_STATUS_HANDLED); 4676 return (0); 4677 } 4678 4679 static int 4680 cache_fplookup_final(struct cache_fpl *fpl) 4681 { 4682 struct componentname *cnp; 4683 enum vgetstate tvs; 4684 struct vnode *dvp, *tvp; 4685 seqc_t dvp_seqc; 4686 4687 cnp = fpl->cnp; 4688 dvp = fpl->dvp; 4689 dvp_seqc = fpl->dvp_seqc; 4690 tvp = fpl->tvp; 4691 4692 MPASS(*(cnp->cn_nameptr) != '/'); 4693 4694 if (cnp->cn_nameiop != LOOKUP) { 4695 return (cache_fplookup_final_modifying(fpl)); 4696 } 4697 4698 if ((cnp->cn_flags & (LOCKPARENT|WANTPARENT)) != 0) 4699 return (cache_fplookup_final_withparent(fpl)); 4700 4701 tvs = vget_prep_smr(tvp); 4702 if (__predict_false(tvs == VGET_NONE)) { 4703 
return (cache_fpl_partial(fpl)); 4704 } 4705 4706 if (!vn_seqc_consistent(dvp, dvp_seqc)) { 4707 cache_fpl_smr_exit(fpl); 4708 vget_abort(tvp, tvs); 4709 return (cache_fpl_aborted(fpl)); 4710 } 4711 4712 cache_fpl_smr_exit(fpl); 4713 return (cache_fplookup_final_child(fpl, tvs)); 4714 } 4715 4716 /* 4717 * Comment from locked lookup: 4718 * Check for degenerate name (e.g. / or "") which is a way of talking about a 4719 * directory, e.g. like "/." or ".". 4720 */ 4721 static int __noinline 4722 cache_fplookup_degenerate(struct cache_fpl *fpl) 4723 { 4724 struct componentname *cnp; 4725 struct vnode *dvp; 4726 enum vgetstate dvs; 4727 int error, lkflags; 4728 #ifdef INVARIANTS 4729 char *cp; 4730 #endif 4731 4732 fpl->tvp = fpl->dvp; 4733 fpl->tvp_seqc = fpl->dvp_seqc; 4734 4735 cnp = fpl->cnp; 4736 dvp = fpl->dvp; 4737 4738 #ifdef INVARIANTS 4739 for (cp = cnp->cn_pnbuf; *cp != '\0'; cp++) { 4740 KASSERT(*cp == '/', 4741 ("%s: encountered non-slash; string [%s]\n", __func__, 4742 cnp->cn_pnbuf)); 4743 } 4744 #endif 4745 4746 if (__predict_false(cnp->cn_nameiop != LOOKUP)) { 4747 cache_fpl_smr_exit(fpl); 4748 return (cache_fpl_handled_error(fpl, EISDIR)); 4749 } 4750 4751 MPASS((cnp->cn_flags & SAVESTART) == 0); 4752 4753 if ((cnp->cn_flags & (LOCKPARENT|WANTPARENT)) != 0) { 4754 return (cache_fplookup_final_withparent(fpl)); 4755 } 4756 4757 dvs = vget_prep_smr(dvp); 4758 cache_fpl_smr_exit(fpl); 4759 if (__predict_false(dvs == VGET_NONE)) { 4760 return (cache_fpl_aborted(fpl)); 4761 } 4762 4763 if ((cnp->cn_flags & LOCKLEAF) != 0) { 4764 lkflags = LK_SHARED; 4765 if ((cnp->cn_flags & LOCKSHARED) == 0) 4766 lkflags = LK_EXCLUSIVE; 4767 error = vget_finish(dvp, lkflags, dvs); 4768 if (__predict_false(error != 0)) { 4769 return (cache_fpl_aborted(fpl)); 4770 } 4771 } else { 4772 vget_finish_ref(dvp, dvs); 4773 } 4774 return (cache_fpl_handled(fpl)); 4775 } 4776 4777 static int __noinline 4778 cache_fplookup_emptypath(struct cache_fpl *fpl) 4779 { 4780 struct nameidata *ndp; 4781 struct componentname *cnp; 4782 enum vgetstate tvs; 4783 struct vnode *tvp; 4784 int error, lkflags; 4785 4786 fpl->tvp = fpl->dvp; 4787 fpl->tvp_seqc = fpl->dvp_seqc; 4788 4789 ndp = fpl->ndp; 4790 cnp = fpl->cnp; 4791 tvp = fpl->tvp; 4792 4793 MPASS(*cnp->cn_pnbuf == '\0'); 4794 4795 if (__predict_false((cnp->cn_flags & EMPTYPATH) == 0)) { 4796 cache_fpl_smr_exit(fpl); 4797 return (cache_fpl_handled_error(fpl, ENOENT)); 4798 } 4799 4800 MPASS((cnp->cn_flags & (LOCKPARENT | WANTPARENT)) == 0); 4801 4802 tvs = vget_prep_smr(tvp); 4803 cache_fpl_smr_exit(fpl); 4804 if (__predict_false(tvs == VGET_NONE)) { 4805 return (cache_fpl_aborted(fpl)); 4806 } 4807 4808 if ((cnp->cn_flags & LOCKLEAF) != 0) { 4809 lkflags = LK_SHARED; 4810 if ((cnp->cn_flags & LOCKSHARED) == 0) 4811 lkflags = LK_EXCLUSIVE; 4812 error = vget_finish(tvp, lkflags, tvs); 4813 if (__predict_false(error != 0)) { 4814 return (cache_fpl_aborted(fpl)); 4815 } 4816 } else { 4817 vget_finish_ref(tvp, tvs); 4818 } 4819 4820 ndp->ni_resflags |= NIRES_EMPTYPATH; 4821 return (cache_fpl_handled(fpl)); 4822 } 4823 4824 static int __noinline 4825 cache_fplookup_noentry(struct cache_fpl *fpl) 4826 { 4827 struct nameidata *ndp; 4828 struct componentname *cnp; 4829 enum vgetstate dvs; 4830 struct vnode *dvp, *tvp; 4831 seqc_t dvp_seqc; 4832 int error; 4833 bool docache; 4834 4835 ndp = fpl->ndp; 4836 cnp = fpl->cnp; 4837 dvp = fpl->dvp; 4838 dvp_seqc = fpl->dvp_seqc; 4839 4840 MPASS((cnp->cn_flags & MAKEENTRY) == 0); 4841 MPASS((cnp->cn_flags & ISDOTDOT) == 0); 4842 
MPASS(!cache_fpl_isdotdot(cnp)); 4843 4844 /* 4845 * Hack: delayed name len checking. 4846 */ 4847 if (__predict_false(cnp->cn_namelen > NAME_MAX)) { 4848 cache_fpl_smr_exit(fpl); 4849 return (cache_fpl_handled_error(fpl, ENAMETOOLONG)); 4850 } 4851 4852 if (cnp->cn_nameptr[0] == '/') { 4853 return (cache_fplookup_skip_slashes(fpl)); 4854 } 4855 4856 if (cnp->cn_pnbuf[0] == '\0') { 4857 return (cache_fplookup_emptypath(fpl)); 4858 } 4859 4860 if (cnp->cn_nameptr[0] == '\0') { 4861 if (fpl->tvp == NULL) { 4862 return (cache_fplookup_degenerate(fpl)); 4863 } 4864 return (cache_fplookup_trailingslash(fpl)); 4865 } 4866 4867 if (cnp->cn_nameiop != LOOKUP) { 4868 fpl->tvp = NULL; 4869 return (cache_fplookup_modifying(fpl)); 4870 } 4871 4872 MPASS((cnp->cn_flags & SAVESTART) == 0); 4873 4874 /* 4875 * Only try to fill in the component if it is the last one, 4876 * otherwise not only there may be several to handle but the 4877 * walk may be complicated. 4878 */ 4879 if (!cache_fpl_islastcn(ndp)) { 4880 return (cache_fpl_partial(fpl)); 4881 } 4882 4883 /* 4884 * Regular lookup nulifies the slash, which we don't do here. 4885 * Don't take chances with filesystem routines seeing it for 4886 * the last entry. 4887 */ 4888 if (cache_fpl_istrailingslash(fpl)) { 4889 return (cache_fpl_partial(fpl)); 4890 } 4891 4892 /* 4893 * Secure access to dvp; check cache_fplookup_partial_setup for 4894 * reasoning. 4895 */ 4896 dvs = vget_prep_smr(dvp); 4897 cache_fpl_smr_exit(fpl); 4898 if (__predict_false(dvs == VGET_NONE)) { 4899 return (cache_fpl_aborted(fpl)); 4900 } 4901 4902 vget_finish_ref(dvp, dvs); 4903 if (!vn_seqc_consistent(dvp, dvp_seqc)) { 4904 vrele(dvp); 4905 return (cache_fpl_aborted(fpl)); 4906 } 4907 4908 error = vn_lock(dvp, LK_SHARED); 4909 if (__predict_false(error != 0)) { 4910 vrele(dvp); 4911 return (cache_fpl_aborted(fpl)); 4912 } 4913 4914 tvp = NULL; 4915 /* 4916 * TODO: provide variants which don't require locking either vnode. 
4917 */ 4918 cnp->cn_flags |= ISLASTCN; 4919 docache = (cnp->cn_flags & NOCACHE) ^ NOCACHE; 4920 if (docache) 4921 cnp->cn_flags |= MAKEENTRY; 4922 cnp->cn_lkflags = LK_SHARED; 4923 if ((cnp->cn_flags & LOCKSHARED) == 0) { 4924 cnp->cn_lkflags = LK_EXCLUSIVE; 4925 } 4926 error = VOP_LOOKUP(dvp, &tvp, cnp); 4927 switch (error) { 4928 case EJUSTRETURN: 4929 case 0: 4930 break; 4931 case ENOTDIR: 4932 case ENOENT: 4933 vput(dvp); 4934 return (cache_fpl_handled_error(fpl, error)); 4935 default: 4936 vput(dvp); 4937 return (cache_fpl_aborted(fpl)); 4938 } 4939 4940 fpl->tvp = tvp; 4941 if (!fpl->savename) { 4942 MPASS((cnp->cn_flags & SAVENAME) == 0); 4943 } 4944 4945 if (tvp == NULL) { 4946 MPASS(error == EJUSTRETURN); 4947 if ((cnp->cn_flags & (WANTPARENT | LOCKPARENT)) == 0) { 4948 vput(dvp); 4949 } else if ((cnp->cn_flags & LOCKPARENT) == 0) { 4950 VOP_UNLOCK(dvp); 4951 } 4952 return (cache_fpl_handled(fpl)); 4953 } 4954 4955 if (tvp->v_type == VLNK) { 4956 if ((cnp->cn_flags & FOLLOW) != 0) { 4957 vput(dvp); 4958 vput(tvp); 4959 return (cache_fpl_aborted(fpl)); 4960 } 4961 } 4962 4963 if (__predict_false(cache_fplookup_is_mp(fpl))) { 4964 vput(dvp); 4965 vput(tvp); 4966 return (cache_fpl_aborted(fpl)); 4967 } 4968 4969 if ((cnp->cn_flags & LOCKLEAF) == 0) { 4970 VOP_UNLOCK(tvp); 4971 } 4972 4973 if ((cnp->cn_flags & (WANTPARENT | LOCKPARENT)) == 0) { 4974 vput(dvp); 4975 } else if ((cnp->cn_flags & LOCKPARENT) == 0) { 4976 VOP_UNLOCK(dvp); 4977 } 4978 return (cache_fpl_handled(fpl)); 4979 } 4980 4981 static int __noinline 4982 cache_fplookup_dot(struct cache_fpl *fpl) 4983 { 4984 int error; 4985 4986 MPASS(!seqc_in_modify(fpl->dvp_seqc)); 4987 /* 4988 * Just re-assign the value. seqc will be checked later for the first 4989 * non-dot path component in line and/or before deciding to return the 4990 * vnode. 4991 */ 4992 fpl->tvp = fpl->dvp; 4993 fpl->tvp_seqc = fpl->dvp_seqc; 4994 4995 counter_u64_add(dothits, 1); 4996 SDT_PROBE3(vfs, namecache, lookup, hit, fpl->dvp, ".", fpl->dvp); 4997 4998 error = 0; 4999 if (cache_fplookup_is_mp(fpl)) { 5000 error = cache_fplookup_cross_mount(fpl); 5001 } 5002 return (error); 5003 } 5004 5005 static int __noinline 5006 cache_fplookup_dotdot(struct cache_fpl *fpl) 5007 { 5008 struct nameidata *ndp; 5009 struct componentname *cnp; 5010 struct namecache *ncp; 5011 struct vnode *dvp; 5012 struct prison *pr; 5013 u_char nc_flag; 5014 5015 ndp = fpl->ndp; 5016 cnp = fpl->cnp; 5017 dvp = fpl->dvp; 5018 5019 MPASS(cache_fpl_isdotdot(cnp)); 5020 5021 /* 5022 * XXX this is racy the same way regular lookup is 5023 */ 5024 for (pr = cnp->cn_cred->cr_prison; pr != NULL; 5025 pr = pr->pr_parent) 5026 if (dvp == pr->pr_root) 5027 break; 5028 5029 if (dvp == ndp->ni_rootdir || 5030 dvp == ndp->ni_topdir || 5031 dvp == rootvnode || 5032 pr != NULL) { 5033 fpl->tvp = dvp; 5034 fpl->tvp_seqc = vn_seqc_read_any(dvp); 5035 if (seqc_in_modify(fpl->tvp_seqc)) { 5036 return (cache_fpl_aborted(fpl)); 5037 } 5038 return (0); 5039 } 5040 5041 if ((dvp->v_vflag & VV_ROOT) != 0) { 5042 /* 5043 * TODO 5044 * The opposite of climb mount is needed here. 
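	 * That is, for ".." leaving the root of a mounted filesystem the
	 * walk would have to step onto the covered vnode
	 * (v_mount->mnt_vnodecovered) the way regular lookup does; until
	 * that is implemented the slow path takes over.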
5045 */ 5046 return (cache_fpl_partial(fpl)); 5047 } 5048 5049 ncp = atomic_load_consume_ptr(&dvp->v_cache_dd); 5050 if (ncp == NULL) { 5051 return (cache_fpl_aborted(fpl)); 5052 } 5053 5054 nc_flag = atomic_load_char(&ncp->nc_flag); 5055 if ((nc_flag & NCF_ISDOTDOT) != 0) { 5056 if ((nc_flag & NCF_NEGATIVE) != 0) 5057 return (cache_fpl_aborted(fpl)); 5058 fpl->tvp = ncp->nc_vp; 5059 } else { 5060 fpl->tvp = ncp->nc_dvp; 5061 } 5062 5063 fpl->tvp_seqc = vn_seqc_read_any(fpl->tvp); 5064 if (seqc_in_modify(fpl->tvp_seqc)) { 5065 return (cache_fpl_partial(fpl)); 5066 } 5067 5068 /* 5069 * Acquire fence provided by vn_seqc_read_any above. 5070 */ 5071 if (__predict_false(atomic_load_ptr(&dvp->v_cache_dd) != ncp)) { 5072 return (cache_fpl_aborted(fpl)); 5073 } 5074 5075 if (!cache_ncp_canuse(ncp)) { 5076 return (cache_fpl_aborted(fpl)); 5077 } 5078 5079 counter_u64_add(dotdothits, 1); 5080 return (0); 5081 } 5082 5083 static int __noinline 5084 cache_fplookup_neg(struct cache_fpl *fpl, struct namecache *ncp, uint32_t hash) 5085 { 5086 u_char nc_flag __diagused; 5087 bool neg_promote; 5088 5089 #ifdef INVARIANTS 5090 nc_flag = atomic_load_char(&ncp->nc_flag); 5091 MPASS((nc_flag & NCF_NEGATIVE) != 0); 5092 #endif 5093 /* 5094 * If they want to create an entry we need to replace this one. 5095 */ 5096 if (__predict_false(fpl->cnp->cn_nameiop != LOOKUP)) { 5097 fpl->tvp = NULL; 5098 return (cache_fplookup_modifying(fpl)); 5099 } 5100 neg_promote = cache_neg_hit_prep(ncp); 5101 if (!cache_fpl_neg_ncp_canuse(ncp)) { 5102 cache_neg_hit_abort(ncp); 5103 return (cache_fpl_partial(fpl)); 5104 } 5105 if (neg_promote) { 5106 return (cache_fplookup_negative_promote(fpl, ncp, hash)); 5107 } 5108 cache_neg_hit_finish(ncp); 5109 cache_fpl_smr_exit(fpl); 5110 return (cache_fpl_handled_error(fpl, ENOENT)); 5111 } 5112 5113 /* 5114 * Resolve a symlink. Called by filesystem-specific routines. 5115 * 5116 * Code flow is: 5117 * ... 
-> cache_fplookup_symlink -> VOP_FPLOOKUP_SYMLINK -> cache_symlink_resolve 5118 */ 5119 int 5120 cache_symlink_resolve(struct cache_fpl *fpl, const char *string, size_t len) 5121 { 5122 struct nameidata *ndp; 5123 struct componentname *cnp; 5124 size_t adjust; 5125 5126 ndp = fpl->ndp; 5127 cnp = fpl->cnp; 5128 5129 if (__predict_false(len == 0)) { 5130 return (ENOENT); 5131 } 5132 5133 if (__predict_false(len > MAXPATHLEN - 2)) { 5134 if (cache_fpl_istrailingslash(fpl)) { 5135 return (EAGAIN); 5136 } 5137 } 5138 5139 ndp->ni_pathlen = fpl->nulchar - cnp->cn_nameptr - cnp->cn_namelen + 1; 5140 #ifdef INVARIANTS 5141 if (ndp->ni_pathlen != fpl->debug.ni_pathlen) { 5142 panic("%s: mismatch (%zu != %zu) nulchar %p nameptr %p [%s] ; full string [%s]\n", 5143 __func__, ndp->ni_pathlen, fpl->debug.ni_pathlen, fpl->nulchar, 5144 cnp->cn_nameptr, cnp->cn_nameptr, cnp->cn_pnbuf); 5145 } 5146 #endif 5147 5148 if (__predict_false(len + ndp->ni_pathlen > MAXPATHLEN)) { 5149 return (ENAMETOOLONG); 5150 } 5151 5152 if (__predict_false(ndp->ni_loopcnt++ >= MAXSYMLINKS)) { 5153 return (ELOOP); 5154 } 5155 5156 adjust = len; 5157 if (ndp->ni_pathlen > 1) { 5158 bcopy(ndp->ni_next, cnp->cn_pnbuf + len, ndp->ni_pathlen); 5159 } else { 5160 if (cache_fpl_istrailingslash(fpl)) { 5161 adjust = len + 1; 5162 cnp->cn_pnbuf[len] = '/'; 5163 cnp->cn_pnbuf[len + 1] = '\0'; 5164 } else { 5165 cnp->cn_pnbuf[len] = '\0'; 5166 } 5167 } 5168 bcopy(string, cnp->cn_pnbuf, len); 5169 5170 ndp->ni_pathlen += adjust; 5171 cache_fpl_pathlen_add(fpl, adjust); 5172 cnp->cn_nameptr = cnp->cn_pnbuf; 5173 fpl->nulchar = &cnp->cn_nameptr[ndp->ni_pathlen - 1]; 5174 fpl->tvp = NULL; 5175 return (0); 5176 } 5177 5178 static int __noinline 5179 cache_fplookup_symlink(struct cache_fpl *fpl) 5180 { 5181 struct mount *mp; 5182 struct nameidata *ndp; 5183 struct componentname *cnp; 5184 struct vnode *dvp, *tvp; 5185 int error; 5186 5187 ndp = fpl->ndp; 5188 cnp = fpl->cnp; 5189 dvp = fpl->dvp; 5190 tvp = fpl->tvp; 5191 5192 if (cache_fpl_islastcn(ndp)) { 5193 if ((cnp->cn_flags & FOLLOW) == 0) { 5194 return (cache_fplookup_final(fpl)); 5195 } 5196 } 5197 5198 mp = atomic_load_ptr(&dvp->v_mount); 5199 if (__predict_false(mp == NULL)) { 5200 return (cache_fpl_aborted(fpl)); 5201 } 5202 5203 /* 5204 * Note this check races against setting the flag just like regular 5205 * lookup. 5206 */ 5207 if (__predict_false((mp->mnt_flag & MNT_NOSYMFOLLOW) != 0)) { 5208 cache_fpl_smr_exit(fpl); 5209 return (cache_fpl_handled_error(fpl, EACCES)); 5210 } 5211 5212 error = VOP_FPLOOKUP_SYMLINK(tvp, fpl); 5213 if (__predict_false(error != 0)) { 5214 switch (error) { 5215 case EAGAIN: 5216 return (cache_fpl_partial(fpl)); 5217 case ENOENT: 5218 case ENAMETOOLONG: 5219 case ELOOP: 5220 cache_fpl_smr_exit(fpl); 5221 return (cache_fpl_handled_error(fpl, error)); 5222 default: 5223 return (cache_fpl_aborted(fpl)); 5224 } 5225 } 5226 5227 if (*(cnp->cn_nameptr) == '/') { 5228 fpl->dvp = cache_fpl_handle_root(fpl); 5229 fpl->dvp_seqc = vn_seqc_read_any(fpl->dvp); 5230 if (seqc_in_modify(fpl->dvp_seqc)) { 5231 return (cache_fpl_aborted(fpl)); 5232 } 5233 /* 5234 * The main loop assumes that ->dvp points to a vnode belonging 5235 * to a filesystem which can do lockless lookup, but the absolute 5236 * symlink can be wandering off to one which does not. 
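		 * The check below handles that case: if the new ->dvp belongs
		 * to a mount without MNTK_FPLOOKUP, the walk is handed over
		 * to the slow path via cache_fpl_partial.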
5237 */ 5238 mp = atomic_load_ptr(&fpl->dvp->v_mount); 5239 if (__predict_false(mp == NULL)) { 5240 return (cache_fpl_aborted(fpl)); 5241 } 5242 if (!cache_fplookup_mp_supported(mp)) { 5243 cache_fpl_checkpoint(fpl); 5244 return (cache_fpl_partial(fpl)); 5245 } 5246 } 5247 return (0); 5248 } 5249 5250 static int 5251 cache_fplookup_next(struct cache_fpl *fpl) 5252 { 5253 struct componentname *cnp; 5254 struct namecache *ncp; 5255 struct vnode *dvp, *tvp; 5256 u_char nc_flag; 5257 uint32_t hash; 5258 int error; 5259 5260 cnp = fpl->cnp; 5261 dvp = fpl->dvp; 5262 hash = fpl->hash; 5263 5264 if (__predict_false(cnp->cn_nameptr[0] == '.')) { 5265 if (cnp->cn_namelen == 1) { 5266 return (cache_fplookup_dot(fpl)); 5267 } 5268 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') { 5269 return (cache_fplookup_dotdot(fpl)); 5270 } 5271 } 5272 5273 MPASS(!cache_fpl_isdotdot(cnp)); 5274 5275 CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { 5276 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen && 5277 !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen)) 5278 break; 5279 } 5280 5281 if (__predict_false(ncp == NULL)) { 5282 return (cache_fplookup_noentry(fpl)); 5283 } 5284 5285 tvp = atomic_load_ptr(&ncp->nc_vp); 5286 nc_flag = atomic_load_char(&ncp->nc_flag); 5287 if ((nc_flag & NCF_NEGATIVE) != 0) { 5288 return (cache_fplookup_neg(fpl, ncp, hash)); 5289 } 5290 5291 if (!cache_ncp_canuse(ncp)) { 5292 return (cache_fpl_partial(fpl)); 5293 } 5294 5295 fpl->tvp = tvp; 5296 fpl->tvp_seqc = vn_seqc_read_any(tvp); 5297 if (seqc_in_modify(fpl->tvp_seqc)) { 5298 return (cache_fpl_partial(fpl)); 5299 } 5300 5301 counter_u64_add(numposhits, 1); 5302 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name, tvp); 5303 5304 error = 0; 5305 if (cache_fplookup_is_mp(fpl)) { 5306 error = cache_fplookup_cross_mount(fpl); 5307 } 5308 return (error); 5309 } 5310 5311 static bool 5312 cache_fplookup_mp_supported(struct mount *mp) 5313 { 5314 5315 MPASS(mp != NULL); 5316 if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) == 0) 5317 return (false); 5318 return (true); 5319 } 5320 5321 /* 5322 * Walk up the mount stack (if any). 5323 * 5324 * Correctness is provided in the following ways: 5325 * - all vnodes are protected from freeing with SMR 5326 * - struct mount objects are type stable making them always safe to access 5327 * - stability of the particular mount is provided by busying it 5328 * - relationship between the vnode which is mounted on and the mount is 5329 * verified with the vnode sequence counter after busying 5330 * - association between root vnode of the mount and the mount is protected 5331 * by busy 5332 * 5333 * From that point on we can read the sequence counter of the root vnode 5334 * and get the next mount on the stack (if any) using the same protection. 5335 * 5336 * By the end of successful walk we are guaranteed the reached state was 5337 * indeed present at least at some point which matches the regular lookup. 
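 *
 * Roughly, one iteration of the loop below performs:
 *
 *	vfs_op_thread_enter_crit(mp, mpcpu);	// pin the mount
 *	vn_seqc_consistent(vp, vp_seqc);	// the covered vnode did not change
 *	vp = mp->mnt_rootvnode;			// step onto the root of the mount
 *	vp_seqc = vn_seqc_read_any(vp);		// snapshot its counter
 *
 * with any failure along the way translating into cache_fpl_partial().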
5338 */ 5339 static int __noinline 5340 cache_fplookup_climb_mount(struct cache_fpl *fpl) 5341 { 5342 struct mount *mp, *prev_mp; 5343 struct mount_pcpu *mpcpu, *prev_mpcpu; 5344 struct vnode *vp; 5345 seqc_t vp_seqc; 5346 5347 vp = fpl->tvp; 5348 vp_seqc = fpl->tvp_seqc; 5349 5350 VNPASS(vp->v_type == VDIR || vp->v_type == VBAD, vp); 5351 mp = atomic_load_ptr(&vp->v_mountedhere); 5352 if (__predict_false(mp == NULL)) { 5353 return (0); 5354 } 5355 5356 prev_mp = NULL; 5357 for (;;) { 5358 if (!vfs_op_thread_enter_crit(mp, mpcpu)) { 5359 if (prev_mp != NULL) 5360 vfs_op_thread_exit_crit(prev_mp, prev_mpcpu); 5361 return (cache_fpl_partial(fpl)); 5362 } 5363 if (prev_mp != NULL) 5364 vfs_op_thread_exit_crit(prev_mp, prev_mpcpu); 5365 if (!vn_seqc_consistent(vp, vp_seqc)) { 5366 vfs_op_thread_exit_crit(mp, mpcpu); 5367 return (cache_fpl_partial(fpl)); 5368 } 5369 if (!cache_fplookup_mp_supported(mp)) { 5370 vfs_op_thread_exit_crit(mp, mpcpu); 5371 return (cache_fpl_partial(fpl)); 5372 } 5373 vp = atomic_load_ptr(&mp->mnt_rootvnode); 5374 if (vp == NULL) { 5375 vfs_op_thread_exit_crit(mp, mpcpu); 5376 return (cache_fpl_partial(fpl)); 5377 } 5378 vp_seqc = vn_seqc_read_any(vp); 5379 if (seqc_in_modify(vp_seqc)) { 5380 vfs_op_thread_exit_crit(mp, mpcpu); 5381 return (cache_fpl_partial(fpl)); 5382 } 5383 prev_mp = mp; 5384 prev_mpcpu = mpcpu; 5385 mp = atomic_load_ptr(&vp->v_mountedhere); 5386 if (mp == NULL) 5387 break; 5388 } 5389 5390 vfs_op_thread_exit_crit(prev_mp, prev_mpcpu); 5391 fpl->tvp = vp; 5392 fpl->tvp_seqc = vp_seqc; 5393 return (0); 5394 } 5395 5396 static int __noinline 5397 cache_fplookup_cross_mount(struct cache_fpl *fpl) 5398 { 5399 struct mount *mp; 5400 struct mount_pcpu *mpcpu; 5401 struct vnode *vp; 5402 seqc_t vp_seqc; 5403 5404 vp = fpl->tvp; 5405 vp_seqc = fpl->tvp_seqc; 5406 5407 VNPASS(vp->v_type == VDIR || vp->v_type == VBAD, vp); 5408 mp = atomic_load_ptr(&vp->v_mountedhere); 5409 if (__predict_false(mp == NULL)) { 5410 return (0); 5411 } 5412 5413 if (!vfs_op_thread_enter_crit(mp, mpcpu)) { 5414 return (cache_fpl_partial(fpl)); 5415 } 5416 if (!vn_seqc_consistent(vp, vp_seqc)) { 5417 vfs_op_thread_exit_crit(mp, mpcpu); 5418 return (cache_fpl_partial(fpl)); 5419 } 5420 if (!cache_fplookup_mp_supported(mp)) { 5421 vfs_op_thread_exit_crit(mp, mpcpu); 5422 return (cache_fpl_partial(fpl)); 5423 } 5424 vp = atomic_load_ptr(&mp->mnt_rootvnode); 5425 if (__predict_false(vp == NULL)) { 5426 vfs_op_thread_exit_crit(mp, mpcpu); 5427 return (cache_fpl_partial(fpl)); 5428 } 5429 vp_seqc = vn_seqc_read_any(vp); 5430 vfs_op_thread_exit_crit(mp, mpcpu); 5431 if (seqc_in_modify(vp_seqc)) { 5432 return (cache_fpl_partial(fpl)); 5433 } 5434 mp = atomic_load_ptr(&vp->v_mountedhere); 5435 if (__predict_false(mp != NULL)) { 5436 /* 5437 * There are possibly more mount points on top. 5438 * Normally this does not happen so for simplicity just start 5439 * over. 5440 */ 5441 return (cache_fplookup_climb_mount(fpl)); 5442 } 5443 5444 fpl->tvp = vp; 5445 fpl->tvp_seqc = vp_seqc; 5446 return (0); 5447 } 5448 5449 /* 5450 * Check if a vnode is mounted on. 5451 */ 5452 static bool 5453 cache_fplookup_is_mp(struct cache_fpl *fpl) 5454 { 5455 struct vnode *vp; 5456 5457 vp = fpl->tvp; 5458 return ((vn_irflag_read(vp) & VIRF_MOUNTPOINT) != 0); 5459 } 5460 5461 /* 5462 * Parse the path. 5463 * 5464 * The code was originally copy-pasted from regular lookup and despite 5465 * clean ups leaves performance on the table. 
Any modifications here 5466 * must take into account that in case off fallback the resulting 5467 * nameidata state has to be compatible with the original. 5468 */ 5469 5470 /* 5471 * Debug ni_pathlen tracking. 5472 */ 5473 #ifdef INVARIANTS 5474 static void 5475 cache_fpl_pathlen_add(struct cache_fpl *fpl, size_t n) 5476 { 5477 5478 fpl->debug.ni_pathlen += n; 5479 KASSERT(fpl->debug.ni_pathlen <= PATH_MAX, 5480 ("%s: pathlen overflow to %zd\n", __func__, fpl->debug.ni_pathlen)); 5481 } 5482 5483 static void 5484 cache_fpl_pathlen_sub(struct cache_fpl *fpl, size_t n) 5485 { 5486 5487 fpl->debug.ni_pathlen -= n; 5488 KASSERT(fpl->debug.ni_pathlen <= PATH_MAX, 5489 ("%s: pathlen underflow to %zd\n", __func__, fpl->debug.ni_pathlen)); 5490 } 5491 5492 static void 5493 cache_fpl_pathlen_inc(struct cache_fpl *fpl) 5494 { 5495 5496 cache_fpl_pathlen_add(fpl, 1); 5497 } 5498 5499 static void 5500 cache_fpl_pathlen_dec(struct cache_fpl *fpl) 5501 { 5502 5503 cache_fpl_pathlen_sub(fpl, 1); 5504 } 5505 #else 5506 static void 5507 cache_fpl_pathlen_add(struct cache_fpl *fpl, size_t n) 5508 { 5509 } 5510 5511 static void 5512 cache_fpl_pathlen_sub(struct cache_fpl *fpl, size_t n) 5513 { 5514 } 5515 5516 static void 5517 cache_fpl_pathlen_inc(struct cache_fpl *fpl) 5518 { 5519 } 5520 5521 static void 5522 cache_fpl_pathlen_dec(struct cache_fpl *fpl) 5523 { 5524 } 5525 #endif 5526 5527 static void 5528 cache_fplookup_parse(struct cache_fpl *fpl) 5529 { 5530 struct nameidata *ndp; 5531 struct componentname *cnp; 5532 struct vnode *dvp; 5533 char *cp; 5534 uint32_t hash; 5535 5536 ndp = fpl->ndp; 5537 cnp = fpl->cnp; 5538 dvp = fpl->dvp; 5539 5540 /* 5541 * Find the end of this path component, it is either / or nul. 5542 * 5543 * Store / as a temporary sentinel so that we only have one character 5544 * to test for. Pathnames tend to be short so this should not be 5545 * resulting in cache misses. 5546 * 5547 * TODO: fix this to be word-sized. 5548 */ 5549 MPASS(&cnp->cn_nameptr[fpl->debug.ni_pathlen - 1] >= cnp->cn_pnbuf); 5550 KASSERT(&cnp->cn_nameptr[fpl->debug.ni_pathlen - 1] == fpl->nulchar, 5551 ("%s: mismatch between pathlen (%zu) and nulchar (%p != %p), string [%s]\n", 5552 __func__, fpl->debug.ni_pathlen, &cnp->cn_nameptr[fpl->debug.ni_pathlen - 1], 5553 fpl->nulchar, cnp->cn_pnbuf)); 5554 KASSERT(*fpl->nulchar == '\0', 5555 ("%s: expected nul at %p; string [%s]\n", __func__, fpl->nulchar, 5556 cnp->cn_pnbuf)); 5557 hash = cache_get_hash_iter_start(dvp); 5558 *fpl->nulchar = '/'; 5559 for (cp = cnp->cn_nameptr; *cp != '/'; cp++) { 5560 KASSERT(*cp != '\0', 5561 ("%s: encountered unexpected nul; string [%s]\n", __func__, 5562 cnp->cn_nameptr)); 5563 hash = cache_get_hash_iter(*cp, hash); 5564 continue; 5565 } 5566 *fpl->nulchar = '\0'; 5567 fpl->hash = cache_get_hash_iter_finish(hash); 5568 5569 cnp->cn_namelen = cp - cnp->cn_nameptr; 5570 cache_fpl_pathlen_sub(fpl, cnp->cn_namelen); 5571 5572 #ifdef INVARIANTS 5573 /* 5574 * cache_get_hash only accepts lengths up to NAME_MAX. This is fine since 5575 * we are going to fail this lookup with ENAMETOOLONG (see below). 5576 */ 5577 if (cnp->cn_namelen <= NAME_MAX) { 5578 if (fpl->hash != cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp)) { 5579 panic("%s: mismatched hash for [%s] len %ld", __func__, 5580 cnp->cn_nameptr, cnp->cn_namelen); 5581 } 5582 } 5583 #endif 5584 5585 /* 5586 * Hack: we have to check if the found path component's length exceeds 5587 * NAME_MAX. 
However, the condition is very rarely true and check can 5588 * be elided in the common case -- if an entry was found in the cache, 5589 * then it could not have been too long to begin with. 5590 */ 5591 ndp->ni_next = cp; 5592 } 5593 5594 static void 5595 cache_fplookup_parse_advance(struct cache_fpl *fpl) 5596 { 5597 struct nameidata *ndp; 5598 struct componentname *cnp; 5599 5600 ndp = fpl->ndp; 5601 cnp = fpl->cnp; 5602 5603 cnp->cn_nameptr = ndp->ni_next; 5604 KASSERT(*(cnp->cn_nameptr) == '/', 5605 ("%s: should have seen slash at %p ; buf %p [%s]\n", __func__, 5606 cnp->cn_nameptr, cnp->cn_pnbuf, cnp->cn_pnbuf)); 5607 cnp->cn_nameptr++; 5608 cache_fpl_pathlen_dec(fpl); 5609 } 5610 5611 /* 5612 * Skip spurious slashes in a pathname (e.g., "foo///bar") and retry. 5613 * 5614 * Lockless lookup tries to elide checking for spurious slashes and should they 5615 * be present is guaranteed to fail to find an entry. In this case the caller 5616 * must check if the name starts with a slash and call this routine. It is 5617 * going to fast forward across the spurious slashes and set the state up for 5618 * retry. 5619 */ 5620 static int __noinline 5621 cache_fplookup_skip_slashes(struct cache_fpl *fpl) 5622 { 5623 struct nameidata *ndp; 5624 struct componentname *cnp; 5625 5626 ndp = fpl->ndp; 5627 cnp = fpl->cnp; 5628 5629 MPASS(*(cnp->cn_nameptr) == '/'); 5630 do { 5631 cnp->cn_nameptr++; 5632 cache_fpl_pathlen_dec(fpl); 5633 } while (*(cnp->cn_nameptr) == '/'); 5634 5635 /* 5636 * Go back to one slash so that cache_fplookup_parse_advance has 5637 * something to skip. 5638 */ 5639 cnp->cn_nameptr--; 5640 cache_fpl_pathlen_inc(fpl); 5641 5642 /* 5643 * cache_fplookup_parse_advance starts from ndp->ni_next 5644 */ 5645 ndp->ni_next = cnp->cn_nameptr; 5646 5647 /* 5648 * See cache_fplookup_dot. 5649 */ 5650 fpl->tvp = fpl->dvp; 5651 fpl->tvp_seqc = fpl->dvp_seqc; 5652 5653 return (0); 5654 } 5655 5656 /* 5657 * Handle trailing slashes (e.g., "foo/"). 5658 * 5659 * If a trailing slash is found the terminal vnode must be a directory. 5660 * Regular lookup shortens the path by nulifying the first trailing slash and 5661 * sets the TRAILINGSLASH flag to denote this took place. There are several 5662 * checks on it performed later. 5663 * 5664 * Similarly to spurious slashes, lockless lookup handles this in a speculative 5665 * manner relying on an invariant that a non-directory vnode will get a miss. 5666 * In this case cn_nameptr[0] == '\0' and cn_namelen == 0. 5667 * 5668 * Thus for a path like "foo/bar/" the code unwinds the state back to "bar/" 5669 * and denotes this is the last path component, which avoids looping back. 5670 * 5671 * Only plain lookups are supported for now to restrict corner cases to handle. 
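 * The parent directory for the retried component is re-derived from the
 * child's v_cache_dd entry; if it is missing, or turns out to be a ".."
 * entry, the lookup is aborted and the locked variant takes over.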
5672 */ 5673 static int __noinline 5674 cache_fplookup_trailingslash(struct cache_fpl *fpl) 5675 { 5676 #ifdef INVARIANTS 5677 size_t ni_pathlen; 5678 #endif 5679 struct nameidata *ndp; 5680 struct componentname *cnp; 5681 struct namecache *ncp; 5682 struct vnode *tvp; 5683 char *cn_nameptr_orig, *cn_nameptr_slash; 5684 seqc_t tvp_seqc; 5685 u_char nc_flag; 5686 5687 ndp = fpl->ndp; 5688 cnp = fpl->cnp; 5689 tvp = fpl->tvp; 5690 tvp_seqc = fpl->tvp_seqc; 5691 5692 MPASS(fpl->dvp == fpl->tvp); 5693 KASSERT(cache_fpl_istrailingslash(fpl), 5694 ("%s: expected trailing slash at %p; string [%s]\n", __func__, fpl->nulchar - 1, 5695 cnp->cn_pnbuf)); 5696 KASSERT(cnp->cn_nameptr[0] == '\0', 5697 ("%s: expected nul char at %p; string [%s]\n", __func__, &cnp->cn_nameptr[0], 5698 cnp->cn_pnbuf)); 5699 KASSERT(cnp->cn_namelen == 0, 5700 ("%s: namelen 0 but got %ld; string [%s]\n", __func__, cnp->cn_namelen, 5701 cnp->cn_pnbuf)); 5702 MPASS(cnp->cn_nameptr > cnp->cn_pnbuf); 5703 5704 if (cnp->cn_nameiop != LOOKUP) { 5705 return (cache_fpl_aborted(fpl)); 5706 } 5707 5708 if (__predict_false(tvp->v_type != VDIR)) { 5709 if (!vn_seqc_consistent(tvp, tvp_seqc)) { 5710 return (cache_fpl_aborted(fpl)); 5711 } 5712 cache_fpl_smr_exit(fpl); 5713 return (cache_fpl_handled_error(fpl, ENOTDIR)); 5714 } 5715 5716 /* 5717 * Denote the last component. 5718 */ 5719 ndp->ni_next = &cnp->cn_nameptr[0]; 5720 MPASS(cache_fpl_islastcn(ndp)); 5721 5722 /* 5723 * Unwind trailing slashes. 5724 */ 5725 cn_nameptr_orig = cnp->cn_nameptr; 5726 while (cnp->cn_nameptr >= cnp->cn_pnbuf) { 5727 cnp->cn_nameptr--; 5728 if (cnp->cn_nameptr[0] != '/') { 5729 break; 5730 } 5731 } 5732 5733 /* 5734 * Unwind to the beginning of the path component. 5735 * 5736 * Note the path may or may not have started with a slash. 5737 */ 5738 cn_nameptr_slash = cnp->cn_nameptr; 5739 while (cnp->cn_nameptr > cnp->cn_pnbuf) { 5740 cnp->cn_nameptr--; 5741 if (cnp->cn_nameptr[0] == '/') { 5742 break; 5743 } 5744 } 5745 if (cnp->cn_nameptr[0] == '/') { 5746 cnp->cn_nameptr++; 5747 } 5748 5749 cnp->cn_namelen = cn_nameptr_slash - cnp->cn_nameptr + 1; 5750 cache_fpl_pathlen_add(fpl, cn_nameptr_orig - cnp->cn_nameptr); 5751 cache_fpl_checkpoint(fpl); 5752 5753 #ifdef INVARIANTS 5754 ni_pathlen = fpl->nulchar - cnp->cn_nameptr + 1; 5755 if (ni_pathlen != fpl->debug.ni_pathlen) { 5756 panic("%s: mismatch (%zu != %zu) nulchar %p nameptr %p [%s] ; full string [%s]\n", 5757 __func__, ni_pathlen, fpl->debug.ni_pathlen, fpl->nulchar, 5758 cnp->cn_nameptr, cnp->cn_nameptr, cnp->cn_pnbuf); 5759 } 5760 #endif 5761 5762 /* 5763 * If this was a "./" lookup the parent directory is already correct. 5764 */ 5765 if (cnp->cn_nameptr[0] == '.' && cnp->cn_namelen == 1) { 5766 return (0); 5767 } 5768 5769 /* 5770 * Otherwise we need to look it up. 5771 */ 5772 tvp = fpl->tvp; 5773 ncp = atomic_load_consume_ptr(&tvp->v_cache_dd); 5774 if (__predict_false(ncp == NULL)) { 5775 return (cache_fpl_aborted(fpl)); 5776 } 5777 nc_flag = atomic_load_char(&ncp->nc_flag); 5778 if ((nc_flag & NCF_ISDOTDOT) != 0) { 5779 return (cache_fpl_aborted(fpl)); 5780 } 5781 fpl->dvp = ncp->nc_dvp; 5782 fpl->dvp_seqc = vn_seqc_read_any(fpl->dvp); 5783 if (seqc_in_modify(fpl->dvp_seqc)) { 5784 return (cache_fpl_aborted(fpl)); 5785 } 5786 return (0); 5787 } 5788 5789 /* 5790 * See the API contract for VOP_FPLOOKUP_VEXEC. 
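 * It is spelled out in the block comment above cache_fplookup() further
 * down in this file.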
5791 */ 5792 static int __noinline 5793 cache_fplookup_failed_vexec(struct cache_fpl *fpl, int error) 5794 { 5795 struct componentname *cnp; 5796 struct vnode *dvp; 5797 seqc_t dvp_seqc; 5798 5799 cnp = fpl->cnp; 5800 dvp = fpl->dvp; 5801 dvp_seqc = fpl->dvp_seqc; 5802 5803 /* 5804 * Hack: delayed empty path checking. 5805 */ 5806 if (cnp->cn_pnbuf[0] == '\0') { 5807 return (cache_fplookup_emptypath(fpl)); 5808 } 5809 5810 /* 5811 * TODO: Due to ignoring trailing slashes lookup will perform a 5812 * permission check on the last dir when it should not be doing it. It 5813 * may fail, but said failure should be ignored. It is possible to fix 5814 * it up fully without resorting to regular lookup, but for now just 5815 * abort. 5816 */ 5817 if (cache_fpl_istrailingslash(fpl)) { 5818 return (cache_fpl_aborted(fpl)); 5819 } 5820 5821 /* 5822 * Hack: delayed degenerate path checking. 5823 */ 5824 if (cnp->cn_nameptr[0] == '\0' && fpl->tvp == NULL) { 5825 return (cache_fplookup_degenerate(fpl)); 5826 } 5827 5828 /* 5829 * Hack: delayed name len checking. 5830 */ 5831 if (__predict_false(cnp->cn_namelen > NAME_MAX)) { 5832 cache_fpl_smr_exit(fpl); 5833 return (cache_fpl_handled_error(fpl, ENAMETOOLONG)); 5834 } 5835 5836 /* 5837 * Hack: they may be looking up foo/bar, where foo is not a directory. 5838 * In such a case we need to return ENOTDIR, but we may happen to get 5839 * here with a different error. 5840 */ 5841 if (dvp->v_type != VDIR) { 5842 error = ENOTDIR; 5843 } 5844 5845 /* 5846 * Hack: handle O_SEARCH. 5847 * 5848 * Open Group Base Specifications Issue 7, 2018 edition states: 5849 * <quote> 5850 * If the access mode of the open file description associated with the 5851 * file descriptor is not O_SEARCH, the function shall check whether 5852 * directory searches are permitted using the current permissions of 5853 * the directory underlying the file descriptor. If the access mode is 5854 * O_SEARCH, the function shall not perform the check. 5855 * </quote> 5856 * 5857 * Regular lookup tests for the NOEXECCHECK flag for every path 5858 * component to decide whether to do the permission check. However, 5859 * since most lookups never have the flag (and when they do it is only 5860 * present for the first path component), lockless lookup only acts on 5861 * it if there is a permission problem. Here the flag is represented 5862 * with a boolean so that we don't have to clear it on the way out. 5863 * 5864 * For simplicity this always aborts. 5865 * TODO: check if this is the first lookup and ignore the permission 5866 * problem. Note the flag has to survive fallback (if it happens to be 5867 * performed). 5868 */ 5869 if (fpl->fsearch) { 5870 return (cache_fpl_aborted(fpl)); 5871 } 5872 5873 switch (error) { 5874 case EAGAIN: 5875 if (!vn_seqc_consistent(dvp, dvp_seqc)) { 5876 error = cache_fpl_aborted(fpl); 5877 } else { 5878 cache_fpl_partial(fpl); 5879 } 5880 break; 5881 default: 5882 if (!vn_seqc_consistent(dvp, dvp_seqc)) { 5883 error = cache_fpl_aborted(fpl); 5884 } else { 5885 cache_fpl_smr_exit(fpl); 5886 cache_fpl_handled_error(fpl, error); 5887 } 5888 break; 5889 } 5890 return (error); 5891 } 5892 5893 static int 5894 cache_fplookup_impl(struct vnode *dvp, struct cache_fpl *fpl) 5895 { 5896 struct nameidata *ndp; 5897 struct componentname *cnp; 5898 struct mount *mp; 5899 int error; 5900 5901 ndp = fpl->ndp; 5902 cnp = fpl->cnp; 5903 5904 cache_fpl_checkpoint(fpl); 5905 5906 /* 5907 * The vnode at hand is almost always stable, skip checking for it. 
5908 * Worst case this postpones the check towards the end of the iteration 5909 * of the main loop. 5910 */ 5911 fpl->dvp = dvp; 5912 fpl->dvp_seqc = vn_seqc_read_notmodify(fpl->dvp); 5913 5914 mp = atomic_load_ptr(&dvp->v_mount); 5915 if (__predict_false(mp == NULL || !cache_fplookup_mp_supported(mp))) { 5916 return (cache_fpl_aborted(fpl)); 5917 } 5918 5919 MPASS(fpl->tvp == NULL); 5920 5921 for (;;) { 5922 cache_fplookup_parse(fpl); 5923 5924 error = VOP_FPLOOKUP_VEXEC(fpl->dvp, cnp->cn_cred); 5925 if (__predict_false(error != 0)) { 5926 error = cache_fplookup_failed_vexec(fpl, error); 5927 break; 5928 } 5929 5930 error = cache_fplookup_next(fpl); 5931 if (__predict_false(cache_fpl_terminated(fpl))) { 5932 break; 5933 } 5934 5935 VNPASS(!seqc_in_modify(fpl->tvp_seqc), fpl->tvp); 5936 5937 if (fpl->tvp->v_type == VLNK) { 5938 error = cache_fplookup_symlink(fpl); 5939 if (cache_fpl_terminated(fpl)) { 5940 break; 5941 } 5942 } else { 5943 if (cache_fpl_islastcn(ndp)) { 5944 error = cache_fplookup_final(fpl); 5945 break; 5946 } 5947 5948 if (!vn_seqc_consistent(fpl->dvp, fpl->dvp_seqc)) { 5949 error = cache_fpl_aborted(fpl); 5950 break; 5951 } 5952 5953 fpl->dvp = fpl->tvp; 5954 fpl->dvp_seqc = fpl->tvp_seqc; 5955 cache_fplookup_parse_advance(fpl); 5956 } 5957 5958 cache_fpl_checkpoint(fpl); 5959 } 5960 5961 return (error); 5962 } 5963 5964 /* 5965 * Fast path lookup protected with SMR and sequence counters. 5966 * 5967 * Note: all VOP_FPLOOKUP_VEXEC routines have a comment referencing this one. 5968 * 5969 * Filesystems can opt in by setting the MNTK_FPLOOKUP flag and meeting criteria 5970 * outlined below. 5971 * 5972 * Traditional vnode lookup conceptually looks like this: 5973 * 5974 * vn_lock(current); 5975 * for (;;) { 5976 * next = find(); 5977 * vn_lock(next); 5978 * vn_unlock(current); 5979 * current = next; 5980 * if (last) 5981 * break; 5982 * } 5983 * return (current); 5984 * 5985 * Each jump to the next vnode is safe memory-wise and atomic with respect to 5986 * any modifications thanks to holding respective locks. 5987 * 5988 * The same guarantee can be provided with a combination of safe memory 5989 * reclamation and sequence counters instead. If all operations which affect 5990 * the relationship between the current vnode and the one we are looking for 5991 * also modify the counter, we can verify whether all the conditions held as 5992 * we made the jump. This includes things like permissions, mount points etc. 5993 * Counter modification is provided by enclosing relevant places in 5994 * vn_seqc_write_begin()/end() calls. 
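 *
 * As an illustrative sketch (not lifted from any particular filesystem),
 * a rename moving an entry between two directories would bracket both of
 * them:
 *
 *	vn_seqc_write_begin(fdvp);
 *	vn_seqc_write_begin(tdvp);
 *	... update the directories and the namecache ...
 *	vn_seqc_write_end(tdvp);
 *	vn_seqc_write_end(fdvp);
 *
 * so that a lockless walker crossing either directory at that time observes
 * a counter in modify (or a changed value afterwards) and aborts.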

/*
 * Fast path lookup protected with SMR and sequence counters.
 *
 * Note: all VOP_FPLOOKUP_VEXEC routines have a comment referencing this one.
 *
 * Filesystems can opt in by setting the MNTK_FPLOOKUP flag and meeting the
 * criteria outlined below.
 *
 * Traditional vnode lookup conceptually looks like this:
 *
 *	vn_lock(current);
 *	for (;;) {
 *		next = find();
 *		vn_lock(next);
 *		vn_unlock(current);
 *		current = next;
 *		if (last)
 *			break;
 *	}
 *	return (current);
 *
 * Each jump to the next vnode is safe memory-wise and atomic with respect to
 * any modifications thanks to holding the respective locks.
 *
 * The same guarantee can be provided with a combination of safe memory
 * reclamation and sequence counters instead. If all operations which affect
 * the relationship between the current vnode and the one we are looking for
 * also modify the counter, we can verify whether all the conditions held as
 * we made the jump. This includes things like permissions, mount points etc.
 * Counter modification is provided by enclosing relevant places in
 * vn_seqc_write_begin()/end() calls.
 *
 * Thus this translates to:
 *
 *	vfs_smr_enter();
 *	dvp_seqc = seqc_read_any(dvp);
 *	if (seqc_in_modify(dvp_seqc)) // someone is altering the vnode
 *		abort();
 *	for (;;) {
 *		tvp = find();
 *		tvp_seqc = seqc_read_any(tvp);
 *		if (seqc_in_modify(tvp_seqc)) // someone is altering the target vnode
 *			abort();
 *		if (!seqc_consistent(dvp, dvp_seqc)) // someone is altering the vnode
 *			abort();
 *		dvp = tvp; // we know nothing of importance has changed
 *		dvp_seqc = tvp_seqc; // store the counter for the tvp iteration
 *		if (last)
 *			break;
 *	}
 *	vget(); // secure the vnode
 *	if (!seqc_consistent(tvp, tvp_seqc)) // final check
 *		abort();
 *	// at this point we know nothing has changed for any parent<->child pair
 *	// as they were crossed during the lookup, meaning we matched the guarantee
 *	// of the locked variant
 *	return (tvp);
 *
 * The API contract for VOP_FPLOOKUP_VEXEC routines is as follows:
 * - they are called while within vfs_smr protection which they must never exit
 * - EAGAIN can be returned to denote checking could not be performed; it is
 *   always valid to return it
 * - if the sequence counter has not changed the result must be valid
 * - if the sequence counter has changed both false positives and false negatives
 *   are permitted (since the result will be rejected later)
 * - for simple cases of unix permission checks vaccess_vexec_smr can be used
 *
 * Caveats to watch out for:
 * - vnodes are passed unlocked and unreferenced with nothing stopping
 *   VOP_RECLAIM, in turn meaning that ->v_data can become NULL. It is advised
 *   to use atomic_load_ptr to fetch it.
 * - the aforementioned object can also get freed, meaning absent other means it
 *   should be protected with vfs_smr
 * - either safely checking permissions as they are modified or guaranteeing
 *   their stability is left to the routine
 */
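
/*
 * Illustration only, not a copy of any in-tree filesystem: a minimal
 * VOP_FPLOOKUP_VEXEC implementation honoring the above contract for plain
 * unix permissions might look roughly like this, with "examplefs_node" being
 * a made-up name for the filesystem's per-vnode private data:
 *
 *	static int
 *	examplefs_fplookup_vexec(struct vop_fplookup_vexec_args *v)
 *	{
 *		struct examplefs_node *np;
 *
 *		// ->v_data may get cleared (or freed) at any point, see the
 *		// caveats above
 *		np = atomic_load_ptr(&v->a_vp->v_data);
 *		if (__predict_false(np == NULL))
 *			return (EAGAIN);
 *		return (vaccess_vexec_smr(np->n_mode, np->n_uid, np->n_gid,
 *		    v->a_cred));
 *	}
 *
 * The routine never leaves vfs_smr and bails with EAGAIN whenever it cannot
 * decide, both of which the contract permits.
 */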

int
cache_fplookup(struct nameidata *ndp, enum cache_fpl_status *status,
    struct pwd **pwdp)
{
	struct cache_fpl fpl;
	struct pwd *pwd;
	struct vnode *dvp;
	struct componentname *cnp;
	int error;

	fpl.status = CACHE_FPL_STATUS_UNSET;
	fpl.in_smr = false;
	fpl.ndp = ndp;
	fpl.cnp = cnp = &ndp->ni_cnd;
	MPASS(ndp->ni_lcf == 0);
	KASSERT((cnp->cn_flags & CACHE_FPL_INTERNAL_CN_FLAGS) == 0,
	    ("%s: internal flags found in cn_flags %" PRIx64, __func__,
	    cnp->cn_flags));
	if ((cnp->cn_flags & SAVESTART) != 0) {
		MPASS(cnp->cn_nameiop != LOOKUP);
	}
	MPASS(cnp->cn_nameptr == cnp->cn_pnbuf);

	if (__predict_false(!cache_can_fplookup(&fpl))) {
		*status = fpl.status;
		SDT_PROBE3(vfs, fplookup, lookup, done, ndp, fpl.line, fpl.status);
		return (EOPNOTSUPP);
	}

	cache_fpl_checkpoint_outer(&fpl);

	cache_fpl_smr_enter_initial(&fpl);
#ifdef INVARIANTS
	fpl.debug.ni_pathlen = ndp->ni_pathlen;
#endif
	fpl.nulchar = &cnp->cn_nameptr[ndp->ni_pathlen - 1];
	fpl.fsearch = false;
	fpl.savename = (cnp->cn_flags & SAVENAME) != 0;
	fpl.tvp = NULL; /* for degenerate path handling */
	fpl.pwd = pwdp;
	pwd = pwd_get_smr();
	*(fpl.pwd) = pwd;
	ndp->ni_rootdir = pwd->pwd_rdir;
	ndp->ni_topdir = pwd->pwd_jdir;

	if (cnp->cn_pnbuf[0] == '/') {
		dvp = cache_fpl_handle_root(&fpl);
		MPASS(ndp->ni_resflags == 0);
		ndp->ni_resflags = NIRES_ABS;
	} else {
		if (ndp->ni_dirfd == AT_FDCWD) {
			dvp = pwd->pwd_cdir;
		} else {
			error = cache_fplookup_dirfd(&fpl, &dvp);
			if (__predict_false(error != 0)) {
				goto out;
			}
		}
	}

	SDT_PROBE4(vfs, namei, lookup, entry, dvp, cnp->cn_pnbuf, cnp->cn_flags, true);
	error = cache_fplookup_impl(dvp, &fpl);
out:
	cache_fpl_smr_assert_not_entered(&fpl);
	cache_fpl_assert_status(&fpl);
	*status = fpl.status;
	if (SDT_PROBES_ENABLED()) {
		SDT_PROBE3(vfs, fplookup, lookup, done, ndp, fpl.line, fpl.status);
		if (fpl.status == CACHE_FPL_STATUS_HANDLED)
			SDT_PROBE4(vfs, namei, lookup, return, error, ndp->ni_vp, true,
			    ndp);
	}

	if (__predict_true(fpl.status == CACHE_FPL_STATUS_HANDLED)) {
		MPASS(error != CACHE_FPL_FAILED);
		if (error != 0) {
			MPASS(fpl.dvp == NULL);
			MPASS(fpl.tvp == NULL);
			MPASS(fpl.savename == false);
		}
		ndp->ni_dvp = fpl.dvp;
		ndp->ni_vp = fpl.tvp;
		if (fpl.savename) {
			cnp->cn_flags |= HASBUF;
		} else {
			cache_fpl_cleanup_cnp(cnp);
		}
	}
	return (error);
}
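
/*
 * Conceptually the caller (normally namei, see vfs_lookup.c) consumes the
 * result along the following lines; a sketch only, the real dispatch differs
 * in detail:
 *
 *	error = cache_fplookup(ndp, &status, &pwd);
 *	if (status == CACHE_FPL_STATUS_HANDLED)
 *		return (error); // fully resolved, whether success or failure
 *	// otherwise (aborted or EOPNOTSUPP) fall back to the regular,
 *	// locked VOP_LOOKUP-based lookup
 */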